Merge "Move constants to JSON file"
diff --git a/BUILD b/BUILD
index 8b45953..9368a1f 100644
--- a/BUILD
+++ b/BUILD
@@ -28,6 +28,10 @@
 # gazelle:resolve go github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/request_all_matches //scouting/webserver/requests/messages:request_all_matches_go_fbs
 # gazelle:resolve go github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/refresh_match_list //scouting/webserver/requests/messages:refresh_match_list_go_fbs
 # gazelle:resolve go github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/refresh_match_list_response //scouting/webserver/requests/messages:refresh_match_list_response_go_fbs
+# gazelle:resolve go github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/request_shift_schedule //scouting/webserver/requests/messages:request_shift_schedule_go_fbs
+# gazelle:resolve go github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/request_shift_schedule_response //scouting/webserver/requests/messages:request_shift_schedule_response_go_fbs
+# gazelle:resolve go github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/submit_shift_schedule //scouting/webserver/requests/messages:submit_shift_schedule_go_fbs
+# gazelle:resolve go github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/submit_shift_schedule_response //scouting/webserver/requests/messages:submit_shift_schedule_response_go_fbs
 
 gazelle(
     name = "gazelle",
diff --git a/Cargo.toml b/Cargo.toml
index fe957f9..8f57539 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -47,6 +47,11 @@
 uuid = "1.0"
 toml = "0.5"
 anyhow = "1.0"
+futures = "0.3"
+once_cell = "1.13"
+thiserror = "1.0"
+bitflags = "1.3"
+smallvec = "1.9"
 
 # For bindgen.
 bindgen = "0.58.1"
diff --git a/WORKSPACE b/WORKSPACE
index e33527c..185018c 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -297,7 +297,9 @@
     "//tools/rust:rust-toolchain-x86",
     "//tools/rust:rust-toolchain-armv7",
     "//tools/rust:rust-toolchain-arm64",
-    "//tools/rust:rust-toolchain-roborio",
+    # TODO(Brian): Make this work. See the comment on
+    # //tools/platforms:linux_roborio for details.
+    #"//tools/rust:rust-toolchain-roborio",
     "//tools/rust:noop_rust_toolchain",
     "//tools/ts:noop_node_toolchain",
 )
@@ -849,9 +851,9 @@
         "armv7-unknown-linux-gnueabihf",
         "aarch64-unknown-linux-gnu",
     ],
-    rustfmt_version = "1.58.1",
+    rustfmt_version = "1.62.0",
     toolchain_name_prefix = "toolchain_for",
-    version = "1.58.1",
+    version = "1.62.0",
 )
 
 load("@io_bazel_rules_webtesting//web:repositories.bzl", "web_test_repositories")
@@ -1026,6 +1028,14 @@
     url = "https://www.frc971.org/Build-Dependencies/MarkupSafe-1.1.1.tar.gz",
 )
 
+http_archive(
+    name = "python_yapf",
+    build_file = "@//debian:python_yapf.BUILD",
+    sha256 = "410ed0f592c898d75d73f7792aee6569bdbc0b57bc72b417c722c17f41f66b12",
+    strip_prefix = "yapf-0.32.0",
+    url = "https://github.com/google/yapf/archive/refs/tags/v0.32.0.tar.gz",
+)
+
 # //debian:lzma_amd64
 http_archive(
     name = "lzma_amd64",
diff --git a/aos/BUILD b/aos/BUILD
index b04bb3d..2bd0ec8 100644
--- a/aos/BUILD
+++ b/aos/BUILD
@@ -396,7 +396,7 @@
     ],
     data = [
         "//aos/events:pingpong_config",
-        "//aos/events:pong.bfbs",
+        "//aos/events:pong_fbs_reflection_out",
         "//aos/testdata:test_configs",
     ],
     target_compatible_with = ["@platforms//os:linux"],
diff --git a/aos/README.md b/aos/README.md
index b99f51d..356c259 100644
--- a/aos/README.md
+++ b/aos/README.md
@@ -14,6 +14,46 @@
 aos_graph_nodes | dot -Tx11
 ```
 
+### Rust
+
+AOS has experimental rust support. This involves creating Rust wrappers for all of the relevant
+C++ types. There must be exactly one wrapper for each type, or you will get confusing errors about
+trying to convert Rust types with very similar names (in different crates though) when you try
+using them together. To standardize this, we have some conventions.
+
+We use autocxx to generate the raw wrappers. Sometimes autocxx needs tweaked C++ signatures to
+generate usable Rust bindings. These go in a separate C++ file with a `_for_rust` suffix, and have
+functions with `ForRust` suffixes.
+
+We want to pass around pointers and references to the autocxx-generated flatbuffers types so we can
+create byte slices to use with the Rust versions, but we ignore many of the flatbuffers types needed
+to wrap individual methods. Some of them are tricky to wrap.
+
+Converting between the autocxx-generated and rustc-generated flatbuffers types is tricky. The Rust
+flatbuffers API is based on slices, but the C++ API that autocxx is wrapping just uses pointers. We
+can convert from a Rust flatbuffer to its C++ equivalent pretty easily, but going the other way
+doesn't work. To maximize flexibility, each C++ wrapper module exposes APIs that take
+autocxx-generated types and provide convenient conversions for the types belonging to that module.
+Flatbuffers returned from C++ by value (typically in a `aos::Flatbuffer`) get returned as Rust
+`aos_flatbuffers::Flatbuffer` objects, while ones being returned from C++ by pointer (or reference)
+are exposed as the autocxx types.
+
+For the file `aos/xyz.fbs`, Rust wrappers go in `aos/xyz.rs`. The Rust flatbuffers generated
+code will be in `aos/xyz_fbs.rs`.
+
+For the file `aos/abc.h`, Rust wrappers go in `aos/abc.rs`. These wrappers may be more sophisticated
+than simple unsafe wrappers, but they should avoid adding additional functionality. Any additional
+C++ code goes in `aos/abc_for_rust.h`/`aos/abc_for_rust.cc`.
+
+All Rust functions intended to be called from other files get exported outside of the `ffi`
+module. In some cases, this is just giving the raw autocxx wrappers a new name. In other cases,
+these wrappers can attach lifetimes etc and be safe. This makes it clear which functions and
+types are being exported, because autocxx generates a lot of wrappers. Do not just make the
+entire `ffi` module, or any of its submodules, public.
+
+Rust modules map to Bazel rules. This means we end up with lots of Rust modules. We name them like
+`aos_events_event_loop` for all the code in `aos/events/event_loop.rs`.
+
 ### NOTES
 
 Some functions need to be in separate translation units in order for them to be guaranteed to work. As the C standard says,
diff --git a/aos/configuration.cc b/aos/configuration.cc
index d05cba8..db00ce7 100644
--- a/aos/configuration.cc
+++ b/aos/configuration.cc
@@ -181,11 +181,11 @@
   // instead.  It is much faster to load .bfbs files than .json files.
   if (!binary_path_exists && !util::PathExists(raw_path)) {
     const bool path_is_absolute = raw_path.size() > 0 && raw_path[0] == '/';
-    if (path_is_absolute && !extra_import_paths.empty()) {
-      LOG(ERROR)
-          << "Can't specify extra import paths if attempting to read a config "
-             "file from an absolute path (path is "
-          << raw_path << ").";
+    if (path_is_absolute) {
+      // Nowhere else to look up an absolute path, so fail now. Note that we
+      // always have at least one extra import path based on /proc/self/exe, so
+      // warning about those paths existing isn't helpful.
+      LOG(ERROR) << ": Failed to find file " << path << ".";
       return std::nullopt;
     }
 
diff --git a/aos/events/logging/BUILD b/aos/events/logging/BUILD
index a4de942..9f6234a 100644
--- a/aos/events/logging/BUILD
+++ b/aos/events/logging/BUILD
@@ -187,7 +187,7 @@
     srcs = [
         "lzma_encoder_test.cc",
     ],
-    shard_count = 4,
+    shard_count = 8,
     target_compatible_with = ["@platforms//os:linux"],
     deps = [
         ":buffer_encoder_param_test",
@@ -497,3 +497,13 @@
     gen_reflections = 1,
     target_compatible_with = ["@platforms//os:linux"],
 )
+
+cc_binary(
+    name = "timestamp_plot",
+    srcs = ["timestamp_plot.cc"],
+    deps = [
+        "//aos:init",
+        "//frc971/analysis:in_process_plotter",
+        "@com_google_absl//absl/strings",
+    ],
+)
diff --git a/aos/events/logging/boot_timestamp.h b/aos/events/logging/boot_timestamp.h
index d0fde73..bd9b357 100644
--- a/aos/events/logging/boot_timestamp.h
+++ b/aos/events/logging/boot_timestamp.h
@@ -4,6 +4,7 @@
 #include <iostream>
 
 #include "aos/time/time.h"
+#include "glog/logging.h"
 
 namespace aos::logger {
 
@@ -24,6 +25,18 @@
     return {boot, duration - d};
   }
 
+  BootDuration operator-(BootDuration d) const {
+    CHECK_EQ(d.boot, boot);
+    return {boot, duration - d.duration};
+  }
+
+  BootDuration operator+(BootDuration d) const {
+    CHECK_EQ(d.boot, boot);
+    return {boot, duration + d.duration};
+  }
+
+  BootDuration operator/(int x) const { return {boot, duration / x}; }
+
   bool operator==(const BootDuration &m2) const {
     return boot == m2.boot && duration == m2.duration;
   }
@@ -77,12 +90,22 @@
   BootTimestamp operator+(monotonic_clock::duration d) const {
     return {boot, time + d};
   }
+
+  BootTimestamp operator+=(monotonic_clock::duration d) {
+    time += d;
+    return *this;
+  }
   BootTimestamp operator-(monotonic_clock::duration d) const {
     return {boot, time - d};
   }
   BootTimestamp operator+(BootDuration d) const {
     return {boot, time + d.duration};
   }
+
+  BootDuration operator-(BootTimestamp t) const {
+    CHECK_EQ(t.boot, boot);
+    return {boot, time - t.time};
+  }
 };
 
 // Structure to hold both a boot and queue index.  Queue indices reset after
diff --git a/aos/events/logging/buffer_encoder_param_test.cc b/aos/events/logging/buffer_encoder_param_test.cc
index e464a52..7c9bb4b 100644
--- a/aos/events/logging/buffer_encoder_param_test.cc
+++ b/aos/events/logging/buffer_encoder_param_test.cc
@@ -69,7 +69,7 @@
       const size_t read_result =
           decoder->Read(chunk.data(), chunk.data() + chunk_size);
       if (read_result + total_decoded_size != total_encoded_size) {
-        // We didn't read anything, so we should've read the complete chunk.
+        // We didn't read everything, so we should've read the complete chunk.
         ASSERT_EQ(read_result, chunk_size)
             << "Read " << read_result + total_decoded_size << " of "
             << total_encoded_size << " expected bytes.";
diff --git a/aos/events/logging/logger_test.cc b/aos/events/logging/logger_test.cc
index bb5b41e..5f265e2 100644
--- a/aos/events/logging/logger_test.cc
+++ b/aos/events/logging/logger_test.cc
@@ -3482,9 +3482,9 @@
 }
 
 constexpr std::string_view kCombinedConfigSha1(
-    "158a244107a7dc637fc5934ac161cb9e6c26195930fd8f82bb351c3ad7cce349");
+    "bcc66bc13a90a4a268649744e244129c5d024f5abd67587dcfbd7158d8abfc44");
 constexpr std::string_view kSplitConfigSha1(
-    "c73aa7913a9e116ee0a793d8280fac170b7eeea8e7350f45c6ac5bfc4ab018e1");
+    "d97e998164a6f1bf078aad77ef127329728ac9198a13a5ab8d5f30d84a932662");
 
 INSTANTIATE_TEST_SUITE_P(
     All, MultinodeLoggerTest,
diff --git a/aos/events/logging/lzma_encoder.cc b/aos/events/logging/lzma_encoder.cc
index f32d7e2..5cb9897 100644
--- a/aos/events/logging/lzma_encoder.cc
+++ b/aos/events/logging/lzma_encoder.cc
@@ -2,6 +2,8 @@
 
 #include "glog/logging.h"
 
+DEFINE_int32(lzma_threads, 1, "Number of threads to use for encoding");
+
 namespace aos::logger {
 namespace {
 
@@ -48,16 +50,32 @@
 
 }  // namespace
 
-LzmaEncoder::LzmaEncoder(const uint32_t compression_preset)
+LzmaEncoder::LzmaEncoder(const uint32_t compression_preset, size_t block_size)
     : stream_(LZMA_STREAM_INIT), compression_preset_(compression_preset) {
   CHECK_GE(compression_preset_, 0u)
       << ": Compression preset must be in the range [0, 9].";
   CHECK_LE(compression_preset_, 9u)
       << ": Compression preset must be in the range [0, 9].";
 
-  lzma_ret status =
-      lzma_easy_encoder(&stream_, compression_preset_, LZMA_CHECK_CRC64);
-  CHECK(LzmaCodeIsOk(status));
+  if (FLAGS_lzma_threads <= 1) {
+    lzma_ret status =
+        lzma_easy_encoder(&stream_, compression_preset_, LZMA_CHECK_CRC64);
+    CHECK(LzmaCodeIsOk(status));
+  } else {
+    lzma_mt mt_options;
+    memset(&mt_options, 0, sizeof(mt_options));
+    mt_options.threads = FLAGS_lzma_threads;
+    mt_options.block_size = block_size;
+    // Compress for at most 100 ms before relinquishing control back to the main
+    // thread.
+    mt_options.timeout = 100;
+    mt_options.preset = compression_preset_;
+    mt_options.filters = nullptr;
+    mt_options.check = LZMA_CHECK_CRC64;
+    lzma_ret status = lzma_stream_encoder_mt(&stream_, &mt_options);
+    CHECK(LzmaCodeIsOk(status));
+  }
+
   stream_.avail_out = 0;
   VLOG(2) << "LzmaEncoder: Initialization succeeded.";
 }
diff --git a/aos/events/logging/lzma_encoder.h b/aos/events/logging/lzma_encoder.h
index 4edd0e8..14c00eb 100644
--- a/aos/events/logging/lzma_encoder.h
+++ b/aos/events/logging/lzma_encoder.h
@@ -18,8 +18,10 @@
 // Encodes buffers using liblzma.
 class LzmaEncoder final : public DetachedBufferEncoder {
  public:
-  // Initializes the LZMA stream and encoder.
-  explicit LzmaEncoder(uint32_t compression_preset);
+  // Initializes the LZMA stream and encoder.  The block size is the block size
+  // used by the multithreaded encoder for batching.  A block size of 0 tells
+  // lzma to pick its favorite block size.
+  explicit LzmaEncoder(uint32_t compression_preset, size_t block_size = 0);
   LzmaEncoder(const LzmaEncoder &) = delete;
   LzmaEncoder(LzmaEncoder &&other) = delete;
   LzmaEncoder &operator=(const LzmaEncoder &) = delete;
@@ -36,7 +38,7 @@
   size_t queue_size() const final { return queue_.size(); }
 
  private:
-  static constexpr size_t kEncodedBufferSizeBytes{4096};
+  static constexpr size_t kEncodedBufferSizeBytes{4096 * 10};
 
   void RunLzmaCode(lzma_action action);
 
diff --git a/aos/events/logging/lzma_encoder_test.cc b/aos/events/logging/lzma_encoder_test.cc
index bbd0c60..1b8c895 100644
--- a/aos/events/logging/lzma_encoder_test.cc
+++ b/aos/events/logging/lzma_encoder_test.cc
@@ -5,12 +5,37 @@
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
 
+DECLARE_int32(lzma_threads);
+
 namespace aos::logger::testing {
 
 INSTANTIATE_TEST_SUITE_P(
+    MtLzma, BufferEncoderTest,
+    ::testing::Combine(::testing::Values([]() {
+                         FLAGS_lzma_threads = 3;
+                         return std::make_unique<LzmaEncoder>(2, 4096);
+                       }),
+                       ::testing::Values([](std::string_view filename) {
+                         return std::make_unique<LzmaDecoder>(filename);
+                       }),
+                       ::testing::Range(0, 100)));
+
+INSTANTIATE_TEST_SUITE_P(
+    MtLzmaThreaded, BufferEncoderTest,
+    ::testing::Combine(::testing::Values([]() {
+                         FLAGS_lzma_threads = 3;
+                         return std::make_unique<LzmaEncoder>(5, 4096);
+                       }),
+                       ::testing::Values([](std::string_view filename) {
+                         return std::make_unique<ThreadedLzmaDecoder>(filename);
+                       }),
+                       ::testing::Range(0, 100)));
+
+INSTANTIATE_TEST_SUITE_P(
     Lzma, BufferEncoderTest,
     ::testing::Combine(::testing::Values([]() {
-                         return std::make_unique<LzmaEncoder>(2);
+                         FLAGS_lzma_threads = 1;
+                         return std::make_unique<LzmaEncoder>(2, 4096);
                        }),
                        ::testing::Values([](std::string_view filename) {
                          return std::make_unique<LzmaDecoder>(filename);
@@ -20,7 +45,8 @@
 INSTANTIATE_TEST_SUITE_P(
     LzmaThreaded, BufferEncoderTest,
     ::testing::Combine(::testing::Values([]() {
-                         return std::make_unique<LzmaEncoder>(2);
+                         FLAGS_lzma_threads = 1;
+                         return std::make_unique<LzmaEncoder>(5, 4096);
                        }),
                        ::testing::Values([](std::string_view filename) {
                          return std::make_unique<ThreadedLzmaDecoder>(filename);
diff --git a/aos/events/logging/timestamp_plot.cc b/aos/events/logging/timestamp_plot.cc
new file mode 100644
index 0000000..9c5f2dd
--- /dev/null
+++ b/aos/events/logging/timestamp_plot.cc
@@ -0,0 +1,331 @@
+#include "absl/strings/str_cat.h"
+#include "absl/strings/str_split.h"
+#include "aos/init.h"
+#include "aos/util/file.h"
+#include "frc971/analysis/in_process_plotter.h"
+
+using frc971::analysis::Plotter;
+
+DEFINE_bool(all, false, "If true, plot *all* the nodes at once");
+DEFINE_bool(bounds, false, "If true, plot the noncausal bounds too.");
+DEFINE_bool(samples, true, "If true, plot the samples too.");
+
+DEFINE_string(offsets, "",
+              "Offsets to add to the monotonic clock for each node.  Use the "
+              "format of node=offset,node=offset");
+
+// Simple C++ application to read the CSV files and use the in process plotter
+// to plot them.  This smokes the pants off gnuplot in terms of interactivity.
+
+namespace aos {
+
+// Returns all the nodes.
+std::vector<std::string> Nodes() {
+  const std::string start_time_file = aos::util::ReadFileToStringOrDie(
+      "/tmp/timestamp_noncausal_starttime.csv");
+  std::vector<std::string_view> nodes = absl::StrSplit(start_time_file, '\n');
+
+  std::vector<std::string> formatted_nodes;
+  for (const std::string_view n : nodes) {
+    if (n == "") {
+      continue;
+    }
+
+    std::vector<std::string_view> l = absl::StrSplit(n, ", ");
+    CHECK_EQ(l.size(), 2u) << "'" << n << "'";
+    formatted_nodes.emplace_back(l[0]);
+  }
+
+  return formatted_nodes;
+}
+
+std::string SampleFile(std::string_view node1, std::string_view node2) {
+  return absl::StrCat("/tmp/timestamp_noncausal_", node1, "_", node2,
+                      "_samples.csv");
+}
+
+std::pair<std::vector<double>, std::vector<double>> ReadSamples(
+    std::string_view node1, std::string_view node2, bool flip) {
+  std::vector<double> samplefile12_t;
+  std::vector<double> samplefile12_o;
+
+  const std::string file =
+      aos::util::ReadFileToStringOrDie(SampleFile(node1, node2));
+  bool first = true;
+  std::vector<std::string_view> lines = absl::StrSplit(file, '\n');
+  samplefile12_t.reserve(lines.size());
+  for (const std::string_view n : lines) {
+    if (first) {
+      first = false;
+      continue;
+    }
+    if (n == "") {
+      continue;
+    }
+
+    std::vector<std::string_view> l = absl::StrSplit(n, ", ");
+    CHECK_EQ(l.size(), 4u);
+    double t;
+    double o;
+    CHECK(absl::SimpleAtod(l[0], &t));
+    CHECK(absl::SimpleAtod(l[1], &o));
+    samplefile12_t.emplace_back(t);
+    samplefile12_o.emplace_back(flip ? -o : o);
+  }
+  return std::make_pair(samplefile12_t, samplefile12_o);
+}
+
+void Offset(std::vector<double> *v, double offset) {
+  for (double &x : *v) {
+    x += offset;
+  }
+}
+
+// Returns all the nodes which talk to each other.
+std::vector<std::pair<std::string, std::string>> NodeConnections() {
+  const std::vector<std::string> nodes = Nodes();
+  std::vector<std::pair<std::string, std::string>> result;
+  for (size_t i = 1; i < nodes.size(); ++i) {
+    for (size_t j = 0; j < i; ++j) {
+      const std::string_view node1 = nodes[j];
+      const std::string_view node2 = nodes[i];
+      if (aos::util::PathExists(SampleFile(node1, node2))) {
+        result.emplace_back(node1, node2);
+        LOG(INFO) << "Found pairing " << node1 << ", " << node2;
+      }
+    }
+  }
+  return result;
+}
+
+// Class to encapsulate the plotter state to make it easy to plot multiple
+// connections.
+class NodePlotter {
+ public:
+  NodePlotter() : nodes_(Nodes()) {
+    plotter_.AddFigure("Time");
+    if (!FLAGS_offsets.empty()) {
+      for (std::string_view nodeoffset : absl::StrSplit(FLAGS_offsets, ',')) {
+        std::vector<std::string_view> node_offset =
+            absl::StrSplit(nodeoffset, '=');
+        CHECK_EQ(node_offset.size(), 2u);
+        double o;
+        CHECK(absl::SimpleAtod(node_offset[1], &o));
+        offset_.emplace(std::string(node_offset[0]), o);
+      }
+    }
+  }
+
+  void AddNodes(std::string_view node1, std::string_view node2);
+
+  void Serve() {
+    plotter_.Publish();
+    plotter_.Spin();
+  }
+
+ private:
+  std::pair<std::vector<double>, std::vector<double>> ReadLines(
+      std::string_view node1, std::string_view node2, bool flip);
+
+  std::pair<std::vector<double>, std::vector<double>> ReadOffset(
+      std::string_view node1, std::string_view node2);
+
+  double TimeOffset(std::string_view node) {
+    auto it = offset_.find(std::string(node));
+    if (it == offset_.end()) {
+      return 0.0;
+    } else {
+      return it->second;
+    }
+  }
+
+  std::map<std::string, double> offset_;
+
+  Plotter plotter_;
+
+  std::vector<std::string> nodes_;
+};
+
+std::pair<std::vector<double>, std::vector<double>> NodePlotter::ReadLines(
+    std::string_view node1, std::string_view node2, bool flip) {
+  std::vector<double> samplefile12_t;
+  std::vector<double> samplefile12_o;
+
+  const std::string file = aos::util::ReadFileToStringOrDie(
+      absl::StrCat("/tmp/timestamp_noncausal_", node1, "_", node2, ".csv"));
+  bool first = true;
+  std::vector<std::string_view> lines = absl::StrSplit(file, '\n');
+  samplefile12_t.reserve(lines.size());
+  for (const std::string_view n : lines) {
+    if (first) {
+      first = false;
+      continue;
+    }
+    if (n == "") {
+      continue;
+    }
+
+    std::vector<std::string_view> l = absl::StrSplit(n, ", ");
+    CHECK_EQ(l.size(), 3u);
+    double t;
+    double o;
+    CHECK(absl::SimpleAtod(l[0], &t));
+    CHECK(absl::SimpleAtod(l[2], &o));
+    samplefile12_t.emplace_back(t);
+    samplefile12_o.emplace_back(flip ? -o : o);
+  }
+  return std::make_pair(samplefile12_t, samplefile12_o);
+}
+
+std::pair<std::vector<double>, std::vector<double>> NodePlotter::ReadOffset(
+    std::string_view node1, std::string_view node2) {
+  int node1_index = -1;
+  int node2_index = -1;
+
+  {
+    int index = 0;
+    for (const std::string &n : nodes_) {
+      if (n == node1) {
+        node1_index = index;
+      }
+      if (n == node2) {
+        node2_index = index;
+      }
+      ++index;
+    }
+  }
+  CHECK_NE(node1_index, -1) << ": Unknown node " << node1;
+  CHECK_NE(node2_index, -1) << ": Unknown node " << node2;
+  std::vector<double> offsetfile_t;
+  std::vector<double> offsetfile_o;
+
+  const std::string file =
+      aos::util::ReadFileToStringOrDie("/tmp/timestamp_noncausal_offsets.csv");
+  bool first = true;
+  std::vector<std::string_view> lines = absl::StrSplit(file, '\n');
+  offsetfile_t.reserve(lines.size());
+  for (const std::string_view n : lines) {
+    if (first) {
+      first = false;
+      continue;
+    }
+    if (n == "") {
+      continue;
+    }
+
+    std::vector<std::string_view> l = absl::StrSplit(n, ", ");
+    CHECK_LT(static_cast<size_t>(node1_index + 1), l.size());
+    CHECK_LT(static_cast<size_t>(node2_index + 1), l.size());
+    double t;
+    double o1;
+    double o2;
+    CHECK(absl::SimpleAtod(l[0], &t));
+    CHECK(absl::SimpleAtod(l[1 + node1_index], &o1));
+    CHECK(absl::SimpleAtod(l[1 + node2_index], &o2));
+    offsetfile_t.emplace_back(t);
+    offsetfile_o.emplace_back(o2 - o1);
+  }
+  return std::make_pair(offsetfile_t, offsetfile_o);
+}
+
+void NodePlotter::AddNodes(std::string_view node1, std::string_view node2) {
+  const double offset1 = TimeOffset(node1);
+  const double offset2 = TimeOffset(node2);
+
+  std::pair<std::vector<double>, std::vector<double>> samplefile12 =
+      ReadSamples(node1, node2, false);
+  std::pair<std::vector<double>, std::vector<double>> samplefile21 =
+      ReadSamples(node2, node1, true);
+
+  std::pair<std::vector<double>, std::vector<double>> noncausalfile12 =
+      ReadLines(node1, node2, false);
+  std::pair<std::vector<double>, std::vector<double>> noncausalfile21 =
+      ReadLines(node2, node1, true);
+
+  std::pair<std::vector<double>, std::vector<double>> offsetfile =
+      ReadOffset(node1, node2);
+
+  Offset(&samplefile12.second, offset2 - offset1);
+  Offset(&samplefile21.second, offset2 - offset1);
+  Offset(&noncausalfile12.second, offset2 - offset1);
+  Offset(&noncausalfile21.second, offset2 - offset1);
+  Offset(&offsetfile.second, offset2 - offset1);
+
+  CHECK_EQ(samplefile12.first.size(), samplefile12.second.size());
+  CHECK_EQ(samplefile21.first.size(), samplefile21.second.size());
+  CHECK_EQ(noncausalfile12.first.size(), noncausalfile12.second.size());
+  CHECK_EQ(noncausalfile21.first.size(), noncausalfile21.second.size());
+
+  LOG(INFO) << samplefile12.first.size() + samplefile21.first.size() +
+                   noncausalfile12.first.size() + noncausalfile21.first.size()
+            << " points";
+
+  plotter_.AddLine(offsetfile.first, offsetfile.second,
+                   Plotter::LineOptions{
+                       .label = absl::StrCat("filter ", node2, " ", node1),
+                       // TODO(austin): roboRIO compiler wants all the fields
+                       // filled out, but other compilers don't...  Sigh.
+                       .line_style = "*-",
+                       .color = "yellow",
+                       .point_size = 2.0});
+
+  if (FLAGS_samples) {
+    plotter_.AddLine(samplefile12.first, samplefile12.second,
+                     Plotter::LineOptions{
+                         .label = absl::StrCat("sample ", node1, " ", node2),
+                         .line_style = "*",
+                         .color = "purple",
+                     });
+    plotter_.AddLine(samplefile21.first, samplefile21.second,
+                     Plotter::LineOptions{
+                         .label = absl::StrCat("sample ", node2, " ", node1),
+                         .line_style = "*",
+                         .color = "green",
+                     });
+  }
+
+  if (FLAGS_bounds) {
+    plotter_.AddLine(
+        noncausalfile12.first, noncausalfile12.second,
+        Plotter::LineOptions{.label = absl::StrCat("nc ", node1, " ", node2),
+                             .line_style = "-",
+                             .color = "blue"});
+    plotter_.AddLine(
+        noncausalfile21.first, noncausalfile21.second,
+        Plotter::LineOptions{.label = absl::StrCat("nc ", node2, " ", node1),
+                             .line_style = "-",
+                             .color = "orange"});
+  }
+}
+
+int Main(int argc, const char *const *argv) {
+  NodePlotter plotter;
+
+  if (FLAGS_all) {
+    for (std::pair<std::string, std::string> ab : NodeConnections()) {
+      plotter.AddNodes(ab.first, ab.second);
+    }
+  } else {
+    CHECK_EQ(argc, 3);
+
+    LOG(INFO) << argv[1];
+    LOG(INFO) << argv[2];
+
+    const std::string_view node1 = argv[1];
+    const std::string_view node2 = argv[2];
+
+    plotter.AddNodes(node1, node2);
+  }
+
+  plotter.Serve();
+
+  return 0;
+}
+
+}  // namespace aos
+
+int main(int argc, char **argv) {
+  aos::InitGoogle(&argc, &argv);
+
+  aos::Main(argc, argv);
+}
diff --git a/aos/network/log_web_proxy_main.cc b/aos/network/log_web_proxy_main.cc
index 782c2be..1cc8e17 100644
--- a/aos/network/log_web_proxy_main.cc
+++ b/aos/network/log_web_proxy_main.cc
@@ -15,6 +15,8 @@
 DEFINE_string(data_dir, "www", "Directory to serve data files from");
 DEFINE_string(node, "", "Directory to serve data files from");
 DEFINE_int32(buffer_size, -1, "-1 if infinite, in # of messages / channel.");
+DEFINE_double(monotonic_start_time, -1.0, "Start time (sec)");
+DEFINE_double(monotonic_end_time, -1.0, "End time (sec)");
 
 int main(int argc, char **argv) {
   aos::InitGoogle(&argc, &argv);
@@ -43,6 +45,15 @@
 
   event_loop->SkipTimingReport();
 
+  if (FLAGS_monotonic_start_time > 0) {
+    event_loop->AddTimer([&reader]() { reader.event_loop_factory()->Exit(); })
+        ->Setup(aos::monotonic_clock::time_point(
+            std::chrono::duration_cast<std::chrono::nanoseconds>(
+                std::chrono::duration<double>(FLAGS_monotonic_start_time))));
+
+    reader.event_loop_factory()->Run();
+  }
+
   aos::web_proxy::WebProxy web_proxy(
       event_loop.get(), aos::web_proxy::StoreHistory::kYes, FLAGS_buffer_size);
 
@@ -51,5 +62,12 @@
   // Keep the web proxy alive past when we finish reading the logfile.
   reader.set_exit_on_finish(false);
 
+  if (FLAGS_monotonic_end_time > 0) {
+    event_loop->AddTimer([&web_proxy]() { web_proxy.StopRecording(); })
+        ->Setup(aos::monotonic_clock::time_point(
+            std::chrono::duration_cast<std::chrono::nanoseconds>(
+                std::chrono::duration<double>(FLAGS_monotonic_end_time))));
+  }
+
   reader.event_loop_factory()->Run();
 }
diff --git a/aos/network/multinode_timestamp_filter.cc b/aos/network/multinode_timestamp_filter.cc
index b87d5b9..81fc03f 100644
--- a/aos/network/multinode_timestamp_filter.cc
+++ b/aos/network/multinode_timestamp_filter.cc
@@ -20,6 +20,11 @@
 DEFINE_int32(max_invalid_distance_ns, 0,
              "The max amount of time we will let the solver go backwards.");
 
+DEFINE_bool(bounds_offset_error, false,
+            "If true, use the offset to the bounds for solving instead of to "
+            "the interpolation lines.  This seems to make startup a bit "
+            "better, but won't track the middle as well.");
+
 namespace aos {
 namespace message_bridge {
 namespace {
@@ -130,7 +135,16 @@
 TimestampProblem::Derivitives TimestampProblem::ComputeDerivitives(
       const Eigen::Ref<Eigen::VectorXd> time_offsets) {
   Derivitives result;
+
+  // We get back both integer and double remainders for the gradient.  We then
+  // add them all up.  Rather than doing that purely as doubles, let's save up
+  // both, compute the result, then convert the remainder to doubles.  This is a
+  // bigger issue at the start when we are extrapolating a lot, and the offsets
+  // can be quite large in each direction.
+  Eigen::Matrix<chrono::nanoseconds, Eigen::Dynamic, 1> intgrad =
+      Eigen::Matrix<chrono::nanoseconds, Eigen::Dynamic, 1>::Zero(live_nodes_);
   result.gradient = Eigen::VectorXd::Zero(live_nodes_);
+
   result.hessian = Eigen::MatrixXd::Zero(live_nodes_, live_nodes_);
 
   for (size_t i = 0; i < clock_offset_filter_for_node_.size(); ++i) {
@@ -167,30 +181,61 @@
       // extra factor, the solution will be the same (or close enough).
       constexpr double kMinNetworkDelay = 2.0;
 
-      const std::pair<NoncausalTimestampFilter::Pointer, double> offset_error =
-          filter.filter->OffsetError(
-              filter.b_filter, filter.pointer, base_clock_[i],
-              time_offsets(a_solution_index), base_clock_[filter.b_index],
-              time_offsets(b_solution_index));
-      const double error = 2.0 * (offset_error.second - kMinNetworkDelay);
+      const std::pair<NoncausalTimestampFilter::Pointer,
+                      std::pair<chrono::nanoseconds, double>>
+          offset_error =
+              FLAGS_bounds_offset_error
+                  ? filter.filter->BoundsOffsetError(
+                        filter.b_filter, filter.pointer, base_clock_[i],
+                        time_offsets(a_solution_index),
+                        base_clock_[filter.b_index],
+                        time_offsets(b_solution_index))
+                  : filter.filter->OffsetError(filter.b_filter, filter.pointer,
+                                               base_clock_[i],
+                                               time_offsets(a_solution_index),
+                                               base_clock_[filter.b_index],
+                                               time_offsets(b_solution_index));
       filter.pointer = offset_error.first;
 
-      result.gradient(a_solution_index) += -error;
-      result.gradient(b_solution_index) += error;
+      const std::pair<chrono::nanoseconds, double> error =
+          std::make_pair(offset_error.second.first,
+                         offset_error.second.second - kMinNetworkDelay);
+
+      std::pair<chrono::nanoseconds, double> grad;
+      double hess;
+
+      grad = std::make_pair(2 * error.first, 2 * error.second);
+      hess = 2.0;
+
+      intgrad(a_solution_index) += -grad.first;
+      intgrad(b_solution_index) += grad.first;
+      result.gradient(a_solution_index) += -grad.second;
+      result.gradient(b_solution_index) += grad.second;
+
+      VLOG(2) << "  Filter pair "
+              << filter.filter->node_a()->name()->string_view() << "("
+              << a_solution_index << ") -> "
+              << filter.filter->node_b()->name()->string_view() << "("
+              << b_solution_index << "): " << std::setprecision(12)
+              << error.first.count() << " + " << error.second;
 
       // Reminder, our cost function has the following form.
       //   ((tb - (1 + ma) ta - ba)^2
       // We are ignoring the slope when taking the derivative and applying the
-      // chain rule to keep the gradient smooth.  This means that the Hessian is
-      // 2 for d^2 cost/dta^2 and d^2 cost/dtb^2
-      result.hessian(a_solution_index, a_solution_index) += 2;
-      result.hessian(b_solution_index, a_solution_index) += -2;
+      // chain rule to keep the gradient smooth.  This means that the Hessian
+      // is 2 for d^2 cost/dta^2 and d^2 cost/dtb^2
+      result.hessian(a_solution_index, a_solution_index) += hess;
+      result.hessian(b_solution_index, a_solution_index) += -hess;
       result.hessian(a_solution_index, b_solution_index) =
           result.hessian(b_solution_index, a_solution_index);
-      result.hessian(b_solution_index, b_solution_index) += 2;
+      result.hessian(b_solution_index, b_solution_index) += hess;
     }
   }
 
+  for (int i = 0; i < intgrad.rows(); ++i) {
+    result.gradient(i) += static_cast<double>(intgrad(i).count());
+  }
+
   return result;
 }
 
@@ -307,9 +352,9 @@
                                              solution_node);
 }
 
-std::tuple<std::vector<BootTimestamp>, size_t> TimestampProblem::SolveNewton(
-    const std::vector<logger::BootTimestamp> &points) {
-  constexpr int kMaxIterations = 200;
+std::tuple<std::vector<BootTimestamp>, size_t, size_t>
+TimestampProblem::SolveNewton(const std::vector<logger::BootTimestamp> &points,
+                              const size_t max_iterations) {
   MaybeUpdateNodeMapping();
   for (size_t i = 0; i < points.size(); ++i) {
     if (points[i] != logger::BootTimestamp::max_time()) {
@@ -318,7 +363,7 @@
   }
   Eigen::VectorXd data = Eigen::VectorXd::Zero(live_nodes_);
 
-  int solution_number = 0;
+  size_t iteration = 0;
   size_t solution_node;
   while (true) {
     Eigen::VectorXd step;
@@ -335,12 +380,12 @@
           ComputeDerivitives(data).gradient +
           step(live_nodes_) * constraint_jacobian.transpose();
 
-      VLOG(2) << "Adjusted grad " << solution_number << " -> "
+      VLOG(2) << "Adjusted grad " << iteration << " -> "
               << std::setprecision(12) << std::fixed << std::setfill(' ')
               << adjusted_grad.transpose().format(kHeavyFormat);
     }
 
-    VLOG(2) << "Step " << solution_number << " -> " << std::setprecision(12)
+    VLOG(2) << "Step " << iteration << " -> " << std::setprecision(12)
             << std::fixed << std::setfill(' ')
             << step.transpose().format(kHeavyFormat);
     // We got there if the max step is small (this is strongly correlated to the
@@ -357,7 +402,7 @@
 
     data += step.block(0, 0, live_nodes_, 1);
 
-    ++solution_number;
+    ++iteration;
 
     // We are doing all our math with both an int64 base and a double offset.
     // This lets us handle large offsets while retaining precision down to the
@@ -376,11 +421,23 @@
         base_clock_[j].time += chrono::nanoseconds(dsolution);
         data(solution_index) -= dsolution;
       }
+      if (live(j)) {
+        VLOG(2) << "    live  "
+                << base_clock_[j].time +
+                       std::chrono::nanoseconds(static_cast<int64_t>(
+                           std::round(data(NodeToFullSolutionIndex(j)))))
+                << " "
+                << (data(NodeToFullSolutionIndex(j)) -
+                    std::round(data(NodeToFullSolutionIndex(j))))
+                << " (unrounded: " << data(NodeToFullSolutionIndex(j)) << ")";
+      } else {
+        VLOG(2) << "    dead  " << aos::monotonic_clock::min_time;
+      }
     }
 
     // And finally, don't let us iterate forever.  If it isn't converging,
     // report back.
-    if (solution_number > kMaxIterations) {
+    if (iteration > max_iterations) {
       break;
     }
   }
@@ -388,7 +445,7 @@
   for (size_t i = 0; i < points.size(); ++i) {
     if (points[i] != logger::BootTimestamp::max_time()) {
       VLOG(2) << "Solving for node " << i << " of " << points[i] << " in "
-              << solution_number << " cycles";
+              << iteration << " cycles";
     }
   }
   std::vector<BootTimestamp> result(size());
@@ -398,20 +455,24 @@
       result[i].time = base_clock(i).time +
                        std::chrono::nanoseconds(static_cast<int64_t>(
                            std::round(data(NodeToFullSolutionIndex(i)))));
-      VLOG(2) << "live  " << result[i] << " "
-              << (data(NodeToFullSolutionIndex(i)) -
-                  std::round(data(NodeToFullSolutionIndex(i))))
-              << " (unrounded: " << data(NodeToFullSolutionIndex(i)) << ")";
+      if (VLOG_IS_ON(2) || iteration > max_iterations) {
+        LOG(INFO) << "live  " << result[i] << " "
+                  << (data(NodeToFullSolutionIndex(i)) -
+                      std::round(data(NodeToFullSolutionIndex(i))))
+                  << " (unrounded: " << data(NodeToFullSolutionIndex(i)) << ")";
+      }
     } else {
       result[i] = BootTimestamp::min_time();
-      VLOG(2) << "dead  " << result[i];
+      if (VLOG_IS_ON(2) || iteration > max_iterations) {
+        LOG(INFO) << "dead  " << result[i];
+      }
     }
   }
-  if (solution_number > kMaxIterations) {
-    LOG(FATAL) << "Failed to converge.";
+  if (iteration > max_iterations) {
+    LOG(ERROR) << "Failed to converge.";
   }
 
-  return std::make_pair(std::move(result), solution_node);
+  return std::make_tuple(std::move(result), solution_node, iteration);
 }
 
 void TimestampProblem::MaybeUpdateNodeMapping() {
@@ -796,12 +857,31 @@
   }
 }
 
-MultiNodeNoncausalOffsetEstimator::~MultiNodeNoncausalOffsetEstimator() {
+void MultiNodeNoncausalOffsetEstimator::FlushAndClose(bool destructor) {
+  // Write out all the data in our filters.
   FlushAllSamples(true);
   if (fp_) {
     fclose(fp_);
     fp_ = NULL;
   }
+
+  if (filter_fps_.size() != 0 && !destructor) {
+    size_t node_a_index = 0;
+    for (const auto &filters : filters_per_node_) {
+      for (const auto &filter : filters) {
+        while (true) {
+          std::optional<std::tuple<logger::BootTimestamp, logger::BootDuration>>
+              sample = filter.filter->Consume();
+          if (!sample) {
+            break;
+          }
+          WriteFilter(filter.filter, *sample);
+        }
+      }
+      ++node_a_index;
+    }
+  }
+
   if (filter_fps_.size() != 0) {
     for (std::vector<FILE *> &filter_fp : filter_fps_) {
       for (FILE *&fp : filter_fp) {
@@ -820,6 +900,7 @@
       }
     }
   }
+
   if (all_done_) {
     size_t node_a_index = 0;
     for (const auto &filters : filters_per_node_) {
@@ -850,6 +931,10 @@
   }
 }
 
+MultiNodeNoncausalOffsetEstimator::~MultiNodeNoncausalOffsetEstimator() {
+  FlushAndClose(true);
+}
+
 UUID MultiNodeNoncausalOffsetEstimator::boot_uuid(size_t node_index,
                                                   size_t boot_count) {
   CHECK(boots_);
@@ -1554,8 +1639,14 @@
       problem->set_base_clock(node_index, {base_times[node_index].boot,
                                            base_times[node_index].time + dt});
     }
-    std::tuple<std::vector<BootTimestamp>, size_t> solution =
-        problem->SolveNewton(points);
+    std::tuple<std::vector<BootTimestamp>, size_t, size_t> solution =
+        problem->SolveNewton(points, kMaxIterations);
+
+    if (std::get<2>(solution) > kMaxIterations) {
+      UpdateSolution(std::move(std::get<0>(solution)));
+      FlushAndClose(false);
+      LOG(FATAL) << "Failed to converge.";
+    }
 
     if (!problem->ValidateSolution(std::get<0>(solution))) {
       LOG(WARNING) << "Invalid solution, constraints not met.";
@@ -1564,6 +1655,8 @@
       }
       problem->Debug();
       if (!skip_order_validation_) {
+        UpdateSolution(std::move(std::get<0>(solution)));
+        FlushAndClose(false);
         LOG(FATAL) << "Bailing, use --skip_order_validation to continue.  "
                       "Use at your own risk.";
       }
@@ -1607,6 +1700,8 @@
   if (skip_order_validation_) {
     LOG(ERROR) << "Skipping because --skip_order_validation";
   } else {
+    UpdateSolution(solution);
+    FlushAndClose(false);
     LOG(FATAL) << "Please investigate.  Use --max_invalid_distance_ns="
                << invalid_distance.count() << " to ignore this.";
   }
@@ -1701,8 +1796,14 @@
       continue;
     }
 
-    std::tuple<std::vector<BootTimestamp>, size_t> solution =
-        problem->SolveNewton(points);
+    std::tuple<std::vector<BootTimestamp>, size_t, size_t> solution =
+        problem->SolveNewton(points, kMaxIterations);
+
+    if (std::get<2>(solution) > kMaxIterations) {
+      UpdateSolution(std::move(std::get<0>(solution)));
+      FlushAndClose(false);
+      LOG(FATAL) << "Failed to converge.";
+    }
 
     // Bypass checking if order validation is turned off.  This lets us dump a
     // CSV file so we can view the problem and figure out what to do.  The
@@ -1714,6 +1815,8 @@
       }
       problem->Debug();
       if (!skip_order_validation_) {
+        UpdateSolution(std::get<0>(solution));
+        FlushAndClose(false);
         LOG(FATAL) << "Bailing, use --skip_order_validation to continue.  "
                       "Use at your own risk.";
       }
@@ -1799,6 +1902,72 @@
   return std::make_tuple(next_filter, std::move(result_times), solution_index);
 }
 
+void MultiNodeNoncausalOffsetEstimator::UpdateSolution(
+    std::vector<BootTimestamp> result_times) {
+  // Now, figure out what distributed should be.  It should move at the rate of
+  // the max elapsed time so that conversions to and from it don't round to bad
+  // values.
+  const chrono::nanoseconds dt = MaxElapsedTime(last_monotonics_, result_times);
+  last_distributed_ += dt;
+  for (size_t i = 0; i < result_times.size(); ++i) {
+    if (result_times[i] == BootTimestamp::min_time()) {
+      // Found an unknown node.  Move its time along by the amount the
+      // distributed clock moved.
+      result_times[i] = last_monotonics_[i] + dt;
+    }
+  }
+  last_monotonics_ = std::move(result_times);
+
+  if (fp_) {
+    fprintf(
+        fp_, "%.9f",
+        chrono::duration<double>(last_distributed_.time_since_epoch()).count());
+    for (const BootTimestamp t : last_monotonics_) {
+      fprintf(fp_, ", %.9f",
+              chrono::duration<double>(t.time.time_since_epoch()).count());
+    }
+    fprintf(fp_, "\n");
+  }
+}
+
+void MultiNodeNoncausalOffsetEstimator::WriteFilter(
+    NoncausalTimestampFilter *next_filter,
+    std::tuple<logger::BootTimestamp, logger::BootDuration> sample) {
+  if (filter_fps_.size() > 0 && next_filter) {
+    const int node_a_index =
+        configuration::GetNodeIndex(configuration(), next_filter->node_a());
+    const int node_b_index =
+        configuration::GetNodeIndex(configuration(), next_filter->node_b());
+
+    FILE *fp = filter_fps_[node_a_index][node_b_index];
+    if (fp == nullptr) {
+      fp = filter_fps_[node_a_index][node_b_index] = fopen(
+          absl::StrCat("/tmp/timestamp_noncausal_",
+                       next_filter->node_a()->name()->string_view(), "_",
+                       next_filter->node_b()->name()->string_view(), ".csv")
+              .c_str(),
+          "w");
+      fprintf(fp, "time_since_start,sample_ns,filtered_offset\n");
+    }
+
+    if (last_monotonics_[node_a_index].boot == std::get<0>(sample).boot) {
+      fprintf(fp, "%.9f, %.9f, %.9f\n",
+              std::chrono::duration_cast<std::chrono::duration<double>>(
+                  last_distributed_.time_since_epoch() +
+                  std::get<0>(sample).time - last_monotonics_[node_a_index].time)
+                  .count(),
+              std::chrono::duration_cast<std::chrono::duration<double>>(
+                  std::get<0>(sample).time.time_since_epoch())
+                  .count(),
+              std::chrono::duration_cast<std::chrono::duration<double>>(
+                  std::get<1>(sample).duration)
+                  .count());
+    } else {
+      LOG(WARNING) << "Not writing point, missmatched boot.";
+    }
+  }
+}
+
 std::optional<
     std::tuple<distributed_clock::time_point, std::vector<BootTimestamp>>>
 MultiNodeNoncausalOffsetEstimator::NextTimestamp() {
@@ -1920,19 +2089,8 @@
     }
   }
 
-  // Now, figure out what distributed should be.  It should move at the rate of
-  // the max elapsed time so that conversions to and from it don't round to bad
-  // values.
-  const chrono::nanoseconds dt = MaxElapsedTime(last_monotonics_, result_times);
-  last_distributed_ += dt;
-  for (size_t i = 0; i < result_times.size(); ++i) {
-    if (result_times[i] == BootTimestamp::min_time()) {
-      // Found an unknown node.  Move its time along by the amount the
-      // distributed clock moved.
-      result_times[i] = last_monotonics_[i] + dt;
-    }
-  }
-  last_monotonics_ = std::move(result_times);
+  UpdateSolution(std::move(result_times));
+  WriteFilter(next_filter, sample);
 
   // And freeze everything.
   {
@@ -1946,45 +2104,6 @@
     }
   }
 
-  if (filter_fps_.size() > 0 && next_filter) {
-    const int node_a_index =
-        configuration::GetNodeIndex(configuration(), next_filter->node_a());
-    const int node_b_index =
-        configuration::GetNodeIndex(configuration(), next_filter->node_b());
-
-    FILE *fp = filter_fps_[node_a_index][node_b_index];
-    if (fp == nullptr) {
-      fp = filter_fps_[node_a_index][node_b_index] = fopen(
-          absl::StrCat("/tmp/timestamp_noncausal_",
-                       next_filter->node_a()->name()->string_view(), "_",
-                       next_filter->node_b()->name()->string_view(), ".csv")
-              .c_str(),
-          "w");
-      fprintf(fp, "time_since_start,sample_ns,filtered_offset\n");
-    }
-
-    fprintf(fp, "%.9f, %.9f, %.9f\n",
-            std::chrono::duration_cast<std::chrono::duration<double>>(
-                last_distributed_.time_since_epoch())
-                .count(),
-            std::chrono::duration_cast<std::chrono::duration<double>>(
-                std::get<0>(sample).time.time_since_epoch())
-                .count(),
-            std::chrono::duration_cast<std::chrono::duration<double>>(
-                std::get<1>(sample).duration)
-                .count());
-  }
-
-  if (fp_) {
-    fprintf(
-        fp_, "%.9f",
-        chrono::duration<double>(last_distributed_.time_since_epoch()).count());
-    for (const BootTimestamp t : last_monotonics_) {
-      fprintf(fp_, ", %.9f",
-              chrono::duration<double>(t.time.time_since_epoch()).count());
-    }
-    fprintf(fp_, "\n");
-  }
   FlushAllSamples(false);
   return std::make_tuple(last_distributed_, last_monotonics_);
 }
diff --git a/aos/network/multinode_timestamp_filter.h b/aos/network/multinode_timestamp_filter.h
index eff6179..0954984 100644
--- a/aos/network/multinode_timestamp_filter.h
+++ b/aos/network/multinode_timestamp_filter.h
@@ -57,8 +57,8 @@
   // solver and returns the optimal time on each node, along with the node which
   // constrained the problem.  points is the list of potential constraint
   // points, and the solver uses the earliest point.
-  std::tuple<std::vector<logger::BootTimestamp>, size_t> SolveNewton(
-      const std::vector<logger::BootTimestamp> &points);
+  std::tuple<std::vector<logger::BootTimestamp>, size_t, size_t> SolveNewton(
+      const std::vector<logger::BootTimestamp> &points, size_t max_iterations);
 
   // Validates the solution, returning true if it meets all the constraints, and
   // false otherwise.
@@ -345,6 +345,8 @@
   const aos::Configuration *configuration() const { return configuration_; }
 
  private:
+  static constexpr int kMaxIterations = 400;
+
   struct CandidateTimes {
     logger::BootTimestamp next_node_time = logger::BootTimestamp::max_time();
     logger::BootDuration next_node_duration = logger::BootDuration::max_time();
@@ -389,6 +391,18 @@
   // Writes all samples to disk.
   void FlushAllSamples(bool finish);
 
+  // Adds the solution to last_distributed_.
+  void UpdateSolution(
+      std::vector<logger::BootTimestamp> result_times);
+
+  void WriteFilter(
+      NoncausalTimestampFilter *next_filter,
+      std::tuple<logger::BootTimestamp, logger::BootDuration> sample);
+
 +  // Writes everything to disk and closes it all out in preparation for either
+  // destruction or crashing.
+  void FlushAndClose(bool destructor);
+
   const Configuration *configuration_;
   const Configuration *logged_configuration_;
 
diff --git a/aos/network/multinode_timestamp_filter_test.cc b/aos/network/multinode_timestamp_filter_test.cc
index 841ff4d..dab8e06 100644
--- a/aos/network/multinode_timestamp_filter_test.cc
+++ b/aos/network/multinode_timestamp_filter_test.cc
@@ -372,15 +372,18 @@
   std::vector<BootTimestamp> points1(problem.size(), BootTimestamp::max_time());
   points1[0] = e + chrono::seconds(1);
 
-  std::tuple<std::vector<BootTimestamp>, size_t> result1 =
-      problem.SolveNewton(points1);
+  constexpr size_t kMaxIterations = 200u;
+  std::tuple<std::vector<BootTimestamp>, size_t, size_t> result1 =
+      problem.SolveNewton(points1, kMaxIterations);
+  EXPECT_LT(std::get<2>(result1), kMaxIterations);
   EXPECT_EQ(std::get<1>(result1), 0u);
   EXPECT_TRUE(problem.ValidateSolution(std::get<0>(result1)));
 
   std::vector<BootTimestamp> points2(problem.size(), BootTimestamp::max_time());
   points2[1] = std::get<0>(result1)[1];
-  std::tuple<std::vector<BootTimestamp>, size_t> result2 =
-      problem.SolveNewton(points2);
+  std::tuple<std::vector<BootTimestamp>, size_t, size_t> result2 =
+      problem.SolveNewton(points2, kMaxIterations);
+  EXPECT_LT(std::get<2>(result1), kMaxIterations);
   EXPECT_EQ(std::get<1>(result2), 1u);
   EXPECT_TRUE(problem.ValidateSolution(std::get<0>(result2)));
 
@@ -390,15 +393,17 @@
 
   // Confirm that the error is almost equal for both directions.  The solution
   // is an integer solution, so there will be a little bit of error left over.
-  EXPECT_NEAR(
+  std::pair<chrono::nanoseconds, double> a_error =
       a.OffsetError(nullptr, NoncausalTimestampFilter::Pointer(),
                     std::get<0>(result1)[0], 0.0, std::get<0>(result1)[1], 0.0)
-              .second -
-          b.OffsetError(nullptr, NoncausalTimestampFilter::Pointer(),
-                        std::get<0>(result1)[1], 0.0, std::get<0>(result1)[0],
-                        0.0)
-              .second,
-      0.0, 0.5);
+          .second;
+  std::pair<chrono::nanoseconds, double> b_error =
+      b.OffsetError(nullptr, NoncausalTimestampFilter::Pointer(),
+                    std::get<0>(result1)[1], 0.0, std::get<0>(result1)[0], 0.0)
+          .second;
+  EXPECT_NEAR(static_cast<double>((a_error.first - b_error.first).count()) +
+                  (a_error.second - b_error.second),
+              0.0, 0.5);
 }
 
 }  // namespace testing
diff --git a/aos/network/timestamp_filter.cc b/aos/network/timestamp_filter.cc
index de150c5..8d1c9fe 100644
--- a/aos/network/timestamp_filter.cc
+++ b/aos/network/timestamp_filter.cc
@@ -491,15 +491,16 @@
 std::pair<Pointer, std::pair<std::tuple<BootTimestamp, BootDuration>,
                              std::tuple<BootTimestamp, BootDuration>>>
 NoncausalTimestampFilter::FindTimestamps(const NoncausalTimestampFilter *other,
-                                         Pointer pointer, BootTimestamp ta_base,
-                                         double ta, size_t sample_boot) const {
+                                         bool use_other, Pointer pointer,
+                                         BootTimestamp ta_base, double ta,
+                                         size_t sample_boot) const {
   CHECK_GE(ta, 0.0);
   CHECK_LT(ta, 1.0);
 
   // Since ta is less than an integer, and timestamps should be at least 1 ns
   // apart, we can ignore ta if we make sure that the end of the segment is
   // strictly > than ta_base.
-  return FindTimestamps(other, pointer, ta_base, sample_boot);
+  return FindTimestamps(other, use_other, pointer, ta_base, sample_boot);
 }
 
 std::pair<
@@ -507,7 +508,7 @@
     std::pair<std::tuple<monotonic_clock::time_point, chrono::nanoseconds>,
               std::tuple<monotonic_clock::time_point, chrono::nanoseconds>>>
 NoncausalTimestampFilter::SingleFilter::FindTimestamps(
-    const SingleFilter *other, Pointer pointer,
+    const SingleFilter *other, bool use_other, Pointer pointer,
     monotonic_clock::time_point ta_base, double ta) const {
   CHECK_GE(ta, 0.0);
   CHECK_LT(ta, 1.0);
@@ -515,7 +516,7 @@
   // Since ta is less than an integer, and timestamps should be at least 1 ns
   // apart, we can ignore ta if we make sure that the end of the segment is
   // strictly > than ta_base.
-  return FindTimestamps(other, pointer, ta_base);
+  return FindTimestamps(other, use_other, pointer, ta_base);
 }
 
 std::pair<
@@ -523,9 +524,12 @@
     std::pair<std::tuple<monotonic_clock::time_point, chrono::nanoseconds>,
               std::tuple<monotonic_clock::time_point, chrono::nanoseconds>>>
 NoncausalTimestampFilter::InterpolateWithOtherFilter(
-    Pointer pointer, monotonic_clock::time_point ta,
+    Pointer pointer, bool use_other, monotonic_clock::time_point ta,
     std::tuple<monotonic_clock::time_point, chrono::nanoseconds> t0,
     std::tuple<monotonic_clock::time_point, chrono::nanoseconds> t1) {
+  if (!use_other) {
+    return std::make_pair(pointer, std::make_pair(t0, t1));
+  }
   // We have 2 timestamps bookending everything, and a list of points in the
   // middle.
   //
@@ -576,7 +580,7 @@
     std::pair<std::tuple<monotonic_clock::time_point, chrono::nanoseconds>,
               std::tuple<monotonic_clock::time_point, chrono::nanoseconds>>>
 NoncausalTimestampFilter::SingleFilter::FindTimestamps(
-    const SingleFilter *other, Pointer pointer,
+    const SingleFilter *other, bool use_other, Pointer pointer,
     monotonic_clock::time_point ta) const {
   CHECK_GT(timestamps_size(), 1u);
 
@@ -638,7 +642,7 @@
               << ": Cache changed";
         }
 
-        return InterpolateWithOtherFilter(pointer, ta, t0, t1);
+        return InterpolateWithOtherFilter(pointer, use_other, ta, t0, t1);
       }
     }
   }
@@ -673,39 +677,42 @@
                             monotonic_clock::time_point ta) {
                            return ta > std::get<0>(t) + std::get<1>(t);
                          });
-    auto other_t1_it = std::upper_bound(
-        other_t0_it, other->timestamps_.end(), std::get<0>(pointer.t1_),
-        [](monotonic_clock::time_point ta,
-           std::tuple<aos::monotonic_clock::time_point,
-                      std::chrono::nanoseconds>
-               t) { return ta < std::get<0>(t) + std::get<1>(t); });
+    if (other_t0_it != other->timestamps_.end()) {
+      auto other_t1_it = std::upper_bound(
+          other_t0_it, other->timestamps_.end(), std::get<0>(pointer.t1_),
+          [](monotonic_clock::time_point ta,
+             std::tuple<aos::monotonic_clock::time_point,
+                        std::chrono::nanoseconds>
+                 t) { return ta < std::get<0>(t) + std::get<1>(t); });
 
-    if (std::get<0>(*other_t0_it) + std::get<1>(*other_t0_it) <
-        std::get<0>(pointer.t1_)) {
-      pointer.other_points_.clear();
+      if (std::get<0>(*other_t0_it) + std::get<1>(*other_t0_it) <
+          std::get<0>(pointer.t1_)) {
+        pointer.other_points_.clear();
 
-      // Now, we've got a range.  [other_t0_it, other_t1_it).
-      for (auto other_it = other_t0_it; other_it != other_t1_it; ++other_it) {
-        const std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>
-            flipped_point =
-                std::make_tuple(std::get<0>(*other_it) + std::get<1>(*other_it),
-                                -std::get<1>(*other_it) - kMinNetworkDelay());
+        // Now, we've got a range.  [other_t0_it, other_t1_it).
+        for (auto other_it = other_t0_it; other_it != other_t1_it; ++other_it) {
+          const std::tuple<monotonic_clock::time_point,
+                           std::chrono::nanoseconds>
+              flipped_point = std::make_tuple(
+                  std::get<0>(*other_it) + std::get<1>(*other_it),
+                  -std::get<1>(*other_it) - kMinNetworkDelay());
 
-        // If the new point from the opposite direction filter is below the
-        // interpolated value at that point, then the opposite direction point
-        // defines a new min and we should take it.
-        if (NoncausalTimestampFilter::InterpolateOffset(
-                pointer.t0_, pointer.t1_, std::get<0>(flipped_point)) >
-            std::get<1>(flipped_point)) {
-          // Add it to the list of points to consider.
-          pointer.other_points_.emplace_back(std::make_pair(
-              std::distance(other->timestamps_.begin(), other_it),
-              flipped_point));
+          // If the new point from the opposite direction filter is below the
+          // interpolated value at that point, then the opposite direction point
+          // defines a new min and we should take it.
+          if (NoncausalTimestampFilter::InterpolateOffset(
+                  pointer.t0_, pointer.t1_, std::get<0>(flipped_point)) >
+              std::get<1>(flipped_point)) {
+            // Add it to the list of points to consider.
+            pointer.other_points_.emplace_back(std::make_pair(
+                std::distance(other->timestamps_.begin(), other_it),
+                flipped_point));
+          }
         }
-      }
 
-      if (pointer.other_points_.size() > 0) {
-        return InterpolateWithOtherFilter(pointer, ta, t0, t1);
+        if (pointer.other_points_.size() > 0) {
+          return InterpolateWithOtherFilter(pointer, use_other, ta, t0, t1);
+        }
       }
     }
 
@@ -766,43 +773,24 @@
 chrono::nanoseconds NoncausalTimestampFilter::ExtrapolateOffset(
     std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds> p0,
     monotonic_clock::time_point ta) {
-  const chrono::nanoseconds dt = ta - std::get<0>(p0);
-  if (dt <= std::chrono::nanoseconds(0)) {
-    // Extrapolate backwards, using the (positive) MaxVelocity slope
-    // We've been asked to extrapolate the offset to a time before our first
-    // sample point.  To be conservative, we'll return an extrapolated
-    // offset that is less than (less tight an estimate of the network delay)
-    // than our sample offset, bound by the max slew velocity we allow
-    //       p0
-    //      /
-    //     /
-    //   ta
-    // Since dt < 0, we shift by dt * slope to get that value
-    return std::get<1>(p0) +
-           chrono::nanoseconds(static_cast<int64_t>(
-               (absl::int128(dt.count() - MaxVelocityRatio::den / 2) *
-                absl::int128(MaxVelocityRatio::num)) /
-               absl::int128(MaxVelocityRatio::den)));
-  } else {
-    // Extrapolate forwards, using the (negative) MaxVelocity slope
-    // Same concept, except going foward past our last (most recent) sample:
-    //       pN
-    //         |
-    //          |
-    //           ta
-    // Since dt > 0, we shift by - dt * slope to get that value
-    return std::get<1>(p0) -
-           chrono::nanoseconds(static_cast<int64_t>(
-               (absl::int128(dt.count() + MaxVelocityRatio::den / 2) *
-                absl::int128(MaxVelocityRatio::num)) /
-               absl::int128(MaxVelocityRatio::den)));
-  }
+  return ExtrapolateOffset(p0, ta, 0.0).first;
 }
 
 chrono::nanoseconds NoncausalTimestampFilter::InterpolateOffset(
     std::tuple<monotonic_clock::time_point, chrono::nanoseconds> p0,
     std::tuple<monotonic_clock::time_point, chrono::nanoseconds> p1,
     monotonic_clock::time_point ta) {
+  return InterpolateOffset(p0, p1, ta, 0.0).first;
+}
+
+std::pair<chrono::nanoseconds, double>
+NoncausalTimestampFilter::InterpolateOffset(
+    std::tuple<monotonic_clock::time_point, chrono::nanoseconds> p0,
+    std::tuple<monotonic_clock::time_point, chrono::nanoseconds> p1,
+    monotonic_clock::time_point ta_base, double ta) {
+  DCHECK_GE(ta, 0.0);
+  DCHECK_LT(ta, 1.0);
+
   // Given 2 points defining a line and the time along that line, interpolate.
   //
   // ta may be massive, but the points will be close, so compute everything
@@ -814,82 +802,123 @@
   //
   // Add (or subtract, integer division rounds towards 0...) 0.5 ((dt / 2) / dt)
   // to the numerator to round to the nearest number rather than round down.
-  const chrono::nanoseconds time_in = ta - std::get<0>(p0);
+  const chrono::nanoseconds time_in = ta_base - std::get<0>(p0);
   const chrono::nanoseconds dt = std::get<0>(p1) - std::get<0>(p0);
+  const chrono::nanoseconds doffset = std::get<1>(p1) - std::get<1>(p0);
 
   absl::int128 numerator =
-      absl::int128(time_in.count()) *
-      absl::int128((std::get<1>(p1) - std::get<1>(p0)).count());
+      absl::int128(time_in.count()) * absl::int128(doffset.count());
   numerator += numerator > 0 ? absl::int128(dt.count() / 2)
                              : -absl::int128(dt.count() / 2);
-  return std::get<1>(p0) + chrono::nanoseconds(static_cast<int64_t>(
-                               numerator / absl::int128(dt.count())));
+
+  const chrono::nanoseconds integer =
+      std::get<1>(p0) + chrono::nanoseconds(static_cast<int64_t>(
+                            numerator / absl::int128(dt.count())));
+  // Compute the remainder of the division in InterpolateOffset above, and
+  // then use double math to compute it accurately.  Since integer math rounds
+  // down, we need to undo the rounding to get the double remainder.  Add or
+  // subtract dt/2/dt (0.5) to undo the addition.
+  //
+  // We have good tests which confirm for small offsets this matches nicely. For
+  // large offsets, the 128 bit math will take care of us.
+  const double remainder =
+      static_cast<double>(numerator % absl::int128(dt.count())) / dt.count() +
+      (numerator > 0 ? -0.5 : 0.5) +
+      ta * static_cast<double>(doffset.count()) /
+          static_cast<double>(dt.count());
+  return std::make_pair(integer, remainder);
 }
 
-chrono::nanoseconds NoncausalTimestampFilter::InterpolateOffset(
+chrono::nanoseconds NoncausalTimestampFilter::BoundOffset(
     std::tuple<monotonic_clock::time_point, chrono::nanoseconds> p0,
-    std::tuple<monotonic_clock::time_point, chrono::nanoseconds> /*p1*/,
-    monotonic_clock::time_point /*ta_base*/, double /*ta*/) {
-  // For the double variant, we want to split the result up into a large integer
-  // portion, and the rest.  We want to do this without introducing numerical
-  // precision problems.
-  //
-  // One way would be to carefully compute the integer portion, and then compute
-  // the double portion in such a way that the two are guaranteed to add up
-  // correctly.
-  //
-  // The simpler way is to simply just use the offset from p0 as the integer
-  // portion, and make the rest be the double portion.  It will get us most of
-  // the way there for a lot less work, and we can revisit if this breaks down.
-  //
-  // oa = p0.o + (ta - p0.t) * (p1.o - p0.o) / (p1.t - p0.t)
-  //      ^^^^
-  // TODO(austin): Use 128 bit math and the remainder to be more accurate here.
-  return std::get<1>(p0);
+    std::tuple<monotonic_clock::time_point, chrono::nanoseconds> p1,
+    monotonic_clock::time_point ta) {
+  // We are trying to solve for worst case offset given the two known points.
+  // This is on the two worst case lines from the two points, and we switch
+  // lines at the intersection.  This is equivalent to the lowest of the two
+  // lines.
+  return std::max(NoncausalTimestampFilter::ExtrapolateOffset(p0, ta),
+                  NoncausalTimestampFilter::ExtrapolateOffset(p1, ta));
 }
 
-double NoncausalTimestampFilter::InterpolateOffsetRemainder(
+std::pair<chrono::nanoseconds, double> NoncausalTimestampFilter::BoundOffset(
     std::tuple<monotonic_clock::time_point, chrono::nanoseconds> p0,
     std::tuple<monotonic_clock::time_point, chrono::nanoseconds> p1,
     monotonic_clock::time_point ta_base, double ta) {
-  const chrono::nanoseconds time_in = ta_base - std::get<0>(p0);
-  const chrono::nanoseconds dt = std::get<0>(p1) - std::get<0>(p0);
+  DCHECK_GE(ta, 0.0);
+  DCHECK_LT(ta, 1.0);
 
-  // The remainder then is the rest of the equation.
-  //
-  // oa = p0.o + (ta - p0.t) * (p1.o - p0.o) / (p1.t - p0.t)
-  //             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  // TODO(austin): Use 128 bit math and the remainder to be more accurate here.
-  return static_cast<double>(ta + time_in.count()) /
-         static_cast<double>(dt.count()) *
-         (std::get<1>(p1) - std::get<1>(p0)).count();
+  const std::pair<chrono::nanoseconds, double> o0 =
+      NoncausalTimestampFilter::ExtrapolateOffset(p0, ta_base, ta);
+  const std::pair<chrono::nanoseconds, double> o1 =
+      NoncausalTimestampFilter::ExtrapolateOffset(p1, ta_base, ta);
+
+  // Want to calculate max(o0 + o0r, o1 + o1r) without precision problems.
+  if (static_cast<double>((o0.first - o1.first).count()) >
+      o1.second - o0.second) {
+    // Ok, o0 is now > o1.  We want the max, so return o0.
+    return o0;
+  } else {
+    return o1;
+  }
 }
 
-chrono::nanoseconds NoncausalTimestampFilter::ExtrapolateOffset(
-    std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds> p0,
-    monotonic_clock::time_point /*ta_base*/, double /*ta*/) {
-  // TODO(austin): 128 bit math again? ...
-  // For this version, use the base offset from p0 as the base for the offset
-  return std::get<1>(p0);
-}
-
-double NoncausalTimestampFilter::ExtrapolateOffsetRemainder(
+std::pair<chrono::nanoseconds, double>
+NoncausalTimestampFilter::ExtrapolateOffset(
     std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds> p0,
     monotonic_clock::time_point ta_base, double ta) {
-  // Compute the remainder portion of this offset
-  // oa = p0.o +/- ((ta + ta_base) - p0.t)) * kMaxVelocity()
-  //               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  // But compute (ta + ta_base - p0.t) as (ta + (ta_base - p0.t))
-  // to handle numerical precision
-  const chrono::nanoseconds time_in = ta_base - std::get<0>(p0);
-  const double dt = static_cast<double>(ta + time_in.count());
-  if (dt < 0.0) {
-    // Extrapolate backwards with max (positive) slope (which means
-    // the returned offset should be negative)
-    return dt * kMaxVelocity();
+  DCHECK_GE(ta, 0.0);
+  DCHECK_LT(ta, 1.0);
+  // Since the point (p0) is an integer, we now can guarantee that ta won't put
+  // us on a different side of p0.  This is because ta is between 0 and 1, and
+  // always positive.  Compute the integer and double portions and return them.
+  const chrono::nanoseconds dt = ta_base - std::get<0>(p0);
+
+  if (dt < std::chrono::nanoseconds(0)) {
+    // Extrapolate backwards, using the (positive) MaxVelocity slope
+    // We've been asked to extrapolate the offset to a time before our first
+    // sample point.  To be conservative, we'll return an extrapolated
+    // offset that is less (a less tight estimate of the network delay)
+    // than our sample offset, bound by the max slew velocity we allow
+    //       p0
+    //      /
+    //     /
+    //   ta
+    // Since dt < 0, we shift by dt * slope to get that value
+    //
+    // Take the remainder of the math in ExtrapolateOffset above and compute it
+    // with floating point math.  Our tests are good enough to confirm that this
+    // works as designed.
+    const absl::int128 numerator =
+        (absl::int128(dt.count() - MaxVelocityRatio::den / 2) *
+         absl::int128(MaxVelocityRatio::num));
+    return std::make_pair(
+        std::get<1>(p0) + chrono::nanoseconds(static_cast<int64_t>(
+                              numerator / absl::int128(MaxVelocityRatio::den))),
+        static_cast<double>(numerator % absl::int128(MaxVelocityRatio::den)) /
+                static_cast<double>(MaxVelocityRatio::den) +
+            0.5 + ta * kMaxVelocity());
   } else {
-    // Extrapolate forwards with max (negative) slope
-    return -dt * kMaxVelocity();
+    // Extrapolate forwards, using the (negative) MaxVelocity slope
+    // Same concept, except going forward past our last (most recent) sample:
+    //       pN
+    //         |
+    //          |
+    //           ta
+    // Since dt > 0, we shift by - dt * slope to get that value
+    //
+    // Take the remainder of the math in ExtrapolateOffset above and compute it
+    // with floating point math.  Our tests are good enough to confirm that this
+    // works as designed.
+    const absl::int128 numerator =
+        absl::int128(dt.count() + MaxVelocityRatio::den / 2) *
+        absl::int128(MaxVelocityRatio::num);
+    return std::make_pair(
+        std::get<1>(p0) - chrono::nanoseconds(static_cast<int64_t>(
+                              numerator / absl::int128(MaxVelocityRatio::den))),
+        -static_cast<double>(numerator % absl::int128(MaxVelocityRatio::den)) /
+                static_cast<double>(MaxVelocityRatio::den) +
+            0.5 - ta * kMaxVelocity());
   }
 }
 
@@ -911,7 +940,7 @@
       Pointer,
       std::pair<std::tuple<monotonic_clock::time_point, chrono::nanoseconds>,
                 std::tuple<monotonic_clock::time_point, chrono::nanoseconds>>>
-      points = FindTimestamps(other, pointer, ta);
+      points = FindTimestamps(other, true, pointer, ta);
   return std::make_pair(points.first,
                         NoncausalTimestampFilter::InterpolateOffset(
                             points.second.first, points.second.second, ta));
@@ -928,32 +957,56 @@
     std::pair<Pointer,
               std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>>
         reference_timestamp = GetReferenceTimestamp(ta_base, ta);
-    return std::make_pair(
-        reference_timestamp.first,
-        std::make_pair(NoncausalTimestampFilter::ExtrapolateOffset(
-                           reference_timestamp.second, ta_base, ta),
-                       NoncausalTimestampFilter::ExtrapolateOffsetRemainder(
-                           reference_timestamp.second, ta_base, ta)));
+    return std::make_pair(reference_timestamp.first,
+                          NoncausalTimestampFilter::ExtrapolateOffset(
+                              reference_timestamp.second, ta_base, ta));
   }
 
   std::pair<
       Pointer,
       std::pair<std::tuple<monotonic_clock::time_point, chrono::nanoseconds>,
                 std::tuple<monotonic_clock::time_point, chrono::nanoseconds>>>
-      points = FindTimestamps(other, pointer, ta_base, ta);
+      points = FindTimestamps(other, true, pointer, ta_base, ta);
   CHECK_LT(std::get<0>(points.second.first), std::get<0>(points.second.second));
   // Return both the integer and double portion together to save a timestamp
   // lookup.
   return std::make_pair(
       points.first,
-      std::make_pair(
-          NoncausalTimestampFilter::InterpolateOffset(
-              points.second.first, points.second.second, ta_base, ta),
-          NoncausalTimestampFilter::InterpolateOffsetRemainder(
-              points.second.first, points.second.second, ta_base, ta)));
+      NoncausalTimestampFilter::InterpolateOffset(
+          points.second.first, points.second.second, ta_base, ta));
 }
 
-std::pair<Pointer, double> NoncausalTimestampFilter::SingleFilter::OffsetError(
+std::pair<Pointer, std::pair<chrono::nanoseconds, double>>
+NoncausalTimestampFilter::SingleFilter::BoundsOffset(
+    const SingleFilter *other, Pointer pointer,
+    monotonic_clock::time_point ta_base, double ta) const {
+  CHECK_GT(timestamps_size(), 0u) << node_names_;
+  if (IsOutsideSamples(ta_base, ta)) {
+    // Special case size = 1 or ta_base before first timestamp or
+    // after last timestamp, so we need to extrapolate out
+    std::pair<Pointer,
+              std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>>
+        reference_timestamp = GetReferenceTimestamp(ta_base, ta);
+    return std::make_pair(reference_timestamp.first,
+                          NoncausalTimestampFilter::ExtrapolateOffset(
+                              reference_timestamp.second, ta_base, ta));
+  }
+
+  std::pair<
+      Pointer,
+      std::pair<std::tuple<monotonic_clock::time_point, chrono::nanoseconds>,
+                std::tuple<monotonic_clock::time_point, chrono::nanoseconds>>>
+      points = FindTimestamps(other, false, pointer, ta_base, ta);
+  CHECK_LT(std::get<0>(points.second.first), std::get<0>(points.second.second));
+  // Return both the integer and double portion together to save a timestamp
+  // lookup.
+  return std::make_pair(points.first, NoncausalTimestampFilter::BoundOffset(
+                                          points.second.first,
+                                          points.second.second, ta_base, ta));
+}
+
+std::pair<Pointer, std::pair<chrono::nanoseconds, double>>
+NoncausalTimestampFilter::SingleFilter::OffsetError(
     const SingleFilter *other, Pointer pointer,
     aos::monotonic_clock::time_point ta_base, double ta,
     aos::monotonic_clock::time_point tb_base, double tb) const {
@@ -966,9 +1019,26 @@
   // Compute the integer portion first, and the double portion second.  Subtract
   // the results of each.  This handles large offsets without losing precision.
   return std::make_pair(
-      offset.first,
-      static_cast<double>(((tb_base - ta_base) - offset.second.first).count()) +
-          ((tb - ta) - offset.second.second));
+      offset.first, std::make_pair(((tb_base - ta_base) - offset.second.first),
+                                   (tb - ta) - offset.second.second));
+}
+
+std::pair<Pointer, std::pair<chrono::nanoseconds, double>>
+NoncausalTimestampFilter::SingleFilter::BoundsOffsetError(
+    const SingleFilter *other, Pointer pointer,
+    aos::monotonic_clock::time_point ta_base, double ta,
+    aos::monotonic_clock::time_point tb_base, double tb) const {
+  NormalizeTimestamps(&ta_base, &ta);
+  NormalizeTimestamps(&tb_base, &tb);
+
+  const std::pair<Pointer, std::pair<std::chrono::nanoseconds, double>> offset =
+      BoundsOffset(other, pointer, ta_base, ta);
+
+  // Compute the integer portion first, and the double portion second.  Subtract
+  // the results of each.  This handles large offsets without losing precision.
+  return std::make_pair(
+      offset.first, std::make_pair((tb_base - ta_base) - offset.second.first,
+                                   (tb - ta) - offset.second.second));
 }
 
 std::string NoncausalTimestampFilter::DebugOffsetError(
@@ -1007,7 +1077,7 @@
                        other == nullptr
                            ? nullptr
                            : &other->filter(tb_base.boot, ta_base.boot)->filter,
-                       pointer, ta_base.time, ta)
+                       true, pointer, ta_base.time, ta)
                    .second;
 
   // As a reminder, our cost function is essentially:
@@ -1058,45 +1128,45 @@
     auto reference_timestamp = GetReferenceTimestamp(ta_base, ta);
 
     // Special case size = 1 or ta before first timestamp, so we extrapolate
-    const chrono::nanoseconds offset_base =
+    const std::pair<chrono::nanoseconds, double> offset =
         NoncausalTimestampFilter::ExtrapolateOffset(reference_timestamp.second,
                                                     ta_base, ta);
-    const double offset_remainder =
-        NoncausalTimestampFilter::ExtrapolateOffsetRemainder(
-            reference_timestamp.second, ta_base, ta);
 
     // We want to do offset + ta > tb, but we need to do it with minimal
     // numerical precision problems.
     // See below for why this is a >=
-    if (static_cast<double>((offset_base + ta_base - tb_base).count()) >=
-        tb - ta - offset_remainder) {
+    if (static_cast<double>((offset.first + ta_base - tb_base).count()) >=
+        tb - ta - offset.second) {
       LOG(ERROR) << node_names_ << " "
-                 << TimeString(ta_base, ta, offset_base, offset_remainder)
+                 << TimeString(ta_base, ta, offset.first, offset.second)
                  << " > solution time "
                  << tb_base + chrono::nanoseconds(
                                   static_cast<int64_t>(std::round(tb)))
                  << ", " << tb - std::round(tb) << " foo";
-      LOG(INFO) << "Remainder " << offset_remainder;
+      LOG(INFO) << "Remainder " << offset.second;
       return false;
     }
     return true;
   }
 
+  // Honestly, here, we care about confirming that the worst case holds.  This
+  // means that each solution is plausible based on the points that we have. The
+  // only thing we actually know is that time will slew by at most the max slew
+  // rate, so the candidate solution must be within the max slew rate from the
+  // samples.
   std::pair<
       Pointer,
       std::pair<std::tuple<monotonic_clock::time_point, chrono::nanoseconds>,
                 std::tuple<monotonic_clock::time_point, chrono::nanoseconds>>>
-      points = FindTimestamps(other, pointer, ta_base, ta);
-  const chrono::nanoseconds offset_base =
-      NoncausalTimestampFilter::InterpolateOffset(
-          points.second.first, points.second.second, ta_base, ta);
-  const double offset = NoncausalTimestampFilter::InterpolateOffsetRemainder(
-      points.second.first, points.second.second, ta_base, ta);
+      points = FindTimestamps(other, false, pointer, ta_base, ta);
+  const std::pair<chrono::nanoseconds, double> offset =
+      NoncausalTimestampFilter::BoundOffset(points.second.first,
+                                            points.second.second, ta_base, ta);
   // See below for why this is a >=
-  if (static_cast<double>((offset_base + ta_base - tb_base).count()) >=
-      tb - offset - ta) {
+  if (static_cast<double>((offset.first + ta_base - tb_base).count()) >=
+      tb - offset.second - ta) {
     LOG(ERROR) << node_names_ << " "
-               << TimeString(ta_base, ta, offset_base, offset)
+               << TimeString(ta_base, ta, offset.first, offset.second)
                << " > solution time " << tb_base << ", " << tb;
     LOG(ERROR) << "Bracketing times are " << TimeString(points.second.first)
                << " and " << TimeString(points.second.second);
@@ -1115,12 +1185,13 @@
                << ") is before the start and we have forgotten the answer.";
     return false;
   }
+
+  // The logic here mirrors the double variant above almost perfectly.  See
+  // above for the comments.
+
   if (IsOutsideSamples(ta, 0.)) {
-    // Special case size = 1 or ta_base before first timestamp or
-    // after last timestamp, so we need to extrapolate out
     auto reference_timestamp = GetReferenceTimestamp(ta, 0.);
 
-    // Special case size = 1 or ta before first timestamp, so we extrapolate
     const chrono::nanoseconds offset =
         NoncausalTimestampFilter::ExtrapolateOffset(reference_timestamp.second,
                                                     ta);
@@ -1139,10 +1210,9 @@
       Pointer,
       std::pair<std::tuple<monotonic_clock::time_point, chrono::nanoseconds>,
                 std::tuple<monotonic_clock::time_point, chrono::nanoseconds>>>
-      points = FindTimestamps(other, pointer, ta);
-  const chrono::nanoseconds offset =
-      NoncausalTimestampFilter::InterpolateOffset(points.second.first,
-                                                  points.second.second, ta);
+      points = FindTimestamps(other, false, pointer, ta);
+  const chrono::nanoseconds offset = NoncausalTimestampFilter::BoundOffset(
+      points.second.first, points.second.second, ta);
 
   // Note: this needs to be >=.  The simulation code doesn't give us a good
   // way to preserve order well enough to have causality preserved when things
@@ -1212,7 +1282,8 @@
     // adheres to our +- velocity constraint. If the point is less than the max
     // negative slope, the point violates our constraint and will never be worth
     // considering.  Ignore it.
-    if (doffset < -dt * kMaxVelocity()) {
+    if (absl::int128(doffset.count()) * absl::int128(MaxVelocityRatio::den) <
+        -absl::int128(dt.count()) * absl::int128(MaxVelocityRatio::num)) {
       VLOG(1) << std::setprecision(1) << std::fixed << node_names_
               << " Rejected sample of " << TimeString(monotonic_now, sample_ns)
               << " because " << doffset.count() << " < "
@@ -1246,7 +1317,10 @@
     //
     // In this case, point 3 is now violating our constraint and we need to
     // remove it.  This is the non-causal part of the filter.
-    while (dt * kMaxVelocity() < doffset && timestamps_.size() > 1u) {
+    while (absl::int128(dt.count()) * absl::int128(MaxVelocityRatio::num) <
+               absl::int128(doffset.count()) *
+                   absl::int128(MaxVelocityRatio::den) &&
+           timestamps_.size() > 1u) {
       CHECK(!frozen(std::get<0>(back)))
           << ": " << node_names_ << " Can't pop an already frozen sample "
           << TimeString(back) << " while inserting "
@@ -1315,7 +1389,9 @@
         const chrono::nanoseconds dt = std::get<0>(*second) - monotonic_now;
         const chrono::nanoseconds doffset = std::get<1>(*second) - sample_ns;
 
-        if (doffset < -dt * kMaxVelocity()) {
+        if (absl::int128(doffset.count()) *
+                absl::int128(MaxVelocityRatio::den) <
+            -absl::int128(dt.count()) * absl::int128(MaxVelocityRatio::num)) {
           VLOG(1) << node_names_ << " Removing redundant sample of "
                   << TimeString(*second) << " because "
                   << TimeString(timestamps_.front())
@@ -1342,7 +1418,9 @@
           const chrono::nanoseconds doffset =
               std::get<1>(*third) - std::get<1>(*second);
 
-          if (doffset > dt * kMaxVelocity()) {
+          if (absl::int128(doffset.count()) *
+                  absl::int128(MaxVelocityRatio::den) >
+              absl::int128(dt.count()) * absl::int128(MaxVelocityRatio::num)) {
             VLOG(1) << node_names_ << " Removing invalid sample of "
                     << TimeString(*second) << " because " << TimeString(*third)
                     << " would make the slope too positive.";
@@ -1366,14 +1444,19 @@
       chrono::nanoseconds next_doffset = std::get<1>(*it) - sample_ns;
 
       // If we are worse than either the previous or next point, discard.
-      if (prior_doffset < -prior_dt * kMaxVelocity()) {
+      if (absl::int128(prior_doffset.count()) *
+              absl::int128(MaxVelocityRatio::den) <
+          absl::int128(-prior_dt.count()) *
+              absl::int128(MaxVelocityRatio::num)) {
         VLOG(1) << node_names_ << " Ignoring timestamp "
                 << TimeString(monotonic_now, sample_ns) << " because "
                 << TimeString(*(it - 1))
                 << " is before and the slope would be too negative.";
         return;
       }
-      if (next_doffset > next_dt * kMaxVelocity()) {
+      if (absl::int128(next_doffset.count()) *
+              absl::int128(MaxVelocityRatio::den) >
+          absl::int128(next_dt.count()) * absl::int128(MaxVelocityRatio::num)) {
         VLOG(1) << node_names_ << " Ignoring timestamp "
                 << TimeString(monotonic_now, sample_ns) << " because "
                 << TimeString(*it)
@@ -1412,7 +1495,10 @@
         const chrono::nanoseconds next_doffset =
             std::get<1>(*next_it) - std::get<1>(*middle_it);
 
-        if (next_doffset < -next_dt * kMaxVelocity()) {
+        if (absl::int128(next_doffset.count()) *
+                absl::int128(MaxVelocityRatio::den) <
+            absl::int128(-next_dt.count()) *
+                absl::int128(MaxVelocityRatio::num)) {
           VLOG(1) << node_names_
                   << " Next slope is too negative, removing next point "
                   << TimeString(*next_it);
@@ -1431,7 +1517,10 @@
         const chrono::nanoseconds prior_doffset =
             std::get<1>(*middle_it) - std::get<1>(*prior_it);
 
-        if (prior_doffset > prior_dt * kMaxVelocity()) {
+        if (absl::int128(prior_doffset.count()) *
+                absl::int128(MaxVelocityRatio::den) >
+            absl::int128(prior_dt.count()) *
+                absl::int128(MaxVelocityRatio::num)) {
           CHECK(!frozen(std::get<0>(*prior_it)))
               << ": " << node_names_
               << " Can't pop an already frozen sample.  Increase "
diff --git a/aos/network/timestamp_filter.h b/aos/network/timestamp_filter.h
index fd430ce..2b5ff47 100644
--- a/aos/network/timestamp_filter.h
+++ b/aos/network/timestamp_filter.h
@@ -16,9 +16,6 @@
 namespace aos {
 namespace message_bridge {
 
-// TODO<jim>: Should do something to help with precision, like make it an
-// integer and divide by the value (e.g., / 1000)
-
 // Max velocity to clamp the filter to in seconds/second.
 typedef std::ratio<1, 1000> MaxVelocityRatio;
 inline constexpr double kMaxVelocity() {
@@ -317,22 +314,41 @@
   // Returns the error between the offset in the provided timestamps, and the
   // offset at ta.  Also returns a pointer to the timestamps used for the
   // lookup to be passed back in again for a more efficient second lookup.
-  std::pair<Pointer, double> OffsetError(const NoncausalTimestampFilter *other,
-                                         Pointer pointer,
-                                         logger::BootTimestamp ta_base,
-                                         double ta,
-                                         logger::BootTimestamp tb_base,
-                                         double tb) const {
+  std::pair<Pointer, std::pair<std::chrono::nanoseconds, double>> OffsetError(
+      const NoncausalTimestampFilter *other, Pointer pointer,
+      logger::BootTimestamp ta_base, double ta, logger::BootTimestamp tb_base,
+      double tb) const {
     const BootFilter *boot_filter = filter(pointer, ta_base.boot, tb_base.boot);
     const SingleFilter *other_filter =
         other == nullptr
             ? nullptr
             : other->maybe_single_filter(tb_base.boot, ta_base.boot);
-    std::pair<Pointer, double> result = boot_filter->filter.OffsetError(
-        other_filter, pointer, ta_base.time, ta, tb_base.time, tb);
+    std::pair<Pointer, std::pair<std::chrono::nanoseconds, double>> result =
+        boot_filter->filter.OffsetError(other_filter, pointer, ta_base.time, ta,
+                                        tb_base.time, tb);
     result.first.boot_filter_ = boot_filter;
     return result;
   }
+
+  // Returns the error between the offset in the provided timestamps, and the
+  // bounds offset at ta.  Also returns a pointer to the timestamps used for the
+  // lookup to be passed back in again for a more efficient second lookup.
+  std::pair<Pointer, std::pair<std::chrono::nanoseconds, double>>
+  BoundsOffsetError(const NoncausalTimestampFilter *other, Pointer pointer,
+                    logger::BootTimestamp ta_base, double ta,
+                    logger::BootTimestamp tb_base, double tb) const {
+    const BootFilter *boot_filter = filter(pointer, ta_base.boot, tb_base.boot);
+    const SingleFilter *other_filter =
+        other == nullptr
+            ? nullptr
+            : other->maybe_single_filter(tb_base.boot, ta_base.boot);
+    std::pair<Pointer, std::pair<std::chrono::nanoseconds, double>> result =
+        boot_filter->filter.BoundsOffsetError(
+            other_filter, pointer, ta_base.time, ta, tb_base.time, tb);
+    result.first.boot_filter_ = boot_filter;
+    return result;
+  }
+
   // Returns the string representation of 2 * OffsetError(ta, tb)
   std::string DebugOffsetError(const NoncausalTimestampFilter *other,
                                Pointer pointer, logger::BootTimestamp ta_base,
@@ -503,43 +519,14 @@
   }
 
   // Public for testing.
-  // Returns the offset for the point in time, using the timestamps in the deque
-  // to form a polyline used to interpolate.
-  logger::BootDuration Offset(const NoncausalTimestampFilter *other,
-                              Pointer pointer, logger::BootTimestamp ta,
-                              size_t sample_boot) const {
-    return {sample_boot,
-            filter(ta.boot, sample_boot)
-                ->filter
-                .Offset(other == nullptr
-                            ? nullptr
-                            : &other->filter(sample_boot, ta.boot)->filter,
-                        pointer, ta.time)
-                .second};
-  }
-
-  std::pair<logger::BootDuration, double> Offset(
-      const NoncausalTimestampFilter *other, Pointer pointer,
-      logger::BootTimestamp ta_base, double ta, size_t sample_boot) const {
-    std::pair<Pointer, std::pair<std::chrono::nanoseconds, double>> result =
-        filter(ta_base.boot, sample_boot)
-            ->filter.Offset(
-                other == nullptr
-                    ? nullptr
-                    : &other->filter(sample_boot, ta_base.boot)->filter,
-                pointer, ta_base.time, ta);
-    return std::make_pair(
-        logger::BootDuration{sample_boot, result.second.first},
-        result.second.second);
-  }
-
   // Assuming that there are at least 2 points in timestamps_, finds the 2
   // matching points.
   std::pair<Pointer,
             std::pair<std::tuple<logger::BootTimestamp, logger::BootDuration>,
                       std::tuple<logger::BootTimestamp, logger::BootDuration>>>
-  FindTimestamps(const NoncausalTimestampFilter *other, Pointer pointer,
-                 logger::BootTimestamp ta, size_t sample_boot) const {
+  FindTimestamps(const NoncausalTimestampFilter *other, bool use_other,
+                 Pointer pointer, logger::BootTimestamp ta,
+                 size_t sample_boot) const {
     const BootFilter *boot_filter = filter(ta.boot, sample_boot);
     std::pair<
         Pointer,
@@ -549,7 +536,7 @@
         result = boot_filter->filter.FindTimestamps(
             other == nullptr ? nullptr
                              : &other->filter(sample_boot, ta.boot)->filter,
-            pointer, ta.time);
+            use_other, pointer, ta.time);
     result.first.boot_filter_ = boot_filter;
     return std::make_pair(
         result.first,
@@ -568,8 +555,8 @@
   std::pair<Pointer,
             std::pair<std::tuple<logger::BootTimestamp, logger::BootDuration>,
                       std::tuple<logger::BootTimestamp, logger::BootDuration>>>
-  FindTimestamps(const NoncausalTimestampFilter *other, Pointer pointer,
-                 logger::BootTimestamp ta_base, double ta,
+  FindTimestamps(const NoncausalTimestampFilter *other, bool use_other,
+                 Pointer pointer, logger::BootTimestamp ta_base, double ta,
                  size_t sample_boot) const;
 
   static std::chrono::nanoseconds InterpolateOffset(
@@ -577,25 +564,26 @@
       std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds> p1,
       monotonic_clock::time_point ta);
 
+  static std::chrono::nanoseconds BoundOffset(
+      std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds> p0,
+      std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds> p1,
+      monotonic_clock::time_point ta);
+
   static std::chrono::nanoseconds ExtrapolateOffset(
       std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds> p0,
       monotonic_clock::time_point ta);
 
-  static std::chrono::nanoseconds InterpolateOffset(
+  static std::pair<std::chrono::nanoseconds, double> InterpolateOffset(
       std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds> p0,
       std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds> p1,
       monotonic_clock::time_point ta_base, double ta);
 
-  static double InterpolateOffsetRemainder(
-      std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds> /*p0*/,
-      std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds> /*p1*/,
+  static std::pair<std::chrono::nanoseconds, double> BoundOffset(
+      std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds> p0,
+      std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds> p1,
       monotonic_clock::time_point ta_base, double ta);
 
-  static std::chrono::nanoseconds ExtrapolateOffset(
-      std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds> p0,
-      monotonic_clock::time_point /*ta_base*/, double /*ta*/);
-
-  static double ExtrapolateOffsetRemainder(
+  static std::pair<std::chrono::nanoseconds, double> ExtrapolateOffset(
       std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds> p0,
       monotonic_clock::time_point ta_base, double ta);
 
@@ -625,14 +613,14 @@
         std::pair<
             std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>,
             std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>>>
-    FindTimestamps(const SingleFilter *other, Pointer pointer,
+    FindTimestamps(const SingleFilter *other, bool use_other, Pointer pointer,
                    monotonic_clock::time_point ta) const;
     std::pair<
         Pointer,
         std::pair<
             std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>,
             std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>>>
-    FindTimestamps(const SingleFilter *other, Pointer pointer,
+    FindTimestamps(const SingleFilter *other, bool use_other, Pointer pointer,
                    monotonic_clock::time_point ta_base, double ta) const;
 
     // Check whether the given timestamp falls within our current samples
@@ -649,11 +637,22 @@
     std::pair<Pointer, std::pair<std::chrono::nanoseconds, double>> Offset(
         const SingleFilter *other, Pointer pointer,
         monotonic_clock::time_point ta_base, double ta) const;
-    std::pair<Pointer, double> OffsetError(
+
+    std::pair<Pointer, std::pair<std::chrono::nanoseconds, double>>
+    BoundsOffset(const SingleFilter *other, Pointer pointer,
+                 monotonic_clock::time_point ta_base, double ta) const;
+
+    std::pair<Pointer, std::pair<std::chrono::nanoseconds, double>> OffsetError(
         const SingleFilter *other, Pointer pointer,
         aos::monotonic_clock::time_point ta_base, double ta,
         aos::monotonic_clock::time_point tb_base, double tb) const;
 
+    std::pair<Pointer, std::pair<std::chrono::nanoseconds, double>>
+    BoundsOffsetError(const SingleFilter *other, Pointer pointer,
+                      aos::monotonic_clock::time_point ta_base, double ta,
+                      aos::monotonic_clock::time_point tb_base,
+                      double tb) const;
+
     bool has_unobserved_line() const;
     monotonic_clock::time_point unobserved_line_end() const;
     monotonic_clock::time_point unobserved_line_remote_end() const;
@@ -705,7 +704,7 @@
               std::get<1>(timestamps_[1]) -
               aos::monotonic_clock::duration(
                   static_cast<aos::monotonic_clock::duration::rep>(
-                      absl::int128(dt.count() + MaxVelocityRatio::den / 2) *
+                      absl::int128(dt.count()) *
                       absl::int128(MaxVelocityRatio::num) /
                       absl::int128(MaxVelocityRatio::den)));
 
@@ -870,7 +869,7 @@
           std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>,
           std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>>>
   InterpolateWithOtherFilter(
-      Pointer pointer, monotonic_clock::time_point ta,
+      Pointer pointer, bool use_other, monotonic_clock::time_point ta,
       std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds> t0,
       std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds> t1);
 
diff --git a/aos/network/timestamp_filter_test.cc b/aos/network/timestamp_filter_test.cc
index 5932e3d..5d8eeeb 100644
--- a/aos/network/timestamp_filter_test.cc
+++ b/aos/network/timestamp_filter_test.cc
@@ -34,8 +34,73 @@
     return std::make_tuple(BootTimestamp{0, std::get<0>(result)},
                            BootDuration{0, std::get<1>(result)});
   }
+
+  logger::BootDuration Offset(const TestingNoncausalTimestampFilter *other,
+                              Pointer pointer, logger::BootTimestamp ta,
+                              size_t sample_boot) const {
+    return {sample_boot,
+            filter(ta.boot, sample_boot)
+                ->filter
+                .Offset(other == nullptr
+                            ? nullptr
+                            : &other->filter(sample_boot, ta.boot)->filter,
+                        pointer, ta.time)
+                .second};
+  }
+
+  std::pair<logger::BootDuration, double> Offset(
+      const TestingNoncausalTimestampFilter *other, Pointer pointer,
+      logger::BootTimestamp ta_base, double ta, size_t sample_boot) const {
+    std::pair<Pointer, std::pair<std::chrono::nanoseconds, double>> result =
+        filter(ta_base.boot, sample_boot)
+            ->filter.Offset(
+                other == nullptr
+                    ? nullptr
+                    : &other->filter(sample_boot, ta_base.boot)->filter,
+                pointer, ta_base.time, ta);
+    return std::make_pair(
+        logger::BootDuration{sample_boot, result.second.first},
+        result.second.second);
+  }
+
+  std::pair<logger::BootDuration, double> BoundsOffset(
+      const TestingNoncausalTimestampFilter *other, Pointer pointer,
+      logger::BootTimestamp ta_base, double ta, size_t sample_boot) const {
+    std::pair<Pointer, std::pair<std::chrono::nanoseconds, double>> result =
+        filter(ta_base.boot, sample_boot)
+            ->filter.BoundsOffset(
+                other == nullptr
+                    ? nullptr
+                    : &other->filter(sample_boot, ta_base.boot)->filter,
+                pointer, ta_base.time, ta);
+    return std::make_pair(
+        logger::BootDuration{sample_boot, result.second.first},
+        result.second.second);
+  }
 };
 
+void NormalizeTimestamps(monotonic_clock::time_point *ta_base, double *ta) {
+  double ta_orig = *ta;
+  chrono::nanoseconds ta_digits(static_cast<int64_t>(std::floor(*ta)));
+  *ta_base += ta_digits;
+  *ta -= static_cast<double>(ta_digits.count());
+
+  // Sign, numerical precision wins again.
+  //   *ta_base=1000.300249970sec, *ta=-1.35525e-20
+  // We then promptly round this to
+  //   *ta_base=1000.300249969sec, *ta=1
+  // The 1.0 then breaks the LT assumption below, so we kersplat.
+  //
+  // Detect this case directly and move the 1.0 back into ta_base.
+  if (*ta == 1.0) {
+    *ta = 0.0;
+    *ta_base += chrono::nanoseconds(1);
+  }
+
+  CHECK_GE(*ta, 0.0) << ta_digits.count() << "ns " << ta_orig;
+  CHECK_LT(*ta, 1.0);
+}
+
 // Tests that adding samples tracks more negative offsets down quickly, and
 // slowly comes back up.
 TEST(TimestampFilterTest, Sample) {
@@ -246,6 +311,28 @@
   }
 
   {
+    const BootTimestamp ta_close{
+        0, monotonic_clock::time_point(chrono::nanoseconds(0))};
+    const BootTimestamp tb_close{
+        0, monotonic_clock::time_point(chrono::nanoseconds(1999))};
+
+    // Confirm that we round the first point correctly.  We should always round
+    // the slope down to avoid invalid slopes.
+    TestingNoncausalTimestampFilter filter(node_a, node_b);
+
+    filter.Sample(ta_close, {0, chrono::microseconds(-10)});
+    filter.Debug();
+    filter.Sample(tb_close, {0, chrono::microseconds(0)});
+    filter.Debug();
+    ASSERT_EQ(filter.timestamps_size(), 2u);
+
+    EXPECT_EQ(std::get<1>(filter.timestamp(0)),
+              (BootDuration{0, -chrono::nanoseconds(1)}));
+    EXPECT_EQ(std::get<1>(filter.timestamp(1)),
+              (BootDuration{0, chrono::nanoseconds(0)}));
+  }
+
+  {
     // Too much positive slope removes points.
     TestingNoncausalTimestampFilter filter(node_a, node_b);
 
@@ -775,31 +862,23 @@
 
   const monotonic_clock::time_point t1 = e + chrono::nanoseconds(10000);
   const chrono::nanoseconds o1 = chrono::nanoseconds(100);
-  const double o1d = static_cast<double>(o1.count());
 
   const monotonic_clock::time_point t2 = t1 + chrono::nanoseconds(1000);
   const chrono::nanoseconds o2 = chrono::nanoseconds(150);
-  const double o2d = static_cast<double>(o2.count());
 
   EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffset(
                 std::make_tuple(t1, o1), std::make_tuple(t2, o2), t1),
             o1);
   EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffset(
                 std::make_tuple(t1, o1), std::make_tuple(t2, o2), t1, 0.0),
-            o1);
-  EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffsetRemainder(
-                std::make_tuple(t1, o1), std::make_tuple(t2, o2), t1, 0.0),
-            0.0);
+            std::make_pair(o1, 0.0));
 
   EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffset(
                 std::make_tuple(t1, o1), std::make_tuple(t2, o2), t2),
             o2);
   EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffset(
                 std::make_tuple(t1, o1), std::make_tuple(t2, o2), t2, 0.0),
-            o1);
-  EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffsetRemainder(
-                std::make_tuple(t1, o1), std::make_tuple(t2, o2), t2, 0.0),
-            o2d - o1d);
+            std::make_pair(o2, 0.0));
 
   EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffset(
                 std::make_tuple(t1, o1), std::make_tuple(t2, o2),
@@ -808,56 +887,87 @@
   EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffset(
                 std::make_tuple(t1, o1), std::make_tuple(t2, o2),
                 t1 + chrono::nanoseconds(500), 0.0),
-            o1);
-  EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffsetRemainder(
-                std::make_tuple(t1, o1), std::make_tuple(t2, o2),
-                t1 + chrono::nanoseconds(500), 0.0),
-            25.);
+            std::make_pair(o1 + chrono::nanoseconds(25), 0.0));
 
   EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffset(
                 std::make_tuple(t1, o1), std::make_tuple(t2, o2),
                 t1 + chrono::nanoseconds(-200)),
             chrono::nanoseconds(90));
   EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffset(
-                std::make_tuple(t1, o1), std::make_tuple(t2, o2), t1, -200.),
-            o1);
-  EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffsetRemainder(
-                std::make_tuple(t1, o1), std::make_tuple(t2, o2), t1, -200.),
-            -10.);
+                std::make_tuple(t1, o1), std::make_tuple(t2, o2),
+                t1 - chrono::nanoseconds(200), 0.0),
+            std::make_pair(o1 - chrono::nanoseconds(10), 0.0));
 
   EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffset(
                 std::make_tuple(t1, o1), std::make_tuple(t2, o2),
                 t1 + chrono::nanoseconds(200)),
             chrono::nanoseconds(110));
   EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffset(
-                std::make_tuple(t1, o1), std::make_tuple(t2, o2), t1, 200.),
-            o1);
-  EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffsetRemainder(
-                std::make_tuple(t1, o1), std::make_tuple(t2, o2), t1, 200.),
-            10.);
+                std::make_tuple(t1, o1), std::make_tuple(t2, o2),
+                t1 + chrono::nanoseconds(200), 0.0),
+            std::make_pair(o1 + chrono::nanoseconds(10), 0.0));
 
   EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffset(
                 std::make_tuple(t1, o1), std::make_tuple(t2, o2),
                 t1 + chrono::nanoseconds(800)),
             chrono::nanoseconds(140));
   EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffset(
-                std::make_tuple(t1, o1), std::make_tuple(t2, o2), t1, 800.),
-            o1);
-  EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffsetRemainder(
-                std::make_tuple(t1, o1), std::make_tuple(t2, o2), t1, 800.),
-            40.);
+                std::make_tuple(t1, o1), std::make_tuple(t2, o2),
+                t1 + chrono::nanoseconds(800), 0.0),
+            std::make_pair(o1 + chrono::nanoseconds(40), 0.0));
 
   EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffset(
                 std::make_tuple(t1, o1), std::make_tuple(t2, o2),
                 t1 + chrono::nanoseconds(1200)),
             chrono::nanoseconds(160));
   EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffset(
-                std::make_tuple(t1, o1), std::make_tuple(t2, o2), t1, 1200.),
-            o1);
-  EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffsetRemainder(
-                std::make_tuple(t1, o1), std::make_tuple(t2, o2), t1, 1200.),
-            60.);
+                std::make_tuple(t1, o1), std::make_tuple(t2, o2),
+                t1 + chrono::nanoseconds(1200), 0.0),
+            std::make_pair(o1 + chrono::nanoseconds(60), 0.0));
 
+  for (int i = -MaxVelocityRatio::den * MaxVelocityRatio::num * 6;
+       i <
+       MaxVelocityRatio::den * MaxVelocityRatio::num * 6 + (t2 - t1).count();
+       ++i) {
+    monotonic_clock::time_point ta_base = t1;
+    const double ta_orig = static_cast<double>(i) / 3.0;
+    double ta = ta_orig;
+
+    NormalizeTimestamps(&ta_base, &ta);
+    CHECK_GE(ta, 0.0);
+    CHECK_LT(ta, 1.0);
+
+    const chrono::nanoseconds expected_offset =
+        NoncausalTimestampFilter::InterpolateOffset(
+            std::make_tuple(t1, o1), std::make_tuple(t2, o2), ta_base);
+
+    const std::pair<chrono::nanoseconds, double> offset =
+        NoncausalTimestampFilter::InterpolateOffset(
+            std::make_tuple(t1, o1), std::make_tuple(t2, o2), ta_base, ta);
+    EXPECT_EQ(expected_offset, offset.first);
+
+    const double expected_double_offset =
+        static_cast<double>(o1.count()) +
+        static_cast<double>(ta_orig) / static_cast<double>((t2 - t1).count()) *
+            (o2 - o1).count();
+
+    EXPECT_NEAR(static_cast<double>(offset.first.count()) + offset.second,
+                expected_double_offset, 1e-9)
+        << ": i " << i << " t " << ta_base << " " << ta << " t1 " << t1
+        << " o1 " << o1.count() << "ns t2 " << t2 << " o2 " << o2.count()
+        << "ns Non-rounded: " << expected_offset.count() << "ns";
+  }
+}
+
+// Tests that all variants of ExtrapolateOffset do reasonable things.
+TEST_F(NoncausalTimestampFilterTest, ExtrapolateOffset) {
+  const monotonic_clock::time_point e = monotonic_clock::epoch();
+
+  const monotonic_clock::time_point t1 = e + chrono::nanoseconds(10000);
+  const chrono::nanoseconds o1 = chrono::nanoseconds(100);
+
+  const monotonic_clock::time_point t2 = t1 + chrono::nanoseconds(1000);
+  const chrono::nanoseconds o2 = chrono::nanoseconds(150);
   // Test extrapolation functions before t1 and after t2
   EXPECT_EQ(
       NoncausalTimestampFilter::ExtrapolateOffset(std::make_tuple(t1, o1), t1),
@@ -889,24 +999,14 @@
   // Test base + double version
   EXPECT_EQ(NoncausalTimestampFilter::ExtrapolateOffset(std::make_tuple(t1, o1),
                                                         e, 0.),
-            o1);
-  EXPECT_EQ(NoncausalTimestampFilter::ExtrapolateOffsetRemainder(
-                std::make_tuple(t1, o1), e, 0.),
-            -(t1 - e).count() * kMaxVelocity());
-
+            std::make_pair(chrono::nanoseconds(90), 0.0));
   EXPECT_EQ(NoncausalTimestampFilter::ExtrapolateOffset(std::make_tuple(t1, o1),
                                                         t1, 0.),
-            o1);
-  EXPECT_EQ(NoncausalTimestampFilter::ExtrapolateOffsetRemainder(
-                std::make_tuple(t1, o1), t1, 0.),
-            0.);
+            std::make_pair(o1, 0.0));
 
   EXPECT_EQ(NoncausalTimestampFilter::ExtrapolateOffset(std::make_tuple(t1, o1),
-                                                        t1, -1000.),
-            o1);
-  EXPECT_EQ(NoncausalTimestampFilter::ExtrapolateOffsetRemainder(
-                std::make_tuple(t1, o1), t1, -1000.),
-            -1000. * kMaxVelocity());
+                                                        t1, 0.5),
+            std::make_pair(o1, -0.5 * kMaxVelocity()));
 
   EXPECT_EQ(
       NoncausalTimestampFilter::ExtrapolateOffset(std::make_tuple(t2, o2), t2),
@@ -914,10 +1014,7 @@
 
   EXPECT_EQ(NoncausalTimestampFilter::ExtrapolateOffset(std::make_tuple(t2, o2),
                                                         t2, 0.0),
-            o2);
-  EXPECT_EQ(NoncausalTimestampFilter::ExtrapolateOffsetRemainder(
-                std::make_tuple(t2, o2), t2, 0.0),
-            0.0);
+            std::make_pair(o2, 0.0));
 
   // Test points past our last sample
   EXPECT_EQ(NoncausalTimestampFilter::ExtrapolateOffset(
@@ -925,12 +1022,116 @@
             chrono::nanoseconds(
                 static_cast<int64_t>(o2.count() - 10000. * kMaxVelocity())));
 
-  EXPECT_EQ(NoncausalTimestampFilter::ExtrapolateOffset(std::make_tuple(t2, o2),
-                                                        t2, 100.0),
+  EXPECT_EQ(
+      NoncausalTimestampFilter::ExtrapolateOffset(
+          std::make_tuple(t2, o2), t2 + chrono::nanoseconds(10000), 0.5),
+      std::make_pair(o2 - chrono::nanoseconds(10), -0.5 * kMaxVelocity()));
+
+  // Now, test that offset + remainder functions add up to the right answer for
+  // a lot of cases.  This is enough to catch all the various rounding cases.
+  for (int i = -MaxVelocityRatio::den * MaxVelocityRatio::num * 6;
+       i < MaxVelocityRatio::den * MaxVelocityRatio::num * 4; ++i) {
+    monotonic_clock::time_point ta_base = t1;
+    const double ta_orig = static_cast<double>(i) / 3.0;
+    double ta = ta_orig;
+
+    NormalizeTimestamps(&ta_base, &ta);
+    CHECK_GE(ta, 0.0);
+    CHECK_LT(ta, 1.0);
+
+    const chrono::nanoseconds expected_offset =
+        NoncausalTimestampFilter::ExtrapolateOffset(std::make_tuple(t1, o1),
+                                                    ta_base);
+
+    std::pair<chrono::nanoseconds, double> offset =
+        NoncausalTimestampFilter::ExtrapolateOffset(std::make_tuple(t1, o1),
+                                                    ta_base, ta);
+
+    EXPECT_EQ(expected_offset, offset.first);
+    EXPECT_NEAR(
+        static_cast<double>(offset.first.count()) + offset.second,
+        static_cast<double>(o1.count()) - std::abs(ta_orig) * kMaxVelocity(),
+        1e-9)
+        << ": i " << i << " t " << ta_base << " " << ta
+        << " Non-rounded: " << expected_offset.count() << "ns";
+  }
+}
+
+// Tests that all variants of BoundOffset do reasonable things.
+TEST_F(NoncausalTimestampFilterTest, BoundOffset) {
+  const monotonic_clock::time_point e = monotonic_clock::epoch();
+
+  const monotonic_clock::time_point t1 = e + chrono::nanoseconds(10000);
+  const chrono::nanoseconds o1 = chrono::nanoseconds(100);
+
+  const monotonic_clock::time_point t2 = t1 + chrono::nanoseconds(100000);
+  const chrono::nanoseconds o2 = chrono::nanoseconds(150);
+
+  EXPECT_EQ(NoncausalTimestampFilter::BoundOffset(std::make_tuple(t1, o1),
+                                                  std::make_tuple(t2, o2), t1),
+            o1);
+  EXPECT_EQ(NoncausalTimestampFilter::BoundOffset(
+                std::make_tuple(t1, o1), std::make_tuple(t2, o2), t1, 0.0),
+            std::pair(o1, 0.0));
+
+  EXPECT_EQ(NoncausalTimestampFilter::BoundOffset(std::make_tuple(t1, o1),
+                                                  std::make_tuple(t2, o2), t2),
             o2);
-  EXPECT_EQ(NoncausalTimestampFilter::ExtrapolateOffsetRemainder(
-                std::make_tuple(t2, o2), t2, 100.0),
-            -100.0 * kMaxVelocity());
+  EXPECT_EQ(NoncausalTimestampFilter::BoundOffset(
+                std::make_tuple(t1, o1), std::make_tuple(t2, o2), t2, 0.0),
+            std::pair(o2, 0.0));
+
+  // Iterate from before t1 to after t2 and confirm that the solution is right.
+  // We must always be >= than interpolation, and must also be equal to the max
+  // of extrapolating both.  Since the numbers are small enough (by
+  // construction!), the double calculation will be close enough that we can
+  // trust it.
+
+  for (int i = -MaxVelocityRatio::den * MaxVelocityRatio::num * 6;
+       i <
+       MaxVelocityRatio::den * MaxVelocityRatio::num * 6 + (t2 - t1).count();
+       ++i) {
+    monotonic_clock::time_point ta_base = t1;
+    const double ta_orig = static_cast<double>(i) / 3.0;
+    double ta = ta_orig;
+
+    NormalizeTimestamps(&ta_base, &ta);
+    CHECK_GE(ta, 0.0);
+    CHECK_LT(ta, 1.0);
+
+    const chrono::nanoseconds expected_offset_1 =
+        NoncausalTimestampFilter::ExtrapolateOffset(std::make_tuple(t1, o1),
+                                                    ta_base);
+    const chrono::nanoseconds expected_offset_2 =
+        NoncausalTimestampFilter::ExtrapolateOffset(std::make_tuple(t2, o2),
+                                                    ta_base);
+
+    // Each of the extrapolation functions have their max at the points.  They
+    // slope up before and down after.  So, we want the max.
+    //
+    //
+    //   p0  p1                                                               |
+    //  /  \/  \                                                              |
+    // /        \                                                             |
+
+    const std::pair<chrono::nanoseconds, double> offset =
+        NoncausalTimestampFilter::BoundOffset(
+            std::make_tuple(t1, o1), std::make_tuple(t2, o2), ta_base, ta);
+
+    EXPECT_EQ(std::max(expected_offset_1, expected_offset_2), offset.first);
+
+    const double expected_double_offset = std::max(
+        static_cast<double>(o1.count()) - std::abs(ta_orig) * kMaxVelocity(),
+        static_cast<double>(o2.count()) -
+            std::abs(ta_orig - (t2 - t1).count()) * kMaxVelocity());
+
+    EXPECT_NEAR(static_cast<double>(offset.first.count()) + offset.second,
+                expected_double_offset, 1e-9)
+        << ": i " << i << " t " << ta_base << " " << ta << " t1 " << t1
+        << " o1 " << o1.count() << "ns t2 " << t2 << " o2 " << o2.count()
+        << "ns Non-rounded: "
+        << std::max(expected_offset_1, expected_offset_2).count() << "ns";
+  }
 }
 
 // Tests that FindTimestamps finds timestamps in a sequence.
@@ -956,122 +1157,122 @@
   filter.Sample(t2, o2);
   filter.Sample(t3, o3);
 
-  result = filter.FindTimestamps(nullptr, Pointer(),
+  result = filter.FindTimestamps(nullptr, true, Pointer(),
                                  e - chrono::microseconds(10), 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(t1, o1)),
                               ::testing::Eq(std::make_tuple(t2, o2))));
-  EXPECT_EQ(result, filter.FindTimestamps(nullptr, result.first,
+  EXPECT_EQ(result, filter.FindTimestamps(nullptr, true, result.first,
                                           e - chrono::microseconds(10), 0));
 
-  result = filter.FindTimestamps(nullptr, Pointer(),
+  result = filter.FindTimestamps(nullptr, true, Pointer(),
                                  e - chrono::microseconds(10), 0.9, 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(t1, o1)),
                               ::testing::Eq(std::make_tuple(t2, o2))));
   EXPECT_EQ(result,
-            filter.FindTimestamps(nullptr, result.first,
+            filter.FindTimestamps(nullptr, true, result.first,
                                   e - chrono::microseconds(10), 0.9, 0));
 
-  result =
-      filter.FindTimestamps(nullptr, Pointer(), e + chrono::microseconds(0), 0);
+  result = filter.FindTimestamps(nullptr, true, Pointer(),
+                                 e + chrono::microseconds(0), 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(t1, o1)),
                               ::testing::Eq(std::make_tuple(t2, o2))));
-  EXPECT_EQ(result, filter.FindTimestamps(nullptr, result.first,
+  EXPECT_EQ(result, filter.FindTimestamps(nullptr, true, result.first,
                                           e + chrono::microseconds(0), 0));
 
-  result = filter.FindTimestamps(nullptr, Pointer(),
+  result = filter.FindTimestamps(nullptr, true, Pointer(),
                                  e + chrono::microseconds(0), 0.8, 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(t1, o1)),
                               ::testing::Eq(std::make_tuple(t2, o2))));
-  EXPECT_EQ(result, filter.FindTimestamps(nullptr, result.first,
+  EXPECT_EQ(result, filter.FindTimestamps(nullptr, true, result.first,
                                           e + chrono::microseconds(0), 0.8, 0));
 
-  result = filter.FindTimestamps(nullptr, Pointer(),
+  result = filter.FindTimestamps(nullptr, true, Pointer(),
                                  e + chrono::microseconds(100), 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(t1, o1)),
                               ::testing::Eq(std::make_tuple(t2, o2))));
-  EXPECT_EQ(result, filter.FindTimestamps(nullptr, result.first,
+  EXPECT_EQ(result, filter.FindTimestamps(nullptr, true, result.first,
                                           e + chrono::microseconds(100), 0));
 
-  result = filter.FindTimestamps(nullptr, Pointer(),
+  result = filter.FindTimestamps(nullptr, true, Pointer(),
                                  e + chrono::microseconds(100), 0.7, 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(t1, o1)),
                               ::testing::Eq(std::make_tuple(t2, o2))));
   EXPECT_EQ(result,
-            filter.FindTimestamps(nullptr, result.first,
+            filter.FindTimestamps(nullptr, true, result.first,
                                   e + chrono::microseconds(100), 0.7, 0));
 
-  result = filter.FindTimestamps(nullptr, Pointer(),
+  result = filter.FindTimestamps(nullptr, true, Pointer(),
                                  e + chrono::microseconds(1000), 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(t2, o2)),
                               ::testing::Eq(std::make_tuple(t3, o3))));
-  EXPECT_EQ(result, filter.FindTimestamps(nullptr, result.first,
+  EXPECT_EQ(result, filter.FindTimestamps(nullptr, true, result.first,
 
                                           e + chrono::microseconds(1000), 0));
 
-  result = filter.FindTimestamps(nullptr, Pointer(),
+  result = filter.FindTimestamps(nullptr, true, Pointer(),
                                  e + chrono::microseconds(1000), 0.0, 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(t2, o2)),
                               ::testing::Eq(std::make_tuple(t3, o3))));
   EXPECT_EQ(result,
-            filter.FindTimestamps(nullptr, result.first,
+            filter.FindTimestamps(nullptr, true, result.first,
                                   e + chrono::microseconds(1000), 0.0, 0));
 
-  result = filter.FindTimestamps(nullptr, Pointer(),
+  result = filter.FindTimestamps(nullptr, true, Pointer(),
                                  e + chrono::microseconds(1500), 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(t2, o2)),
                               ::testing::Eq(std::make_tuple(t3, o3))));
-  EXPECT_EQ(result, filter.FindTimestamps(nullptr, result.first,
+  EXPECT_EQ(result, filter.FindTimestamps(nullptr, true, result.first,
                                           e + chrono::microseconds(1500), 0));
 
-  result = filter.FindTimestamps(nullptr, Pointer(),
+  result = filter.FindTimestamps(nullptr, true, Pointer(),
                                  e + chrono::microseconds(1500), 0.0, 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(t2, o2)),
                               ::testing::Eq(std::make_tuple(t3, o3))));
   EXPECT_EQ(result,
-            filter.FindTimestamps(nullptr, result.first,
+            filter.FindTimestamps(nullptr, true, result.first,
                                   e + chrono::microseconds(1500), 0.0, 0));
 
-  result = filter.FindTimestamps(nullptr, Pointer(),
+  result = filter.FindTimestamps(nullptr, true, Pointer(),
                                  e + chrono::microseconds(2000), 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(t2, o2)),
                               ::testing::Eq(std::make_tuple(t3, o3))));
-  EXPECT_EQ(result, filter.FindTimestamps(nullptr, result.first,
+  EXPECT_EQ(result, filter.FindTimestamps(nullptr, true, result.first,
                                           e + chrono::microseconds(2000), 0));
-  result = filter.FindTimestamps(nullptr, Pointer(),
+  result = filter.FindTimestamps(nullptr, true, Pointer(),
                                  e + chrono::microseconds(2000), 0.1, 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(t2, o2)),
                               ::testing::Eq(std::make_tuple(t3, o3))));
   EXPECT_EQ(result,
-            filter.FindTimestamps(nullptr, result.first,
+            filter.FindTimestamps(nullptr, true, result.first,
                                   e + chrono::microseconds(2000), 0.1, 0));
 
-  result = filter.FindTimestamps(nullptr, Pointer(),
+  result = filter.FindTimestamps(nullptr, true, Pointer(),
                                  e + chrono::microseconds(2500), 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(t2, o2)),
                               ::testing::Eq(std::make_tuple(t3, o3))));
-  EXPECT_EQ(result, filter.FindTimestamps(nullptr, result.first,
+  EXPECT_EQ(result, filter.FindTimestamps(nullptr, true, result.first,
                                           e + chrono::microseconds(2500), 0));
 
-  result = filter.FindTimestamps(nullptr, Pointer(),
+  result = filter.FindTimestamps(nullptr, true, Pointer(),
                                  e + chrono::microseconds(2500), 0.0, 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(t2, o2)),
                               ::testing::Eq(std::make_tuple(t3, o3))));
   EXPECT_EQ(result,
-            filter.FindTimestamps(nullptr, result.first,
+            filter.FindTimestamps(nullptr, true, result.first,
                                   e + chrono::microseconds(2500), 0.0, 0));
 }
 
@@ -1114,68 +1315,145 @@
 
   // Confirm the problem statement is reasonable...  We've had enough trouble
   // here in the past.
-  EXPECT_TRUE(
-      filter_a.ValidateSolution(&filter_b, Pointer(), t1_a, t1_a + o1_a + chrono::nanoseconds(1)));
-  EXPECT_TRUE(
-      filter_a.ValidateSolution(&filter_b, Pointer(), t2_a, t2_a + o2_a + chrono::nanoseconds(1)));
+  EXPECT_TRUE(filter_a.ValidateSolution(&filter_b, Pointer(), t1_a,
+                                        t1_a + o1_a + chrono::nanoseconds(1)));
+  EXPECT_TRUE(filter_a.ValidateSolution(&filter_b, Pointer(), t2_a,
+                                        t2_a + o2_a + chrono::nanoseconds(1)));
 
-  EXPECT_TRUE(
-      filter_b.ValidateSolution(&filter_a, Pointer(), t1_b, t1_b + o1_b + chrono::nanoseconds(1)));
-  EXPECT_TRUE(
-      filter_b.ValidateSolution(&filter_a, Pointer(), t2_b, t2_b + o2_b + chrono::nanoseconds(1)));
-  EXPECT_TRUE(
-      filter_b.ValidateSolution(&filter_a, Pointer(), t3_b, t3_b + o3_b + chrono::nanoseconds(1)));
+  EXPECT_TRUE(filter_b.ValidateSolution(&filter_a, Pointer(), t1_b,
+                                        t1_b + o1_b + chrono::nanoseconds(1)));
+  EXPECT_TRUE(filter_b.ValidateSolution(&filter_a, Pointer(), t2_b,
+                                        t2_b + o2_b + chrono::nanoseconds(1)));
+  EXPECT_TRUE(filter_b.ValidateSolution(&filter_a, Pointer(), t3_b,
+                                        t3_b + o3_b + chrono::nanoseconds(1)));
 
   // Before the start
-  result = filter_a.FindTimestamps(&filter_b, Pointer(),
+  result = filter_a.FindTimestamps(&filter_b, true, Pointer(),
                                    e - chrono::microseconds(10), 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(t1_a, o1_a)),
                               ::testing::Eq(std::make_tuple(
                                   t2_b + o2_b, -o2_b - kMinNetworkDelay()))));
-  EXPECT_EQ(result, filter_a.FindTimestamps(&filter_b, result.first,
+  EXPECT_EQ(result, filter_a.FindTimestamps(&filter_b, true, result.first,
                                             e - chrono::microseconds(10), 0));
 
   // Before the first opposite point.
-  result = filter_a.FindTimestamps(&filter_b, Pointer(),
+  result = filter_a.FindTimestamps(&filter_b, true, Pointer(),
                                    e + chrono::microseconds(10), 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(t1_a, o1_a)),
                               ::testing::Eq(std::make_tuple(
                                   t2_b + o2_b, -o2_b - kMinNetworkDelay()))));
-  EXPECT_EQ(result, filter_a.FindTimestamps(&filter_b, result.first,
+  EXPECT_EQ(result, filter_a.FindTimestamps(&filter_b, true, result.first,
                                             e + chrono::microseconds(10), 0));
 
   // Between the two opposite points.
-  result = filter_a.FindTimestamps(&filter_b, Pointer(),
+  result = filter_a.FindTimestamps(&filter_b, true, Pointer(),
                                    e + chrono::microseconds(250), 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(
                                   t2_b + o2_b, -o2_b - kMinNetworkDelay())),
                               ::testing::Eq(std::make_tuple(
                                   t3_b + o3_b, -o3_b - kMinNetworkDelay()))));
-  EXPECT_EQ(result, filter_a.FindTimestamps(&filter_b, result.first,
+  EXPECT_EQ(result, filter_a.FindTimestamps(&filter_b, true, result.first,
                                             e + chrono::microseconds(250), 0));
 
   // After the last opposite point.
-  result = filter_a.FindTimestamps(&filter_b, Pointer(),
+  result = filter_a.FindTimestamps(&filter_b, true, Pointer(),
                                    e + chrono::microseconds(450), 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(
                                   t3_b + o3_b, -o3_b - kMinNetworkDelay())),
                               ::testing::Eq(std::make_tuple(t2_a, o2_a))));
-  EXPECT_EQ(result, filter_a.FindTimestamps(&filter_b, result.first,
+  EXPECT_EQ(result, filter_a.FindTimestamps(&filter_b, true, result.first,
                                             e + chrono::microseconds(450), 0));
 
   // And after the end.
-  result = filter_a.FindTimestamps(&filter_b, Pointer(),
+  result = filter_a.FindTimestamps(&filter_b, true, Pointer(),
                                    e + chrono::microseconds(1100), 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(
                                   t3_b + o3_b, -o3_b - kMinNetworkDelay())),
                               ::testing::Eq(std::make_tuple(t2_a, o2_a))));
-  EXPECT_EQ(result, filter_a.FindTimestamps(&filter_b, result.first,
+  EXPECT_EQ(result, filter_a.FindTimestamps(&filter_b, true, result.first,
                                             e + chrono::microseconds(1100), 0));
+
+  // And make sure that the FindTimestamps with "use_other==false" flag will
+  // return the same answer as no filter.
+  for (BootTimestamp t = t1_a - chrono::microseconds(500);
+       t < t2_a + chrono::microseconds(500); t += chrono::microseconds(100)) {
+    EXPECT_EQ(filter_a.FindTimestamps(&filter_b, false, Pointer(), t, 0),
+              filter_a.FindTimestamps(nullptr, true, Pointer(), t, 0));
+  }
+}
+
+// Tests that we can validate a solution reasonably.
+TEST_F(NoncausalTimestampFilterTest, ValidateSolution) {
+  const BootTimestamp e{0, monotonic_clock::epoch()};
+  // Note: t1, t2, t3 need to be picked such that the slop is small so filter
+  // doesn't modify the timestamps.
+  const BootTimestamp t1_a = e + chrono::nanoseconds(0);
+  const BootDuration o1_a{0, chrono::nanoseconds(100)};
+  const BootTimestamp t2_a = e + chrono::microseconds(1000);
+  const BootDuration o2_a{0, chrono::nanoseconds(100)};
+
+  const BootTimestamp tmid_a = e + chrono::microseconds(500);
+  const BootDuration omid_a{0, chrono::nanoseconds(-400)};
+
+  const BootTimestamp tbefore_a = e - chrono::microseconds(500);
+  const BootDuration obefore_a{0, chrono::nanoseconds(-400)};
+  const BootTimestamp tafter_a = e + chrono::microseconds(1500);
+  const BootDuration oafter_a{0, chrono::nanoseconds(-400)};
+
+  TestingNoncausalTimestampFilter filter_a(node_a, node_b);
+  TestingNoncausalTimestampFilter filter_b(node_b, node_a);
+
+  std::pair<Pointer,
+            std::pair<std::tuple<logger::BootTimestamp, logger::BootDuration>,
+                      std::tuple<logger::BootTimestamp, logger::BootDuration>>>
+      result;
+
+  filter_a.Sample(t1_a, o1_a);
+  filter_a.Sample(t2_a, o2_a);
+
+  // At the control points, we should see that the boundary is right at the
+  // edge.
+  EXPECT_TRUE(filter_a.ValidateSolution(&filter_b, Pointer(), t1_a,
+                                        t1_a + o1_a + chrono::nanoseconds(1)));
+  EXPECT_TRUE(filter_a.ValidateSolution(&filter_b, Pointer(), t1_a, 0.0,
+                                        t1_a + o1_a, 0.00001));
+  EXPECT_FALSE(filter_a.ValidateSolution(&filter_b, Pointer(), t1_a,
+                                         t1_a + o1_a - chrono::nanoseconds(1)));
+  EXPECT_FALSE(filter_a.ValidateSolution(&filter_b, Pointer(), t1_a, 0.0,
+                                         t1_a + o1_a, -0.0001));
+
+  EXPECT_TRUE(filter_a.ValidateSolution(&filter_b, Pointer(), t2_a,
+                                        t2_a + o2_a + chrono::nanoseconds(1)));
+  EXPECT_TRUE(filter_a.ValidateSolution(&filter_b, Pointer(), t2_a, 0.0,
+                                        t2_a + o2_a, 0.00001));
+  EXPECT_FALSE(filter_a.ValidateSolution(&filter_b, Pointer(), t2_a,
+                                         t2_a + o2_a - chrono::nanoseconds(1)));
+  EXPECT_FALSE(filter_a.ValidateSolution(&filter_b, Pointer(), t2_a, 0.0,
+                                         t2_a + o2_a, -0.00001));
+
+  // Now that we've checked the control points, check in the middle to confirm
+  // it looks like we are using BoundOffset rather than interpolate.
+  EXPECT_TRUE(filter_a.ValidateSolution(
+      &filter_b, Pointer(), tmid_a, tmid_a + omid_a + chrono::nanoseconds(1)));
+  EXPECT_TRUE(filter_a.ValidateSolution(&filter_b, Pointer(), tmid_a, 0.0,
+                                        tmid_a + omid_a, 0.00001));
+
+  EXPECT_FALSE(filter_a.ValidateSolution(
+      &filter_b, Pointer(), tbefore_a,
+      tbefore_a + obefore_a - chrono::nanoseconds(1)));
+  EXPECT_FALSE(filter_a.ValidateSolution(&filter_b, Pointer(), tbefore_a, 0.0,
+                                         tbefore_a + obefore_a, -0.00001));
+
+  EXPECT_FALSE(
+      filter_a.ValidateSolution(&filter_b, Pointer(), tafter_a,
+                                tafter_a + oafter_a - chrono::nanoseconds(1)));
+  EXPECT_FALSE(filter_a.ValidateSolution(&filter_b, Pointer(), tafter_a, 0.0,
+                                         tafter_a + oafter_a, -0.00001));
 }
 
 // Tests that Offset returns results indicative of it calling InterpolateOffset
@@ -1186,15 +1464,12 @@
   // doesn't modify the timestamps.
   const BootTimestamp t1 = e + chrono::nanoseconds(1000);
   const BootDuration o1{0, chrono::nanoseconds(100)};
-  const double o1d = static_cast<double>(o1.duration.count());
 
   const BootTimestamp t2 = e + chrono::microseconds(2000);
   const BootDuration o2{0, chrono::nanoseconds(150)};
-  const double o2d = static_cast<double>(o2.duration.count());
 
   const BootTimestamp t3 = e + chrono::microseconds(3000);
   const BootDuration o3{0, chrono::nanoseconds(50)};
-  const double o3d = static_cast<double>(o3.duration.count());
 
   const BootTimestamp t4 = e + chrono::microseconds(4000);
 
@@ -1206,26 +1481,46 @@
   EXPECT_EQ(filter.Offset(nullptr, Pointer(), t1, 0), o1);
   EXPECT_EQ(filter.Offset(nullptr, Pointer(), t1, 0.0, 0),
             std::make_pair(o1, 0.0));
+  EXPECT_EQ(filter.BoundsOffset(nullptr, Pointer(), t1, 0.0, 0),
+            std::make_pair(o1, 0.0));
   // Check if we ask for something away from point that we get an offset
   // based on the MaxVelocity allowed
   const double offset_pre = -(t1.time - e.time).count() * kMaxVelocity();
   EXPECT_EQ(filter.Offset(nullptr, Pointer(), e, 0),
             o1 + chrono::nanoseconds(static_cast<int64_t>(offset_pre)));
-  EXPECT_EQ(filter.Offset(nullptr, Pointer(), e, 0.0, 0),
-            std::make_pair(o1, offset_pre));
+  EXPECT_EQ(
+      filter.Offset(nullptr, Pointer(), e, 0.0, 0),
+      std::make_pair(o1 + chrono::nanoseconds(static_cast<int64_t>(offset_pre)),
+                     0.0));
+  EXPECT_EQ(
+      filter.BoundsOffset(nullptr, Pointer(), e, 0.0, 0),
+      std::make_pair(o1 + chrono::nanoseconds(static_cast<int64_t>(offset_pre)),
+                     0.0));
 
   double offset_post = -(t2.time - t1.time).count() * kMaxVelocity();
   EXPECT_EQ(filter.Offset(nullptr, Pointer(), t2, 0),
             o1 + chrono::nanoseconds(static_cast<int64_t>(offset_post)));
-  EXPECT_EQ(filter.Offset(nullptr, Pointer(), t2, 0.0, 0),
-            std::make_pair(o1, offset_post));
+  EXPECT_EQ(
+      filter.Offset(nullptr, Pointer(), t2, 0.0, 0),
+      std::make_pair(
+          o1 + chrono::nanoseconds(static_cast<int64_t>(offset_post)), 0.0));
+  EXPECT_EQ(
+      filter.BoundsOffset(nullptr, Pointer(), t2, 0.0, 0),
+      std::make_pair(
+          o1 + chrono::nanoseconds(static_cast<int64_t>(offset_post)), 0.0));
 
   filter.Sample(t2, o2);
   filter.Sample(t3, o3);
 
   EXPECT_EQ(filter.Offset(nullptr, Pointer(), t1, 0), o1);
+  EXPECT_EQ(filter.BoundsOffset(nullptr, Pointer(), t1, 0.0, 0),
+            std::make_pair(o1, 0.0));
   EXPECT_EQ(filter.Offset(nullptr, Pointer(), t2, 0), o2);
+  EXPECT_EQ(filter.BoundsOffset(nullptr, Pointer(), t2, 0.0, 0),
+            std::make_pair(o2, 0.0));
   EXPECT_EQ(filter.Offset(nullptr, Pointer(), t3, 0), o3);
+  EXPECT_EQ(filter.BoundsOffset(nullptr, Pointer(), t3, 0.0, 0),
+            std::make_pair(o3, 0.0));
 
   EXPECT_EQ(filter.Offset(nullptr, Pointer(), t1, 0.0, 0),
             std::make_pair(o1, 0.0));
@@ -1234,7 +1529,7 @@
       filter.Offset(nullptr, Pointer(),
                     e + (t2.time_since_epoch() + t1.time_since_epoch()) / 2,
                     0.0, 0),
-      std::make_pair(o1, (o2d - o1d) / 2.));
+      std::make_pair(o1 + (o2 - o1) / 2, 0.0));
 
   EXPECT_EQ(filter.Offset(nullptr, Pointer(), t2, 0.0, 0),
             std::make_pair(o2, 0.0));
@@ -1243,7 +1538,7 @@
       filter.Offset(nullptr, Pointer(),
                     e + (t2.time_since_epoch() + t3.time_since_epoch()) / 2,
                     0.0, 0),
-      std::make_pair(o2, (o2d + o3d) / 2. - o2d));
+      std::make_pair((o2 + o3) / 2, 0.0));
 
   EXPECT_EQ(filter.Offset(nullptr, Pointer(), t3, 0.0, 0),
             std::make_pair(o3, 0.0));
@@ -1251,14 +1546,21 @@
   // Check that we still get same answer for times before our sample data...
   EXPECT_EQ(filter.Offset(nullptr, Pointer(), e, 0),
             o1 + chrono::nanoseconds(static_cast<int64_t>(offset_pre)));
-  EXPECT_EQ(filter.Offset(nullptr, Pointer(), e, 0.0, 0),
-            std::make_pair(o1, offset_pre));
+  EXPECT_EQ(
+      filter.Offset(nullptr, Pointer(), e, 0.0, 0),
+      std::make_pair(o1 + chrono::nanoseconds(static_cast<int64_t>(offset_pre)),
+                     0.0));
   // ... and after
   offset_post = -(t4.time - t3.time).count() * kMaxVelocity();
   EXPECT_EQ(filter.Offset(nullptr, Pointer(), t4, 0),
             (o3 + chrono::nanoseconds(static_cast<int64_t>(offset_post))));
-  EXPECT_EQ(filter.Offset(nullptr, Pointer(), t4, 0.0, 0),
-            std::make_pair(o3, offset_post));
+  EXPECT_EQ(
+      filter.Offset(nullptr, Pointer(), t4, 0.0, 0),
+      std::make_pair(
+          o3 + chrono::nanoseconds(static_cast<int64_t>(offset_post)), 0.0));
+
+  EXPECT_EQ(filter.BoundsOffset(nullptr, Pointer(), t2 + (t3 - t2) / 2, 0.0, 0),
+            std::make_pair(o2 - chrono::nanoseconds(500), 0.0));
 }
 
 // Tests that adding duplicates gets correctly deduplicated.
diff --git a/aos/network/web_proxy.cc b/aos/network/web_proxy.cc
index 7161ee3..d384f72 100644
--- a/aos/network/web_proxy.cc
+++ b/aos/network/web_proxy.cc
@@ -59,7 +59,7 @@
   }
   TimerHandler *const timer = event_loop_->AddTimer([this]() {
     for (auto &subscriber : subscribers_) {
-      if (subscriber) subscriber->RunIteration();
+      if (subscriber) subscriber->RunIteration(recording_);
     }
   });
 
@@ -201,54 +201,58 @@
   global_epoll = nullptr;
 }
 
-void Subscriber::RunIteration() {
-  if (channels_.empty() && (buffer_size_ == 0 || !store_history_)) {
-    fetcher_->Fetch();
-    message_buffer_.clear();
-    return;
-  }
+void WebProxy::StopRecording() { websocket_handler_->StopRecording(); }
 
-  while (fetcher_->FetchNext()) {
-    // If we aren't building up a buffer, short-circuit the FetchNext().
-    if (buffer_size_ == 0) {
+void Subscriber::RunIteration(bool fetch_new) {
+  if (fetch_new) {
+    if (channels_.empty() && (buffer_size_ == 0 || !store_history_)) {
       fetcher_->Fetch();
+      message_buffer_.clear();
+      return;
     }
-    Message message;
-    message.index = fetcher_->context().queue_index;
-    VLOG(2) << "Packing a message with " << GetPacketCount(fetcher_->context())
-            << "packets";
-    for (int packet_index = 0;
-         packet_index < GetPacketCount(fetcher_->context()); ++packet_index) {
-      // Pack directly into the mbuffer.  This is admittedly a bit painful.
-      const size_t packet_size =
-          PackedMessageSize(fetcher_->context(), packet_index);
-      struct mbuf *mbuffer = mbuf_alloc(packet_size);
 
-      {
-        // Wrap a pre-allocated builder around the mbuffer.
-        PreallocatedAllocator allocator(mbuf_buf(mbuffer), packet_size);
-        flatbuffers::FlatBufferBuilder fbb(packet_size, &allocator);
-        flatbuffers::Offset<MessageHeader> message_offset = PackMessage(
-            &fbb, fetcher_->context(), channel_index_, packet_index);
-        fbb.Finish(message_offset);
-
-        // Now, the flatbuffer is built from the back to the front.  So any
-        // extra memory will be at the front.  Setup the end and start pointers
-        // on the mbuf.
-        mbuf_set_end(mbuffer, packet_size);
-        mbuf_set_pos(mbuffer, packet_size - fbb.GetSize());
+    while (fetcher_->FetchNext()) {
+      // If we aren't building up a buffer, short-circuit the FetchNext().
+      if (buffer_size_ == 0) {
+        fetcher_->Fetch();
       }
+      Message message;
+      message.index = fetcher_->context().queue_index;
+      VLOG(2) << "Packing a message with "
+              << GetPacketCount(fetcher_->context()) << "packets";
+      for (int packet_index = 0;
+           packet_index < GetPacketCount(fetcher_->context()); ++packet_index) {
+        // Pack directly into the mbuffer.  This is admittedly a bit painful.
+        const size_t packet_size =
+            PackedMessageSize(fetcher_->context(), packet_index);
+        struct mbuf *mbuffer = mbuf_alloc(packet_size);
 
-      message.data.emplace_back(
-          std::shared_ptr<struct mbuf>(mbuffer, mem_deref));
-    }
-    message_buffer_.push_back(std::move(message));
-    // If we aren't keeping a buffer, then we should only do one iteration of
-    // the while loop--otherwise, if additional messages arrive between the
-    // first FetchNext() and the second iteration then we can end up behaving
-    // poorly (since we do a Fetch() when buffer_size_ == 0).
-    if (buffer_size_ == 0) {
-      break;
+        {
+          // Wrap a pre-allocated builder around the mbuffer.
+          PreallocatedAllocator allocator(mbuf_buf(mbuffer), packet_size);
+          flatbuffers::FlatBufferBuilder fbb(packet_size, &allocator);
+          flatbuffers::Offset<MessageHeader> message_offset = PackMessage(
+              &fbb, fetcher_->context(), channel_index_, packet_index);
+          fbb.Finish(message_offset);
+
+          // Now, the flatbuffer is built from the back to the front.  So any
+          // extra memory will be at the front.  Setup the end and start
+          // pointers on the mbuf.
+          mbuf_set_end(mbuffer, packet_size);
+          mbuf_set_pos(mbuffer, packet_size - fbb.GetSize());
+        }
+
+        message.data.emplace_back(
+            std::shared_ptr<struct mbuf>(mbuffer, mem_deref));
+      }
+      message_buffer_.push_back(std::move(message));
+      // If we aren't keeping a buffer, then we should only do one iteration of
+      // the while loop--otherwise, if additional messages arrive between the
+      // first FetchNext() and the second iteration then we can end up behaving
+      // poorly (since we do a Fetch() when buffer_size_ == 0).
+      if (buffer_size_ == 0) {
+        break;
+      }
     }
   }
   for (auto &conn : channels_) {
diff --git a/aos/network/web_proxy.h b/aos/network/web_proxy.h
index baca26e..0c1d1dc 100644
--- a/aos/network/web_proxy.h
+++ b/aos/network/web_proxy.h
@@ -41,6 +41,10 @@
   void onData(::seasocks::WebSocket *sock, const uint8_t *data,
               size_t size) override;
   void onDisconnect(::seasocks::WebSocket *sock) override;
+  // Stops recording data, even if the event loop continues running. This allows
+  // us to continue serving the webserver + websocket server, without having to
+  // load more actual data.
+  void StopRecording() { recording_ = false; }
 
  private:
   ::seasocks::Server *server_;
@@ -51,6 +55,8 @@
       connections_;
 
   EventLoop *const event_loop_;
+  // Whether to pay attention to new messages.
+  bool recording_ = true;
 };
 
 // Wrapper class that manages the seasocks server and WebsocketHandler.
@@ -75,6 +81,9 @@
 
   void SetDataPath(const char *path) { server_.setStaticPath(path); }
 
+  // Stops recording data. Useful for setting end times in log replay.
+  void StopRecording();
+
  private:
   WebProxy(aos::EventLoop *event_loop, aos::internal::EPoll *epoll,
            StoreHistory store_history, int per_channel_buffer_size_bytes);
@@ -121,7 +130,12 @@
         store_history_(store_history == StoreHistory::kYes),
         buffer_size_(buffer_size) {}
 
-  void RunIteration();
+  // Runs a single iteration of going through and fetching new data as needed
+  // and servicing any WebRTC channels that are requesting messages.
+  // fetch_new specifies whether we should actually attempt to retrieve new data
+  // on the channel--if false, will only worry about sending existing data to
+  // any clients.
+  void RunIteration(bool fetch_new);
 
   void AddListener(std::shared_ptr<ScopedDataChannel> data_channel,
                    TransferMethod transfer_method);
diff --git a/aos/network/www/BUILD b/aos/network/www/BUILD
index 442e8be..03f41a5 100644
--- a/aos/network/www/BUILD
+++ b/aos/network/www/BUILD
@@ -2,6 +2,8 @@
 load("//tools/build_rules:js.bzl", "rollup_bundle")
 load("//aos:config.bzl", "aos_config")
 
+exports_files(["styles.css"])
+
 filegroup(
     name = "files",
     srcs = glob([
diff --git a/aos/network/www/plotter.ts b/aos/network/www/plotter.ts
index 842f1b7..89b360c 100644
--- a/aos/network/www/plotter.ts
+++ b/aos/network/www/plotter.ts
@@ -105,6 +105,7 @@
   private colorLocation: WebGLUniformLocation | null;
   private pointSizeLocation: WebGLUniformLocation | null;
   private _label: string|null = null;
+  private _hidden: boolean = false;
   constructor(
       private readonly ctx: WebGLRenderingContext,
       private readonly program: WebGLProgram,
@@ -190,6 +191,15 @@
     }
   }
 
+  hidden(): boolean {
+    return this._hidden;
+  }
+
+  setHidden(hidden: boolean) {
+    this._hasUpdate = true;
+    this._hidden = hidden;
+  }
+
   getPoints(): Point[] {
     return this.points;
   }
@@ -220,6 +230,10 @@
       return;
     }
 
+    if (this._hidden) {
+      return;
+    }
+
     this.ctx.bindBuffer(this.ctx.ARRAY_BUFFER, this.buffer);
     // Note: if this is generating errors associated with the buffer size,
     // confirm that this.points really is a Float32Array.
@@ -297,92 +311,120 @@
 export class Legend {
   // Location, in pixels, of the legend in the text canvas.
   private location: number[] = [0, 0];
-  constructor(private ctx: CanvasRenderingContext2D, private lines: Line[]) {
-    this.location = [80, 30];
+  constructor(
+      private plot: Plot, private lines: Line[],
+      private legend: HTMLDivElement) {
+    this.setPosition([80, 30]);
   }
 
   setPosition(location: number[]): void {
     this.location = location;
+    this.legend.style.left = location[0] + 'px';
+    this.legend.style.top = location[1] + 'px';
   }
 
   draw(): void {
-    this.ctx.save();
+    // First, figure out if anything has changed.  The legend is created and
+    // then titles are changed afterwards, so we have to do this lazily.
+    let needsUpdate = false;
+    {
+      let child = 0;
+      for (let line of this.lines) {
+        if (line.label() === null) {
+          continue;
+        }
 
-    this.ctx.translate(this.location[0], this.location[1]);
+        if (child >= this.legend.children.length) {
+          needsUpdate = true;
+          break;
+        }
 
-    // Space between rows of the legend.
-    const step = 20;
-
-    let maxWidth = 0;
-
-    // In the legend, we render both a small line of the appropriate color as
-    // well as the text label--start/endPoint are the relative locations of the
-    // endpoints of the miniature line within the row, and textStart is where
-    // we begin rendering the text within the row.
-    const startPoint = [0, 0];
-    const endPoint = [10, -10];
-    const textStart = endPoint[0] + 5;
-
-    // Calculate how wide the legend needs to be to fit all the text.
-    this.ctx.textAlign = 'left';
-    let numLabels = 0;
-    for (let line of this.lines) {
-      if (line.label() === null) {
-        continue;
+        // Make sure both have text in the right spot.  Don't be too picky since
+        // nothing should really be changing here, and it's handy to let the
+        // user edit the HTML for testing.
+        if (this.legend.children[child].lastChild.textContent.length == 0 &&
+            line.label().length != 0) {
+          needsUpdate = true;
+          break;
+        }
+        child += 1;
       }
-      ++numLabels;
-      const width =
-          textStart + this.ctx.measureText(line.label()).actualBoundingBoxRight;
-      maxWidth = Math.max(width, maxWidth);
-    }
 
-    if (numLabels === 0) {
-      this.ctx.restore();
+      // If we got through everything, we should be pointed past the last child.
+      // If not, more children exist than lines.
+      if (child != this.legend.children.length) {
+        needsUpdate = true;
+      }
+    }
+    if (!needsUpdate) {
       return;
     }
 
-    // Total height of the body of the legend.
-    const height = step * numLabels;
+    // Nuke the old legend.
+    while (this.legend.firstChild) {
+      this.legend.removeChild(this.legend.firstChild);
+    }
 
-    // Set the legend background to be white and opaque.
-    this.ctx.fillStyle = 'rgba(255, 255, 255, 1.0)';
-    const backgroundBuffer = 5;
-    this.ctx.fillRect(
-        -backgroundBuffer, 0, maxWidth + 2.0 * backgroundBuffer,
-        height + backgroundBuffer);
-
-    // Go through each line and render the little lines and text for each Line.
+    // Now, build up a new legend.
     for (let line of this.lines) {
       if (line.label() === null) {
         continue;
       }
-      this.ctx.translate(0, step);
+
+      // The legend is a div containing both a canvas for the style/color, and a
+      // div for the text.  Make those, color in the canvas, and add it to the
+      // page.
+      let l = document.createElement('div');
+      l.classList.add('aos_legend_line');
+      let text = document.createElement('div');
+      text.textContent = line.label();
+
+      l.appendChild(text);
+      this.legend.appendChild(l);
+
+      let c = document.createElement('canvas');
+      c.width = text.offsetHeight;
+      c.height = text.offsetHeight;
+
+      const linestyleContext = c.getContext("2d");
+      linestyleContext.clearRect(0, 0, c.width, c.height);
+
       const color = line.color();
-      this.ctx.strokeStyle = `rgb(${255.0 * color[0]}, ${255.0 * color[1]}, ${255.0 * color[2]})`;
-      this.ctx.fillStyle = this.ctx.strokeStyle;
-      if (line.drawLine()) {
-        this.ctx.beginPath();
-        this.ctx.moveTo(startPoint[0], startPoint[1]);
-        this.ctx.lineTo(endPoint[0], endPoint[1]);
-        this.ctx.closePath();
-        this.ctx.stroke();
-      }
+      linestyleContext.strokeStyle = `rgb(${255.0 * color[0]}, ${
+          255.0 * color[1]}, ${255.0 * color[2]})`;
+      linestyleContext.fillStyle = linestyleContext.strokeStyle;
+
       const pointSize = line.pointSize();
-      if (pointSize > 0) {
-        this.ctx.fillRect(
-            startPoint[0] - pointSize / 2.0, startPoint[1] - pointSize / 2.0,
-            pointSize, pointSize);
-        this.ctx.fillRect(
-            endPoint[0] - pointSize / 2.0, endPoint[1] - pointSize / 2.0,
-            pointSize, pointSize);
+      const kDistanceIn = pointSize / 2.0;
+
+      if (line.drawLine()) {
+        linestyleContext.beginPath();
+        linestyleContext.moveTo(0, 0);
+        linestyleContext.lineTo(c.height, c.width);
+        linestyleContext.closePath();
+        linestyleContext.stroke();
       }
 
-      this.ctx.fillStyle = 'black';
-      this.ctx.textAlign = 'left';
-      this.ctx.fillText(line.label(), textStart, 0);
-    }
+      if (pointSize > 0) {
+        linestyleContext.fillRect(0, 0, pointSize, pointSize);
+        linestyleContext.fillRect(
+            c.height - 1 - pointSize, c.width - 1 - pointSize, pointSize,
+            pointSize);
+      }
 
-    this.ctx.restore();
+      c.addEventListener('click', (e) => {
+        if (!line.hidden()) {
+          l.classList.add('aos_legend_line_hidden');
+        } else {
+          l.classList.remove('aos_legend_line_hidden');
+        }
+
+        line.setHidden(!line.hidden());
+        this.plot.draw();
+      });
+
+      l.prepend(c);
+    }
   }
 }
 
@@ -440,7 +482,7 @@
     return divideVec(subtractVec(canvasPos, this.zoom.offset), this.zoom.scale);
   }
 
-  // Tehse return the max/min rendered points, in plot-space (this is helpful
+  // These return the max/min rendered points, in plot-space (this is helpful
   // for drawing axis labels).
   maxVisiblePoint(): number[] {
     return this.canvasToPlotCoordinates([1.0, 1.0]);
@@ -850,6 +892,7 @@
 export class Plot {
   private canvas = document.createElement('canvas');
   private textCanvas = document.createElement('canvas');
+  private legendDiv = document.createElement('div');
   private lineDrawerContext: WebGLRenderingContext;
   private drawer: LineDrawer;
   private static keysPressed:
@@ -873,24 +916,20 @@
   constructor(wrapperDiv: HTMLDivElement) {
     wrapperDiv.appendChild(this.canvas);
     wrapperDiv.appendChild(this.textCanvas);
+    this.legendDiv.classList.add('aos_legend');
+    wrapperDiv.appendChild(this.legendDiv);
     this.lastTimeMs = (new Date()).getTime();
 
     this.canvas.style.paddingLeft = this.axisLabelBuffer.left.toString() + "px";
     this.canvas.style.paddingRight = this.axisLabelBuffer.right.toString() + "px";
     this.canvas.style.paddingTop = this.axisLabelBuffer.top.toString() + "px";
     this.canvas.style.paddingBottom = this.axisLabelBuffer.bottom.toString() + "px";
-    this.canvas.style.width = "100%";
-    this.canvas.style.height = "100%";
-    this.canvas.style.boxSizing = "border-box";
+    this.canvas.classList.add('aos_plot');
 
-    this.canvas.style.position = 'absolute';
     this.lineDrawerContext = this.canvas.getContext('webgl');
     this.drawer = new LineDrawer(this.lineDrawerContext);
 
-    this.textCanvas.style.position = 'absolute';
-    this.textCanvas.style.width = "100%";
-    this.textCanvas.style.height = "100%";
-    this.textCanvas.style.pointerEvents = 'none';
+    this.textCanvas.classList.add('aos_plot_text');
 
     this.canvas.addEventListener('dblclick', (e) => {
       this.handleDoubleClick(e);
@@ -922,7 +961,7 @@
     const textCtx = this.textCanvas.getContext("2d");
     this.axisLabels =
         new AxisLabels(textCtx, this.drawer, this.axisLabelBuffer);
-    this.legend = new Legend(textCtx, this.drawer.getLines());
+    this.legend = new Legend(this, this.drawer.getLines(), this.legendDiv);
 
     this.zoomRectangle = this.getDrawer().addLine(false);
     this.zoomRectangle.setColor(Colors.WHITE);
@@ -978,6 +1017,11 @@
   }
 
   handleMouseDown(event: MouseEvent) {
+    for (let plot of this.linkedXAxes) {
+      plot.autoFollow = false;
+    }
+    this.autoFollow = false;
+
     const button = transitionButton(event);
     switch (button) {
       case PAN_BUTTON:
@@ -1114,6 +1158,7 @@
   resetZoom() {
     const minValues = this.drawer.minValues();
     const maxValues = this.drawer.maxValues();
+    const kScalar = 0.05;
     for (const plot of this.linkedXAxes) {
       const otherMin = plot.drawer.minValues();
       const otherMax = plot.drawer.maxValues();
@@ -1132,10 +1177,18 @@
     if (minValues[0] == maxValues[0]) {
       minValues[0] -= 1;
       maxValues[0] += 1;
+    } else {
+      const width = maxValues[0] - minValues[0];
+      maxValues[0] += width * kScalar;
+      minValues[0] -= width * kScalar;
     }
     if (minValues[1] == maxValues[1]) {
       minValues[1] -= 1;
       maxValues[1] += 1;
+    } else {
+      const height = maxValues[1] - minValues[1];
+      maxValues[1] += height * kScalar;
+      minValues[1] -= height * kScalar;
     }
     if (this.defaultYRange != null) {
       minValues[1] = this.defaultYRange[0];
diff --git a/aos/network/www/styles.css b/aos/network/www/styles.css
index 23ceb21..547ec94 100644
--- a/aos/network/www/styles.css
+++ b/aos/network/www/styles.css
@@ -3,3 +3,59 @@
   border-bottom: 1px solid;
   font-size: 24px;
 }
+
+.aos_plot {
+  position: absolute;
+  width: 100%;
+  height: 100%;
+  box-sizing: border-box;
+}
+
+.aos_plot_text {
+  position: absolute;
+  width: 100%;
+  height: 100%;
+  pointer-events: none;
+}
+
+.aos_legend {
+  position: absolute;
+  z-index: 1;
+  pointer-events: none;
+}
+
+.aos_legend_line {
+  background: white;
+  padding: 2px;
+  border-radius: 2px;
+  margin-top: 3px;
+  margin-bottom: 3px;
+  font-size: 12px;
+}
+
+.aos_legend_line>div {
+  display: inline-block;
+  vertical-align: middle;
+  margin-left: 5px;
+}
+.aos_legend_line>canvas {
+  vertical-align: middle;
+  pointer-events: all;
+}
+
+.aos_legend_line_hidden {
+  filter: contrast(0.75);
+}
+
+.aos_cpp_plot {
+  width: 100%;
+  display: flex;
+  flex-direction: column;
+  height: 100%;
+  align-items: flex-start;
+}
+
+.aos_cpp_plot>div {
+  flex: 1;
+  width: 100%;
+}
diff --git a/aos/seasocks/gen_embedded.py b/aos/seasocks/gen_embedded.py
index fdcdfd9..b6f43aa 100755
--- a/aos/seasocks/gen_embedded.py
+++ b/aos/seasocks/gen_embedded.py
@@ -13,18 +13,18 @@
 output = sys.argv[1].replace('"', '')
 
 if not os.path.exists(os.path.dirname(output)):
-  os.makedirs(os.path.dirname(output))
+    os.makedirs(os.path.dirname(output))
 
 if len(sys.argv) >= 3:
-  web = sys.argv[2:]
+    web = sys.argv[2:]
 else:
-  web = []
-  for root, dirs, files in os.walk("./www_defaults", topdown=False):
-    for name in files + dirs:
-      web.append(os.path.join(root, name))
+    web = []
+    for root, dirs, files in os.walk("./www_defaults", topdown=False):
+        for name in files + dirs:
+            web.append(os.path.join(root, name))
 
 with open(output, 'w') as o:
-  o.write("""
+    o.write("""
 #include "internal/Embedded.h"
 
 #include <string>
@@ -35,14 +35,14 @@
 std::unordered_map<std::string, EmbeddedContent> embedded = {
 """)
 
-  for f in web:
-    with open(f, 'rb') as file:
-      bytes = file.read()
-    o.write('{"/%s", {' % os.path.basename(f))
-    o.write('"' + "".join(['\\x%02x' % x for x in bytes]) + '"')
-    o.write(',%d }},' % len(bytes))
+    for f in web:
+        with open(f, 'rb') as file:
+            bytes = file.read()
+        o.write('{"/%s", {' % os.path.basename(f))
+        o.write('"' + "".join(['\\x%02x' % x for x in bytes]) + '"')
+        o.write(',%d }},' % len(bytes))
 
-  o.write("""
+    o.write("""
 };
 
 } // namespace
diff --git a/aos/util/log_to_mcap.cc b/aos/util/log_to_mcap.cc
index 49aca69..5330c60 100644
--- a/aos/util/log_to_mcap.cc
+++ b/aos/util/log_to_mcap.cc
@@ -5,6 +5,7 @@
 
 DEFINE_string(node, "", "Node to replay from the perspective of.");
 DEFINE_string(output_path, "/tmp/log.mcap", "Log to output.");
+DEFINE_string(mode, "json", "json or flatbuffer serialization.");
 
 // Converts an AOS log to an MCAP log that can be fed into Foxglove. To try this
 // out, run:
@@ -30,6 +31,9 @@
   std::unique_ptr<aos::EventLoop> mcap_event_loop =
       reader.event_loop_factory()->MakeEventLoop("mcap", node);
   CHECK(!FLAGS_output_path.empty());
-  aos::McapLogger relogger(mcap_event_loop.get(), FLAGS_output_path);
+  aos::McapLogger relogger(mcap_event_loop.get(), FLAGS_output_path,
+                           FLAGS_mode == "flatbuffer"
+                               ? aos::McapLogger::Serialization::kFlatbuffer
+                               : aos::McapLogger::Serialization::kJson);
   reader.event_loop_factory()->Run();
 }
diff --git a/aos/util/log_to_mcap_test.py b/aos/util/log_to_mcap_test.py
index 86c3d1d..7bdffe4 100644
--- a/aos/util/log_to_mcap_test.py
+++ b/aos/util/log_to_mcap_test.py
@@ -12,25 +12,32 @@
 
 def main(argv: Sequence[Text]):
     parser = argparse.ArgumentParser()
-    parser.add_argument("--log_to_mcap", required=True, help="Path to log_to_mcap binary.")
+    parser.add_argument("--log_to_mcap",
+                        required=True,
+                        help="Path to log_to_mcap binary.")
     parser.add_argument("--mcap", required=True, help="Path to mcap binary.")
-    parser.add_argument("--generate_log", required=True, help="Path to logfile generator.")
+    parser.add_argument("--generate_log",
+                        required=True,
+                        help="Path to logfile generator.")
     args = parser.parse_args(argv)
     with tempfile.TemporaryDirectory() as tmpdir:
         log_name = tmpdir + "/test_log/"
         mcap_name = tmpdir + "/log.mcap"
-        subprocess.run([args.generate_log, "--output_folder", log_name]).check_returncode()
+        subprocess.run([args.generate_log, "--output_folder",
+                        log_name]).check_returncode()
         # Run with a really small chunk size, to force a multi-chunk file.
-        subprocess.run(
-            [args.log_to_mcap, "--output_path", mcap_name, "--mcap_chunk_size", "1000",
-             log_name]).check_returncode()
+        subprocess.run([
+            args.log_to_mcap, "--output_path", mcap_name, "--mcap_chunk_size",
+            "1000", "--mode", "json", log_name
+        ]).check_returncode()
         # MCAP attempts to find $HOME/.mcap.yaml, and dies on $HOME not existing. So
         # give it an arbitrary config location (it seems to be fine with a non-existent config).
-        doctor_result = subprocess.run(
-            [args.mcap, "doctor", mcap_name, "--config", tmpdir + "/.mcap.yaml"],
-            stdout=subprocess.PIPE,
-            stderr=subprocess.PIPE,
-            encoding='utf-8')
+        doctor_result = subprocess.run([
+            args.mcap, "doctor", mcap_name, "--config", tmpdir + "/.mcap.yaml"
+        ],
+                                       stdout=subprocess.PIPE,
+                                       stderr=subprocess.PIPE,
+                                       encoding='utf-8')
         print(doctor_result.stdout)
         print(doctor_result.stderr)
         # mcap doctor doesn't actually return a non-zero exit code on certain failures...
diff --git a/aos/util/mcap_logger.cc b/aos/util/mcap_logger.cc
index f4056ea..561a02b 100644
--- a/aos/util/mcap_logger.cc
+++ b/aos/util/mcap_logger.cc
@@ -1,10 +1,17 @@
 #include "aos/util/mcap_logger.h"
 
 #include "absl/strings/str_replace.h"
+#include "aos/flatbuffer_merge.h"
 #include "single_include/nlohmann/json.hpp"
 
-DEFINE_uint64(mcap_chunk_size, 10000000,
+DEFINE_uint64(mcap_chunk_size, 10'000'000,
               "Size, in bytes, of individual MCAP chunks");
+DEFINE_bool(fetch, false,
+            "Whether to fetch most recent messages at start of logfile. Turn "
+            "this on if there are, e.g., one-time messages sent before the "
+            "start of the logfile that you need access to. Turn it off if you "
+            "don't want to deal with having messages that have timestamps that "
+            "may be arbitrarily far before any other interesting messages.");
 
 namespace aos {
 
@@ -74,8 +81,11 @@
   return schema;
 }
 
-McapLogger::McapLogger(EventLoop *event_loop, const std::string &output_path)
-    : event_loop_(event_loop), output_(output_path) {
+McapLogger::McapLogger(EventLoop *event_loop, const std::string &output_path,
+                       Serialization serialization)
+    : event_loop_(event_loop),
+      output_(output_path),
+      serialization_(serialization) {
   event_loop->SkipTimingReport();
   event_loop->SkipAosLog();
   CHECK(output_);
@@ -119,13 +129,34 @@
   // summary and summary offset sections.
   WriteFooter(summary_offset, summary_offset_offset);
   WriteMagic();
+
+  // TODO(james): Add compression. With flatbuffers messages that contain large
+  // numbers of zeros (e.g., large grids or thresholded images) this can result
+  // in massive savings.
+  if (VLOG_IS_ON(2)) {
+    // For debugging, print out how much space each channel is taking in the
+    // overall log.
+    LOG(INFO) << total_message_bytes_;
+    std::vector<std::pair<size_t, const Channel *>> channel_bytes;
+    for (const auto &pair : total_channel_bytes_) {
+      channel_bytes.push_back(std::make_pair(pair.second, pair.first));
+    }
+    std::sort(channel_bytes.begin(), channel_bytes.end());
+    for (const auto &pair : channel_bytes) {
+      LOG(INFO) << configuration::StrippedChannelToString(pair.second) << ": "
+                << static_cast<float>(pair.first) * 1e-6 << "MB "
+                << static_cast<float>(pair.first) / total_message_bytes_
+                << "\n";
+    }
+  }
 }
 
 std::vector<McapLogger::SummaryOffset> McapLogger::WriteSchemasAndChannels(
     RegisterHandlers register_handlers) {
-  uint16_t id = 1;
+  uint16_t id = 0;
   std::map<uint16_t, const Channel *> channels;
   for (const Channel *channel : *event_loop_->configuration()->channels()) {
+    ++id;
     if (!configuration::ChannelIsReadableOnNode(channel, event_loop_->node())) {
       continue;
     }
@@ -141,8 +172,13 @@
               WriteChunk();
             }
           });
+      fetchers_[id] = event_loop_->MakeRawFetcher(channel);
+      event_loop_->OnRun([this, id, channel]() {
+        if (FLAGS_fetch && fetchers_[id]->Fetch()) {
+          WriteMessage(id, channel, fetchers_[id]->context(), &current_chunk_);
+        }
+      });
     }
-    ++id;
   }
 
   std::vector<SummaryOffset> offsets;
@@ -200,7 +236,9 @@
 
 void McapLogger::WriteSchema(const uint16_t id, const aos::Channel *channel) {
   CHECK(channel->has_schema());
-  std::string schema = JsonSchemaForFlatbuffer({channel->schema()}).dump();
+
+  const FlatbufferDetachedBuffer<reflection::Schema> schema =
+      CopyFlatBuffer(channel->schema());
 
   // Write out the schema (we don't bother deduplicating schema types):
   string_builder_.Reset();
@@ -208,10 +246,23 @@
   AppendInt16(&string_builder_, id);
   // Type name
   AppendString(&string_builder_, channel->type()->string_view());
-  // Encoding
-  AppendString(&string_builder_, "jsonschema");
-  // Actual schema itself
-  AppendString(&string_builder_, schema);
+  switch (serialization_) {
+    case Serialization::kJson:
+      // Encoding
+      AppendString(&string_builder_, "jsonschema");
+      // Actual schema itself
+      AppendString(&string_builder_,
+                   JsonSchemaForFlatbuffer({channel->schema()}).dump());
+      break;
+    case Serialization::kFlatbuffer:
+      // Encoding
+      AppendString(&string_builder_, "flatbuffer");
+      // Actual schema itself
+      AppendString(&string_builder_,
+                   {reinterpret_cast<const char *>(schema.span().data()),
+                    schema.span().size()});
+      break;
+  }
   WriteRecord(OpCode::kSchema, string_builder_.Result());
 }
 
@@ -227,7 +278,15 @@
                absl::StrCat(channel->name()->string_view(), " ",
                             channel->type()->string_view()));
   // Encoding
-  AppendString(&string_builder_, "json");
+  switch (serialization_) {
+    case Serialization::kJson:
+      AppendString(&string_builder_, "json");
+      break;
+    case Serialization::kFlatbuffer:
+      AppendString(&string_builder_, "flatbuffer");
+      break;
+  }
+
   // Metadata (technically supposed to be a Map<string, string>)
   AppendString(&string_builder_, "");
   WriteRecord(OpCode::kChannel, string_builder_.Result());
@@ -241,9 +300,15 @@
 
   if (!earliest_message_.has_value()) {
     earliest_message_ = context.monotonic_event_time;
+  } else {
+    earliest_message_ =
+        std::min(context.monotonic_event_time, earliest_message_.value());
   }
   if (!earliest_chunk_message_.has_value()) {
     earliest_chunk_message_ = context.monotonic_event_time;
+  } else {
+    earliest_chunk_message_ =
+        std::min(context.monotonic_event_time, earliest_chunk_message_.value());
   }
   latest_message_ = context.monotonic_event_time;
 
@@ -257,6 +322,8 @@
   // TODO(james): If we use this for multi-node logfiles, use distributed clock.
   AppendInt64(&string_builder_,
               context.monotonic_event_time.time_since_epoch().count());
+  // Note: Foxglove Studio doesn't appear to actually support using publish time
+  // right now.
   AppendInt64(&string_builder_,
               context.monotonic_event_time.time_since_epoch().count());
 
@@ -267,8 +334,18 @@
       << ": Corrupted flatbuffer on " << channel->name()->c_str() << " "
       << channel->type()->c_str();
 
-  aos::FlatbufferToJson(&string_builder_, channel->schema(),
-                        static_cast<const uint8_t *>(context.data));
+  switch (serialization_) {
+    case Serialization::kJson:
+      aos::FlatbufferToJson(&string_builder_, channel->schema(),
+                            static_cast<const uint8_t *>(context.data));
+      break;
+    case Serialization::kFlatbuffer:
+      string_builder_.Append(
+          {static_cast<const char *>(context.data), context.size});
+      break;
+  }
+  total_message_bytes_ += context.size;
+  total_channel_bytes_[channel] += context.size;
 
   message_indices_[channel_id].push_back(std::make_pair<uint64_t, uint64_t>(
       context.monotonic_event_time.time_since_epoch().count(),
diff --git a/aos/util/mcap_logger.h b/aos/util/mcap_logger.h
index dcacb68..5ae6413 100644
--- a/aos/util/mcap_logger.h
+++ b/aos/util/mcap_logger.h
@@ -30,7 +30,14 @@
 // available, to be able to support Foxglove fully.
 class McapLogger {
  public:
-  McapLogger(EventLoop *event_loop, const std::string &output_path);
+  // Whether to serialize the messages into the MCAP file as JSON or
+  // flatbuffers.
+  enum class Serialization {
+    kJson,
+    kFlatbuffer,
+  };
+  McapLogger(EventLoop *event_loop, const std::string &output_path,
+             Serialization serialization);
   ~McapLogger();
 
  private:
@@ -122,6 +129,9 @@
 
   aos::EventLoop *event_loop_;
   std::ofstream output_;
+  const Serialization serialization_;
+  size_t total_message_bytes_ = 0;
+  std::map<const Channel *, size_t> total_channel_bytes_;
   // Buffer containing serialized message data for the currently-being-built
   // chunk.
   std::stringstream current_chunk_;
@@ -136,6 +146,7 @@
       aos::monotonic_clock::min_time;
   // Count of all messages on each channel, indexed by channel ID.
   std::map<uint16_t, uint64_t> message_counts_;
+  std::map<uint16_t, std::unique_ptr<RawFetcher>> fetchers_;
   // MessageIndex's for each message. The std::map is indexed by channel ID. The
   // vector is then a series of pairs of (timestamp, offset from start of
   // current_chunk_).
diff --git a/aos/util/trapezoid_profile.py b/aos/util/trapezoid_profile.py
index af85339..76c9758 100644
--- a/aos/util/trapezoid_profile.py
+++ b/aos/util/trapezoid_profile.py
@@ -2,8 +2,9 @@
 
 import numpy
 
+
 class TrapezoidProfile(object):
-  """Computes a trapezoidal motion profile
+    """Computes a trapezoidal motion profile
 
   Attributes:
     _acceleration_time: the amount of time the robot will travel at the
@@ -21,136 +22,140 @@
     _timestep: time between calls to Update (delta_time)
     _output: output array containing distance to goal and velocity
   """
-  def __init__(self, delta_time):
-    """Constructs a TrapezoidProfile.
+
+    def __init__(self, delta_time):
+        """Constructs a TrapezoidProfile.
 
     Args:
       delta_time: time between calls to Update (seconds)
     """
-    self._acceleration_time = 0
-    self._acceleration = 0
-    self._constant_time = 0
-    self._deceleration_time = 0
-    self._deceleration = 0
+        self._acceleration_time = 0
+        self._acceleration = 0
+        self._constant_time = 0
+        self._deceleration_time = 0
+        self._deceleration = 0
 
-    self._maximum_acceleration = 0
-    self._maximum_velocity = 0
-    self._timestep = delta_time
+        self._maximum_acceleration = 0
+        self._maximum_velocity = 0
+        self._timestep = delta_time
 
-    self._output = numpy.array(numpy.zeros((2,1)))
+        self._output = numpy.array(numpy.zeros((2, 1)))
 
-  # Updates the state
-  def Update(self, goal_position, goal_velocity):
-    self._CalculateTimes(goal_position - self._output[0], goal_velocity)
+    # Updates the state
+    def Update(self, goal_position, goal_velocity):
+        self._CalculateTimes(goal_position - self._output[0], goal_velocity)
 
-    next_timestep = self._timestep
+        next_timestep = self._timestep
 
-    # We now have the amount of time we need to accelerate to follow the
-    # profile, the amount of time we need to move at constant velocity
-    # to follow the profile, and the amount of time we need to decelerate to
-    # follow the profile.  Do as much of that as we have time left in dt.
-    if self._acceleration_time > next_timestep:
-      self._UpdateVals(self._acceleration, next_timestep)
-    else:
-      self._UpdateVals(self._acceleration, self._acceleration_time)
-
-      next_timestep -= self._acceleration_time
-      if self._constant_time > next_timestep:
-        self._UpdateVals(0, next_timestep)
-      else:
-        self._UpdateVals(0, self._constant_time)
-        next_timestep -= self._constant_time;
-        if self._deceleration_time > next_timestep:
-          self._UpdateVals(self._deceleration, next_timestep)
+        # We now have the amount of time we need to accelerate to follow the
+        # profile, the amount of time we need to move at constant velocity
+        # to follow the profile, and the amount of time we need to decelerate to
+        # follow the profile.  Do as much of that as we have time left in dt.
+        if self._acceleration_time > next_timestep:
+            self._UpdateVals(self._acceleration, next_timestep)
         else:
-          self._UpdateVals(self._deceleration, self._deceleration_time)
-          next_timestep -= self._deceleration_time
-          self._UpdateVals(0, next_timestep)
+            self._UpdateVals(self._acceleration, self._acceleration_time)
 
-    return self._output
+            next_timestep -= self._acceleration_time
+            if self._constant_time > next_timestep:
+                self._UpdateVals(0, next_timestep)
+            else:
+                self._UpdateVals(0, self._constant_time)
+                next_timestep -= self._constant_time
+                if self._deceleration_time > next_timestep:
+                    self._UpdateVals(self._deceleration, next_timestep)
+                else:
+                    self._UpdateVals(self._deceleration,
+                                     self._deceleration_time)
+                    next_timestep -= self._deceleration_time
+                    self._UpdateVals(0, next_timestep)
 
-  # Useful for preventing windup etc.
-  def MoveCurrentState(self, current):
-    self._output = current
+        return self._output
 
-  # Useful for preventing windup etc.
-  def MoveGoal(self, dx):
-    self._output[0] += dx
+    # Useful for preventing windup etc.
+    def MoveCurrentState(self, current):
+        self._output = current
 
-  def SetGoal(self, x):
-    self._output[0] = x
+    # Useful for preventing windup etc.
+    def MoveGoal(self, dx):
+        self._output[0] += dx
 
-  def set_maximum_acceleration(self, maximum_acceleration):
-    self._maximum_acceleration = maximum_acceleration
+    def SetGoal(self, x):
+        self._output[0] = x
 
-  def set_maximum_velocity(self, maximum_velocity):
-    self._maximum_velocity = maximum_velocity
+    def set_maximum_acceleration(self, maximum_acceleration):
+        self._maximum_acceleration = maximum_acceleration
 
-  def _UpdateVals(self, acceleration, delta_time):
-    self._output[0, 0] += (self._output[1, 0] * delta_time
-        + 0.5 * acceleration * delta_time * delta_time)
-    self._output[1, 0] += acceleration * delta_time
+    def set_maximum_velocity(self, maximum_velocity):
+        self._maximum_velocity = maximum_velocity
 
-  def _CalculateTimes(self, distance_to_target, goal_velocity):
-    if distance_to_target == 0:
-      self._acceleration_time = 0
-      self._acceleration = 0
-      self._constant_time = 0
-      self._deceleration_time = 0
-      self._deceleration = 0
-      return
-    elif distance_to_target < 0:
-      # Recurse with everything inverted.
-      self._output[1] *= -1
-      self._CalculateTimes(-distance_to_target, -goal_velocity)
-      self._output[1] *= -1
-      self._acceleration *= -1
-      self._deceleration *= -1
-      return
+    def _UpdateVals(self, acceleration, delta_time):
+        self._output[0, 0] += (self._output[1, 0] * delta_time +
+                               0.5 * acceleration * delta_time * delta_time)
+        self._output[1, 0] += acceleration * delta_time
 
-    self._constant_time = 0
-    self._acceleration = self._maximum_acceleration
-    maximum_acceleration_velocity = (
-        distance_to_target * 2 * numpy.abs(self._acceleration)
-        + self._output[1] * self._output[1])
-    if maximum_acceleration_velocity > 0:
-      maximum_acceleration_velocity = numpy.sqrt(maximum_acceleration_velocity)
-    else:
-      maximum_acceleration_velocity = -numpy.sqrt(-maximum_acceleration_velocity)
+    def _CalculateTimes(self, distance_to_target, goal_velocity):
+        if distance_to_target == 0:
+            self._acceleration_time = 0
+            self._acceleration = 0
+            self._constant_time = 0
+            self._deceleration_time = 0
+            self._deceleration = 0
+            return
+        elif distance_to_target < 0:
+            # Recurse with everything inverted.
+            self._output[1] *= -1
+            self._CalculateTimes(-distance_to_target, -goal_velocity)
+            self._output[1] *= -1
+            self._acceleration *= -1
+            self._deceleration *= -1
+            return
 
-    # Since we know what we'd have to do if we kept after it to decelerate, we
-    # know the sign of the acceleration.
-    if maximum_acceleration_velocity > goal_velocity:
-      self._deceleration = -self._maximum_acceleration
-    else:
-      self._deceleration = self._maximum_acceleration
+        self._constant_time = 0
+        self._acceleration = self._maximum_acceleration
+        maximum_acceleration_velocity = (
+            distance_to_target * 2 * numpy.abs(self._acceleration) +
+            self._output[1] * self._output[1])
+        if maximum_acceleration_velocity > 0:
+            maximum_acceleration_velocity = numpy.sqrt(
+                maximum_acceleration_velocity)
+        else:
+            maximum_acceleration_velocity = -numpy.sqrt(
+                -maximum_acceleration_velocity)
 
-    # We now know the top velocity we can get to.
-    top_velocity = numpy.sqrt((distance_to_target +
-                                (self._output[1] * self._output[1]) /
-                                (2.0 * self._acceleration) +
-                                (goal_velocity * goal_velocity) /
-                                (2.0 * self._deceleration)) /
-                               (-1.0 / (2.0 * self._deceleration) +
-                                1.0 / (2.0 * self._acceleration)))
+        # Since we know what we'd have to do if we kept after it to decelerate, we
+        # know the sign of the acceleration.
+        if maximum_acceleration_velocity > goal_velocity:
+            self._deceleration = -self._maximum_acceleration
+        else:
+            self._deceleration = self._maximum_acceleration
 
-    # If it can go too fast, we now know how long we get to accelerate for and
-    # how long to go at constant velocity.
-    if top_velocity > self._maximum_velocity:
-      self._acceleration_time = ((self._maximum_velocity - self._output[1]) /
-                                 self._maximum_acceleration)
-      self._constant_time = (distance_to_target +
-                        (goal_velocity * goal_velocity -
-                         self._maximum_velocity * self._maximum_velocity) /
-                        (2.0 * self._maximum_acceleration)) / self._maximum_velocity
-    else:
-      self._acceleration_time = (
-          (top_velocity - self._output[1]) / self._acceleration)
+        # We now know the top velocity we can get to.
+        top_velocity = numpy.sqrt(
+            (distance_to_target + (self._output[1] * self._output[1]) /
+             (2.0 * self._acceleration) + (goal_velocity * goal_velocity) /
+             (2.0 * self._deceleration)) / (-1.0 /
+                                            (2.0 * self._deceleration) + 1.0 /
+                                            (2.0 * self._acceleration)))
 
-    if self._output[1] > self._maximum_velocity:
-      self._constant_time = 0
-      self._acceleration_time = 0
+        # If it can go too fast, we now know how long we get to accelerate for and
+        # how long to go at constant velocity.
+        if top_velocity > self._maximum_velocity:
+            self._acceleration_time = (
+                (self._maximum_velocity - self._output[1]) /
+                self._maximum_acceleration)
+            self._constant_time = (
+                distance_to_target +
+                (goal_velocity * goal_velocity -
+                 self._maximum_velocity * self._maximum_velocity) /
+                (2.0 * self._maximum_acceleration)) / self._maximum_velocity
+        else:
+            self._acceleration_time = ((top_velocity - self._output[1]) /
+                                       self._acceleration)
 
-    self._deceleration_time = (
-        (goal_velocity - top_velocity) / self._deceleration)
+        if self._output[1] > self._maximum_velocity:
+            self._constant_time = 0
+            self._acceleration_time = 0
 
+        self._deceleration_time = ((goal_velocity - top_velocity) /
+                                   self._deceleration)
diff --git a/aos/vision/download/downloader.py b/aos/vision/download/downloader.py
index 3dafb48..0d15b87 100644
--- a/aos/vision/download/downloader.py
+++ b/aos/vision/download/downloader.py
@@ -12,39 +12,51 @@
 import os
 import os.path
 
+
 def RunAndSplitShas(shcmd):
-  out = subprocess.check_output(shcmd)
-  return [line.split(' ')[0] for line in filter(lambda x: x, out.split('\n'))]
+    out = subprocess.check_output(shcmd)
+    return [
+        line.split(' ')[0] for line in filter(lambda x: x, out.split('\n'))
+    ]
+
 
 def GetChecksums(fnames):
-  return RunAndSplitShas(["sha256sum"] + fnames)
+    return RunAndSplitShas(["sha256sum"] + fnames)
+
 
 def GetJetsonChecksums(ssh_target, fnames):
-  target_files = ["/root/%s" % fname for fname in fnames]
-  subprocess.check_call(["ssh", ssh_target, "touch " + " ".join(target_files)])
-  cmds = ["ssh", ssh_target, "sha256sum " + " ".join(target_files)]
-  return RunAndSplitShas(cmds)
+    target_files = ["/root/%s" % fname for fname in fnames]
+    subprocess.check_call(
+        ["ssh", ssh_target, "touch " + " ".join(target_files)])
+    cmds = ["ssh", ssh_target, "sha256sum " + " ".join(target_files)]
+    return RunAndSplitShas(cmds)
+
 
 def ToJetsonFname(fname):
-  if (fname[-9:] == ".stripped"):
-    fname = fname[:-9]
-  return os.path.basename(fname)
+    if (fname[-9:] == ".stripped"):
+        fname = fname[:-9]
+    return os.path.basename(fname)
+
 
 def VerifyCheckSumsAndUpload(fnames, ssh_target):
-  jetson_fnames = [ToJetsonFname(fname) for fname in fnames]
-  checksums = GetChecksums(fnames)
-  jetson_checksums = GetJetsonChecksums(ssh_target, jetson_fnames)
-  for i in range(len(fnames)):
-    if (checksums[i] != jetson_checksums[i]):
-      # if empty, unlink
-      subprocess.check_call(["ssh", ssh_target, "unlink " + jetson_fnames[i]])
-      subprocess.check_call(["scp", fnames[i], ssh_target + ":" + jetson_fnames[i]])
+    jetson_fnames = [ToJetsonFname(fname) for fname in fnames]
+    checksums = GetChecksums(fnames)
+    jetson_checksums = GetJetsonChecksums(ssh_target, jetson_fnames)
+    for i in range(len(fnames)):
+        if (checksums[i] != jetson_checksums[i]):
+            # if empty, unlink
+            subprocess.check_call(
+                ["ssh", ssh_target, "unlink " + jetson_fnames[i]])
+            subprocess.check_call(
+                ["scp", fnames[i], ssh_target + ":" + jetson_fnames[i]])
+
 
 def main(argv):
-  args = argv[argv.index('--') + 1:]
-  files = argv[1:argv.index('--')]
+    args = argv[argv.index('--') + 1:]
+    files = argv[1:argv.index('--')]
 
-  VerifyCheckSumsAndUpload(files, args[-1])
+    VerifyCheckSumsAndUpload(files, args[-1])
+
 
 if __name__ == '__main__':
-  main(sys.argv)
+    main(sys.argv)
diff --git a/build_tests/BUILD b/build_tests/BUILD
index 24fdad8..e7f236a 100644
--- a/build_tests/BUILD
+++ b/build_tests/BUILD
@@ -5,6 +5,7 @@
 load("@rules_rust//rust:defs.bzl", "rust_binary", "rust_library", "rust_test")
 load("@npm//@bazel/typescript:index.bzl", "ts_library")
 load("@npm//@bazel/concatjs:index.bzl", "karma_web_test_suite")
+load("//tools/build_rules:autocxx.bzl", "autocxx_library")
 
 cc_test(
     name = "gflags_build_test",
@@ -119,7 +120,11 @@
 
 go_library(
     name = "build_tests_lib",
-    srcs = ["hello.go"],
+    srcs = [
+        "hello.go",
+        # Not sure why gazelle wants this here?
+        "hello_autocxx.h",
+    ],
     importpath = "github.com/frc971/971-Robot-Code/build_tests",
     target_compatible_with = ["@platforms//cpu:x86_64"],
     visibility = ["//visibility:private"],
@@ -141,19 +146,19 @@
 rust_library(
     name = "hello_lib",
     srcs = ["hello_lib.rs"],
-    target_compatible_with = ["@platforms//os:linux"],
+    target_compatible_with = ["//tools/platforms/rust:has_support"],
 )
 
 rust_test(
     name = "hello_lib_test",
     crate = ":hello_lib",
-    target_compatible_with = ["@platforms//os:linux"],
+    target_compatible_with = ["//tools/platforms/rust:has_support"],
 )
 
 rust_binary(
     name = "rust_hello",
     srcs = ["rust_hello.rs"],
-    target_compatible_with = ["@platforms//os:linux"],
+    target_compatible_with = ["//tools/platforms/rust:has_support"],
     deps = [":hello_lib"],
 )
 
@@ -182,7 +187,7 @@
 rust_library(
     name = "rust_in_cc_rs",
     srcs = ["rust_in_cc.rs"],
-    target_compatible_with = ["@platforms//os:linux"],
+    target_compatible_with = ["//tools/platforms/rust:has_support"],
 )
 
 cc_test(
@@ -191,3 +196,27 @@
     target_compatible_with = ["@platforms//os:linux"],
     deps = [":rust_in_cc_rs"],
 )
+
+cc_library(
+    name = "hello_autocxx_cc",
+    hdrs = [
+        "hello_autocxx.h",
+    ],
+)
+
+autocxx_library(
+    name = "hello_autocxx",
+    srcs = ["hello_autocxx.rs"],
+    libs = [":hello_autocxx_cc"],
+    override_cc_toolchain = "@llvm_toolchain//:cc-clang-x86_64-linux",
+    target_compatible_with = ["//tools/platforms/rust:has_support"],
+)
+
+rust_test(
+    name = "hello_autocxx_test",
+    crate = ":hello_autocxx",
+    # TODO: Make Rust play happy with pic vs nopic. Details at:
+    # https://github.com/bazelbuild/rules_rust/issues/118
+    rustc_flags = ["-Crelocation-model=static"],
+    target_compatible_with = ["//tools/platforms/rust:has_support"],
+)
diff --git a/build_tests/dummy_http_server.py b/build_tests/dummy_http_server.py
index f65122a..0661a8a 100644
--- a/build_tests/dummy_http_server.py
+++ b/build_tests/dummy_http_server.py
@@ -11,6 +11,7 @@
 from http.server import BaseHTTPRequestHandler, HTTPServer
 import os
 
+
 def parse_auth(authorization):
     auth_type, auth_info = authorization.split(" ", maxsplit=1)
     if auth_type != "Basic":
@@ -18,7 +19,9 @@
     username, _ = base64.b64decode(auth_info).decode().split(":", 1)
     return username
 
+
 class Server(BaseHTTPRequestHandler):
+
     def _set_response(self):
         self.send_response(200)
         self.send_header("Content-type", "text/html")
@@ -34,6 +37,7 @@
         username = parse_auth(self.headers["Authorization"])
         self._write(f"Hello, {username}")
 
+
 def main():
     port = int(os.environ["APACHE_WRAPPED_PORT"])
     server_address = ("", port)
@@ -44,5 +48,6 @@
         pass
     httpd.server_close()
 
+
 if __name__ == "__main__":
     main()
diff --git a/build_tests/hello_autocxx.h b/build_tests/hello_autocxx.h
new file mode 100644
index 0000000..8b6e285
--- /dev/null
+++ b/build_tests/hello_autocxx.h
@@ -0,0 +1,8 @@
+#ifndef BUILD_TESTS_HELLO_AUTOCXX_H_
+#define BUILD_TESTS_HELLO_AUTOCXX_H_
+
+#include <stdlib.h>
+
+int plain_function() { return 971; }
+
+#endif  // BUILD_TESTS_HELLO_AUTOCXX_H_
diff --git a/build_tests/hello_autocxx.rs b/build_tests/hello_autocxx.rs
new file mode 100644
index 0000000..483f8be
--- /dev/null
+++ b/build_tests/hello_autocxx.rs
@@ -0,0 +1,16 @@
+use autocxx::include_cpp;
+
+include_cpp! (
+#include "build_tests/hello_autocxx.h"
+generate!("plain_function")
+);
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_plain_function() {
+        assert_eq!(autocxx::c_int::from(971), unsafe { ffi::plain_function() });
+    }
+}
diff --git a/build_tests/python_opencv.py b/build_tests/python_opencv.py
index c4d8591..5f5f1aa 100644
--- a/build_tests/python_opencv.py
+++ b/build_tests/python_opencv.py
@@ -3,4 +3,4 @@
 import cv2
 
 if __name__ == '__main__':
-  cv2.SIFT_create()
+    cv2.SIFT_create()
diff --git a/debian/download_packages.py b/debian/download_packages.py
index be43120..6d548b5 100755
--- a/debian/download_packages.py
+++ b/debian/download_packages.py
@@ -10,18 +10,19 @@
 import argparse
 import hashlib
 
+
 def initialize_apt(apt_dir, apt_args, args):
-  os.mkdir(os.path.join(apt_dir, 'etc'))
-  os.mkdir(os.path.join(apt_dir, 'etc', 'apt'))
-  os.mkdir(os.path.join(apt_dir, 'etc', 'apt', 'trusted.gpg.d'))
-  os.mkdir(os.path.join(apt_dir, 'etc', 'apt', 'preferences.d'))
-  os.mkdir(os.path.join(apt_dir, 'var'))
-  os.mkdir(os.path.join(apt_dir, 'var', 'lib'))
-  os.mkdir(os.path.join(apt_dir, 'var', 'lib', 'dpkg'))
-  with open(os.path.join(apt_dir, 'var', 'lib', 'dpkg', 'status'), 'w'):
-    pass
-  with open(os.path.join(apt_dir, 'etc', 'apt', 'sources.list'), 'w') as f:
-    f.write("""
+    os.mkdir(os.path.join(apt_dir, 'etc'))
+    os.mkdir(os.path.join(apt_dir, 'etc', 'apt'))
+    os.mkdir(os.path.join(apt_dir, 'etc', 'apt', 'trusted.gpg.d'))
+    os.mkdir(os.path.join(apt_dir, 'etc', 'apt', 'preferences.d'))
+    os.mkdir(os.path.join(apt_dir, 'var'))
+    os.mkdir(os.path.join(apt_dir, 'var', 'lib'))
+    os.mkdir(os.path.join(apt_dir, 'var', 'lib', 'dpkg'))
+    with open(os.path.join(apt_dir, 'var', 'lib', 'dpkg', 'status'), 'w'):
+        pass
+    with open(os.path.join(apt_dir, 'etc', 'apt', 'sources.list'), 'w') as f:
+        f.write("""
 deb http://deb.debian.org/debian/ {release} main contrib non-free
 deb-src http://deb.debian.org/debian/ {release} main contrib non-free
 
@@ -34,103 +35,117 @@
 deb http://deb.debian.org/debian {release}-backports main contrib non-free
 deb-src http://deb.debian.org/debian {release}-backports main contrib non-free
 """.format(release=args.release))
-  for key in args.apt_key:
-    basename = os.path.basename(key)
-    urllib.request.urlretrieve(key, os.path.join(apt_dir, 'etc', 'apt', 'trusted.gpg.d', basename))
-  subprocess.check_call(["apt-get"] + apt_args + ["update"])
+    for key in args.apt_key:
+        basename = os.path.basename(key)
+        urllib.request.urlretrieve(
+            key, os.path.join(apt_dir, 'etc', 'apt', 'trusted.gpg.d',
+                              basename))
+    subprocess.check_call(["apt-get"] + apt_args + ["update"])
+
 
 def get_deps(apt_args, package):
-  env = dict(os.environ)
-  del env['LD_LIBRARY_PATH']
-  out = subprocess.check_output(["apt-rdepends"] + apt_args + [package], env=env)
-  deps = out.splitlines()
-  return set([dep for dep in deps if not dep.startswith(b" ")])
+    env = dict(os.environ)
+    del env['LD_LIBRARY_PATH']
+    out = subprocess.check_output(["apt-rdepends"] + apt_args + [package],
+                                  env=env)
+    deps = out.splitlines()
+    return set([dep for dep in deps if not dep.startswith(b" ")])
+
 
 def get_all_deps(apt_args, packages):
-  deps = set()
-  for package in packages or ():
-    deps.update(get_deps(apt_args, package))
-  return deps
+    deps = set()
+    for package in packages or ():
+        deps.update(get_deps(apt_args, package))
+    return deps
+
 
 def map_virtual_packages(packages):
-  '''Maps known virtual packages to the preferred concrete packages which
+    '''Maps known virtual packages to the preferred concrete packages which
   provide them.'''
-  for package in packages:
-    if package == b'python-numpy-abi9':
-      yield b'python-numpy'
-      continue
-    if package == b'python3-numpy-abi9':
-      yield b'python3-numpy'
-      continue
-    if package == b'libjack-0.125':
-      yield b'libjack-jackd2-0'
-      continue
-    if package == b'fonts-freefont':
-      yield b'fonts-freefont-ttf'
-      continue
-    if package == b'gsettings-backend':
-      yield b'dconf-gsettings-backend'
-      continue
-    if package == b'gdal-abi-2-4-0':
-      yield b'libgdal20'
-      continue
-    if package == b'libglu1':
-      yield b'libglu1-mesa'
-      continue
-    if package == b'liblapack.so.3':
-      yield b'liblapack3'
-      continue
-    if package == b'libopencl1':
-      yield b'ocl-icd-libopencl1'
-      continue
-    if package == b'libgcc1':
-      yield b'libgcc-s1'
-      continue
-    if package == b'libopencl-1.2-1':
-      yield b'ocl-icd-libopencl1'
-      continue
-    if package == b'libblas.so.3':
-      yield b'libblas3'
-      continue
-    if package == b'debconf-2.0':
-      yield b'debconf'
-      continue
-    yield package
+    for package in packages:
+        if package == b'python-numpy-abi9':
+            yield b'python-numpy'
+            continue
+        if package == b'python3-numpy-abi9':
+            yield b'python3-numpy'
+            continue
+        if package == b'libjack-0.125':
+            yield b'libjack-jackd2-0'
+            continue
+        if package == b'fonts-freefont':
+            yield b'fonts-freefont-ttf'
+            continue
+        if package == b'gsettings-backend':
+            yield b'dconf-gsettings-backend'
+            continue
+        if package == b'gdal-abi-2-4-0':
+            yield b'libgdal20'
+            continue
+        if package == b'libglu1':
+            yield b'libglu1-mesa'
+            continue
+        if package == b'liblapack.so.3':
+            yield b'liblapack3'
+            continue
+        if package == b'libopencl1':
+            yield b'ocl-icd-libopencl1'
+            continue
+        if package == b'libgcc1':
+            yield b'libgcc-s1'
+            continue
+        if package == b'libopencl-1.2-1':
+            yield b'ocl-icd-libopencl1'
+            continue
+        if package == b'libblas.so.3':
+            yield b'libblas3'
+            continue
+        if package == b'debconf-2.0':
+            yield b'debconf'
+            continue
+        yield package
+
 
 def download_deps(apt_args, packages, excludes, force_includes):
-  deps = get_all_deps(apt_args, packages)
-  exclude_deps = get_all_deps(apt_args, excludes)
-  deps -= exclude_deps
-  force_include_deps = get_all_deps(apt_args, force_includes)
-  deps |= force_include_deps
-  env = dict(os.environ)
-  del env['LD_LIBRARY_PATH']
-  subprocess.check_call([b"apt-get"] + [a.encode('utf-8') for a in apt_args] + [b"download"] + list(map_virtual_packages(deps)), env=env)
+    deps = get_all_deps(apt_args, packages)
+    exclude_deps = get_all_deps(apt_args, excludes)
+    deps -= exclude_deps
+    force_include_deps = get_all_deps(apt_args, force_includes)
+    deps |= force_include_deps
+    env = dict(os.environ)
+    del env['LD_LIBRARY_PATH']
+    subprocess.check_call([b"apt-get"] + [a.encode('utf-8')
+                                          for a in apt_args] + [b"download"] +
+                          list(map_virtual_packages(deps)),
+                          env=env)
+
 
 def fixup_files():
-  # Gotta remove those pesky epoch numbers in the file names. Bazel doesn't
-  # like them.
-  regex = re.compile(".%3a")
-  contents = os.listdir(os.getcwd())
-  for deb in contents:
-    new_name = regex.sub("", deb)
-    if new_name != deb:
-      os.rename(deb, new_name)
+    # Gotta remove those pesky epoch numbers in the file names. Bazel doesn't
+    # like them.
+    regex = re.compile(".%3a")
+    contents = os.listdir(os.getcwd())
+    for deb in contents:
+        new_name = regex.sub("", deb)
+        if new_name != deb:
+            os.rename(deb, new_name)
+
 
 def sha256_checksum(filename, block_size=65536):
-  sha256 = hashlib.sha256()
-  with open(filename, 'rb') as f:
-    for block in iter(lambda: f.read(block_size), b''):
-      sha256.update(block)
-  return sha256.hexdigest()
+    sha256 = hashlib.sha256()
+    with open(filename, 'rb') as f:
+        for block in iter(lambda: f.read(block_size), b''):
+            sha256.update(block)
+    return sha256.hexdigest()
+
 
 def print_file_list():
-  contents = os.listdir(os.getcwd())
-  contents.sort()
-  print("_files = {")
-  for deb in contents:
-    print('  "%s": "%s",' % (deb, sha256_checksum(deb)))
-  print("}")
+    contents = os.listdir(os.getcwd())
+    contents.sort()
+    print("_files = {")
+    for deb in contents:
+        print('  "%s": "%s",' % (deb, sha256_checksum(deb)))
+    print("}")
+
 
 _ALWAYS_EXCLUDE = [
     "dbus-session-bus",
@@ -144,41 +159,66 @@
     "libc6-dev",
 ]
 
+
 def main(argv):
-  parser = argparse.ArgumentParser()
-  parser.add_argument("--exclude", "-e", type=str, action="append", help="A package to exclude from the list")
-  parser.add_argument("--force-include", type=str, action="append", help="Force include this and its dependencies. Even if listed in excludes.")
-  parser.add_argument("--arch", type=str, default="amd64", help="Architecture to download files for.")
-  parser.add_argument("--apt-dir", type=str, help=" ".join([
-    "File to generate and store apt files in.",
-    "Helpful for saving time when downloading multiple groups of packages.",
-    "Some flags will be ignored in favor of the values used to create this folder, so be careful.",
-    ]))
-  parser.add_argument("--release", type=str, default="bullseye", help="Debian release to use.")
-  parser.add_argument("--apt-key", type=str, action="append", default=[
-    "https://ftp-master.debian.org/keys/archive-key-11.asc",
-    "https://ftp-master.debian.org/keys/archive-key-11-security.asc",
-  ], help="URL of an additional apt archive key to trust.")
-  parser.add_argument("package", nargs="+", help="The packages to download.")
-  args = parser.parse_args(argv[1:])
-  if args.apt_dir:
-    apt_dir = args.apt_dir
-  else:
-    apt_dir = tempfile.mkdtemp()
-  apt_args = ["-o", "Dir=" + apt_dir, "-o", "APT::Architecture=" + args.arch]
-  if not args.apt_dir:
-    print("Creating apt files in %s" % apt_dir)
-    initialize_apt(apt_dir, apt_args, args)
-  folder = tempfile.mkdtemp()
-  os.chdir(folder)
-  excludes = args.exclude or []
-  # Exclude common packages that don't make sense to include in everything all
-  # the time.
-  excludes += _ALWAYS_EXCLUDE
-  download_deps(apt_args, args.package, excludes, args.force_include)
-  fixup_files()
-  print_file_list()
-  print("Your packages are all in %s" % folder)
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--exclude",
+                        "-e",
+                        type=str,
+                        action="append",
+                        help="A package to exclude from the list")
+    parser.add_argument(
+        "--force-include",
+        type=str,
+        action="append",
+        help=
+        "Force include this and its dependencies. Even if listed in excludes.")
+    parser.add_argument("--arch",
+                        type=str,
+                        default="amd64",
+                        help="Architecture to download files for.")
+    parser.add_argument(
+        "--apt-dir",
+        type=str,
+        help=" ".join([
+            "File to generate and store apt files in.",
+            "Helpful for saving time when downloading multiple groups of packages.",
+            "Some flags will be ignored in favor of the values used to create this folder, so be careful.",
+        ]))
+    parser.add_argument("--release",
+                        type=str,
+                        default="bullseye",
+                        help="Debian release to use.")
+    parser.add_argument(
+        "--apt-key",
+        type=str,
+        action="append",
+        default=[
+            "https://ftp-master.debian.org/keys/archive-key-11.asc",
+            "https://ftp-master.debian.org/keys/archive-key-11-security.asc",
+        ],
+        help="URL of an additional apt archive key to trust.")
+    parser.add_argument("package", nargs="+", help="The packages to download.")
+    args = parser.parse_args(argv[1:])
+    if args.apt_dir:
+        apt_dir = args.apt_dir
+    else:
+        apt_dir = tempfile.mkdtemp()
+    apt_args = ["-o", "Dir=" + apt_dir, "-o", "APT::Architecture=" + args.arch]
+    if not args.apt_dir:
+        print("Creating apt files in %s" % apt_dir)
+        initialize_apt(apt_dir, apt_args, args)
+    folder = tempfile.mkdtemp()
+    os.chdir(folder)
+    excludes = args.exclude or []
+    # Exclude common packages that don't make sense to include in everything all
+    # the time.
+    excludes += _ALWAYS_EXCLUDE
+    download_deps(apt_args, args.package, excludes, args.force_include)
+    fixup_files()
+    print_file_list()
+    print("Your packages are all in %s" % folder)
+
 
 if __name__ == "__main__":
-  sys.exit(main(sys.argv))
+    sys.exit(main(sys.argv))
diff --git a/debian/python_yapf.BUILD b/debian/python_yapf.BUILD
new file mode 100644
index 0000000..c41558c
--- /dev/null
+++ b/debian/python_yapf.BUILD
@@ -0,0 +1,6 @@
+py_binary(
+    name = "python_yapf",
+    srcs = glob(["yapf/**/*.py"]),
+    main = "yapf/__main__.py",
+    visibility = ["//visibility:public"],
+)
diff --git a/frc971/analysis/BUILD b/frc971/analysis/BUILD
index 934921b..abb950b 100644
--- a/frc971/analysis/BUILD
+++ b/frc971/analysis/BUILD
@@ -37,7 +37,6 @@
     srcs = ["plot_index.ts"],
     target_compatible_with = ["@platforms//os:linux"],
     deps = [
-        ":plot_data_utils",
         "//aos:configuration_ts_fbs",
         "//aos/network/www:demo_plot",
         "//aos/network/www:proxy",
@@ -71,11 +70,21 @@
     ],
 )
 
+genrule(
+    name = "copy_css",
+    srcs = [
+        "//aos/network/www:styles.css",
+    ],
+    outs = ["styles.css"],
+    cmd = "cp $< $@",
+)
+
 filegroup(
     name = "plotter_files",
     srcs = [
         "index.html",
         "plot_index_bundle.min.js",
+        "styles.css",
     ],
 )
 
@@ -146,7 +155,7 @@
     hdrs = ["in_process_plotter.h"],
     data = [
         ":plotter",
-        ":plotter_files",
+        "//frc971/analysis/cpp_plot:cpp_plot_files",
     ],
     deps = [
         ":plot_data_fbs",
diff --git a/frc971/analysis/cpp_plot/BUILD b/frc971/analysis/cpp_plot/BUILD
new file mode 100644
index 0000000..d6e74c3
--- /dev/null
+++ b/frc971/analysis/cpp_plot/BUILD
@@ -0,0 +1,43 @@
+load("@npm//@bazel/typescript:index.bzl", "ts_library")
+load("//tools/build_rules:js.bzl", "rollup_bundle")
+
+package(default_visibility = ["//visibility:public"])
+
+ts_library(
+    name = "cpp_plot",
+    srcs = ["cpp_plot.ts"],
+    target_compatible_with = ["@platforms//os:linux"],
+    deps = [
+        "//aos:configuration_ts_fbs",
+        "//aos/network/www:proxy",
+        "//frc971/analysis:plot_data_utils",
+    ],
+)
+
+rollup_bundle(
+    name = "cpp_plot_bundle",
+    entry_point = "cpp_plot.ts",
+    target_compatible_with = ["@platforms//os:linux"],
+    deps = [
+        ":cpp_plot",
+    ],
+)
+
+genrule(
+    name = "copy_css",
+    srcs = [
+        "//aos/network/www:styles.css",
+    ],
+    outs = ["styles.css"],
+    cmd = "cp $< $@",
+)
+
+filegroup(
+    name = "cpp_plot_files",
+    srcs = [
+        "cpp_plot_bundle.js",
+        "cpp_plot_bundle.min.js",
+        "index.html",
+        "styles.css",
+    ],
+)
diff --git a/frc971/analysis/cpp_plot/cpp_plot.ts b/frc971/analysis/cpp_plot/cpp_plot.ts
new file mode 100644
index 0000000..a704e89
--- /dev/null
+++ b/frc971/analysis/cpp_plot/cpp_plot.ts
@@ -0,0 +1,15 @@
+// Plotter for the C++ in-process plotter.
+import {Configuration} from 'org_frc971/aos/configuration_generated';
+import {Connection} from 'org_frc971/aos/network/www/proxy';
+import {plotData} from 'org_frc971/frc971/analysis/plot_data_utils';
+
+const rootDiv = document.createElement('div');
+rootDiv.classList.add('aos_cpp_plot');
+document.body.appendChild(rootDiv);
+
+const conn = new Connection();
+conn.connect();
+
+conn.addConfigHandler((config: Configuration) => {
+  plotData(conn, rootDiv);
+});
diff --git a/frc971/analysis/cpp_plot/index.html b/frc971/analysis/cpp_plot/index.html
new file mode 100644
index 0000000..fbb5199
--- /dev/null
+++ b/frc971/analysis/cpp_plot/index.html
@@ -0,0 +1,9 @@
+<html>
+  <head>
+    <script src="cpp_plot_bundle.min.js" defer></script>
+    <link rel="stylesheet" href="styles.css">
+  </head>
+  <body>
+  </body>
+</html>
+
diff --git a/frc971/analysis/in_process_plotter.cc b/frc971/analysis/in_process_plotter.cc
index f345ebc..545740d 100644
--- a/frc971/analysis/in_process_plotter.cc
+++ b/frc971/analysis/in_process_plotter.cc
@@ -6,7 +6,7 @@
 namespace analysis {
 
 namespace {
-const char *kDataPath = "frc971/analysis";
+const char *kDataPath = "frc971/analysis/cpp_plot";
 const char *kConfigPath = "frc971/analysis/plotter.json";
 }  // namespace
 
@@ -19,15 +19,26 @@
       builder_(plot_sender_.MakeBuilder()) {
   web_proxy_.SetDataPath(kDataPath);
   event_loop_->SkipTimingReport();
-  color_wheel_.push_back(Color(1, 0, 0));
-  color_wheel_.push_back(Color(0, 1, 0));
-  color_wheel_.push_back(Color(0, 0, 1));
-  color_wheel_.push_back(Color(1, 1, 0));
-  color_wheel_.push_back(Color(0, 1, 1));
-  color_wheel_.push_back(Color(1, 0, 1));
-  color_wheel_.push_back(Color(1, 0.6, 0));
-  color_wheel_.push_back(Color(0.6, 0.3, 0));
-  color_wheel_.push_back(Color(1, 1, 1));
+
+  color_wheel_.emplace_back(ColorWheelColor{.name = "red", .color = {1, 0, 0}});
+  color_wheel_.emplace_back(
+      ColorWheelColor{.name = "green", .color = {0, 1, 0}});
+  color_wheel_.emplace_back(
+      ColorWheelColor{.name = "purple", .color = {0.54, 0.3, 0.75}});
+  color_wheel_.emplace_back(
+      ColorWheelColor{.name = "blue", .color = {0, 0, 1}});
+  color_wheel_.emplace_back(
+      ColorWheelColor{.name = "yellow", .color = {1, 1, 0}});
+  color_wheel_.emplace_back(
+      ColorWheelColor{.name = "teal", .color = {0, 1, 1}});
+  color_wheel_.emplace_back(
+      ColorWheelColor{.name = "pink", .color = {1, 0, 1}});
+  color_wheel_.emplace_back(
+      ColorWheelColor{.name = "orange", .color = {1, 0.6, 0}});
+  color_wheel_.emplace_back(
+      ColorWheelColor{.name = "brown", .color = {0.6, 0.3, 0}});
+  color_wheel_.emplace_back(
+      ColorWheelColor{.name = "white", .color = {1, 1, 1}});
 }
 
 void Plotter::Spin() { event_loop_factory_.Run(); }
@@ -63,15 +74,14 @@
 }
 
 void Plotter::AddLine(const std::vector<double> &x,
-                      const std::vector<double> &y, std::string_view label,
-                      std::string_view line_style) {
+                      const std::vector<double> &y, LineOptions options) {
   CHECK_EQ(x.size(), y.size());
   CHECK(!position_.IsNull())
       << "You must call AddFigure() before calling AddLine().";
 
   flatbuffers::Offset<flatbuffers::String> label_offset;
-  if (!label.empty()) {
-    label_offset = builder_.fbb()->CreateString(label);
+  if (!options.label.empty()) {
+    label_offset = builder_.fbb()->CreateString(options.label);
   }
 
   std::vector<Point> points;
@@ -81,16 +91,28 @@
   const flatbuffers::Offset<flatbuffers::Vector<const Point *>> points_offset =
       builder_.fbb()->CreateVectorOfStructs(points);
 
-  const Color *color = &color_wheel_.at(color_wheel_position_);
-  color_wheel_position_ = (color_wheel_position_ + 1) % color_wheel_.size();
+  const Color *color;
+  if (options.color.empty()) {
+    color = &color_wheel_.at(color_wheel_position_).color;
+    color_wheel_position_ = (color_wheel_position_ + 1) % color_wheel_.size();
+  } else {
+    auto it = std::find_if(
+        color_wheel_.begin(), color_wheel_.end(),
+        [options_color = options.color](const ColorWheelColor &color) {
+          return color.name == options_color;
+        });
+    CHECK(it != color_wheel_.end()) << ": Failed to find " << options.color;
+    color = &(it->color);
+  }
 
   LineStyle::Builder style_builder = builder_.MakeBuilder<LineStyle>();
-  if (line_style.find('*') != line_style.npos) {
-    style_builder.add_point_size(3.0);
+  if (options.line_style.find('*') != options.line_style.npos) {
+    style_builder.add_point_size(options.point_size);
   } else {
     style_builder.add_point_size(0.0);
   }
-  style_builder.add_draw_line(line_style.find('-') != line_style.npos);
+  style_builder.add_draw_line(options.line_style.find('-') !=
+                              options.line_style.npos);
   const flatbuffers::Offset<LineStyle> style_offset = style_builder.Finish();
 
   auto line_builder = builder_.MakeBuilder<Line>();
@@ -132,8 +154,7 @@
   plot_builder.add_title(title_);
   plot_builder.add_figures(figures_offset);
 
-  CHECK_EQ(builder_.Send(plot_builder.Finish()),
-           aos::RawSender::Error::kOk);
+  CHECK_EQ(builder_.Send(plot_builder.Finish()), aos::RawSender::Error::kOk);
 
   builder_ = plot_sender_.MakeBuilder();
 
diff --git a/frc971/analysis/in_process_plotter.h b/frc971/analysis/in_process_plotter.h
index 1c97287..4f05c19 100644
--- a/frc971/analysis/in_process_plotter.h
+++ b/frc971/analysis/in_process_plotter.h
@@ -40,16 +40,33 @@
 
   // Sets the title for the current set of plots; if you
   void Title(std::string_view title);
-  void AddFigure(std::string_view title = "", double width = 900,
-                 double height = 400);
+  void AddFigure(std::string_view title = "", double width = 0,
+                 double height = 0);
+  struct LineOptions {
+    std::string_view label = "";
+    std::string_view line_style = "*-";
+    std::string_view color = "";
+    double point_size = 3.0;
+  };
+
   void AddLine(const std::vector<double> &x, const std::vector<double> &y,
-               std::string_view label = "", std::string_view line_style = "*-");
+               std::string_view label) {
+    AddLine(x, y, LineOptions{.label = label});
+  }
+  void AddLine(const std::vector<double> &x, const std::vector<double> &y,
+               std::string_view label, std::string_view line_style) {
+    AddLine(x, y, LineOptions{.label = label, .line_style = line_style});
+  }
+  void AddLine(const std::vector<double> &x, const std::vector<double> &y,
+               LineOptions options);
+
   void ShareXAxis(bool share) { share_x_axis_ = share; }
   void XLabel(std::string_view label);
   void YLabel(std::string_view label);
   void Publish();
 
   void Spin();
+
  private:
   void MaybeFinishFigure();
 
@@ -70,8 +87,13 @@
   std::vector<flatbuffers::Offset<Figure>> figures_;
   std::vector<flatbuffers::Offset<Line>> lines_;
 
+  struct ColorWheelColor {
+    std::string name;
+    Color color;
+  };
+
   size_t color_wheel_position_ = 0;
-  std::vector<Color> color_wheel_;
+  std::vector<ColorWheelColor> color_wheel_;
 };
 
 }  // namespace analysis
diff --git a/frc971/analysis/index.html b/frc971/analysis/index.html
index edd2483..22b42c7 100644
--- a/frc971/analysis/index.html
+++ b/frc971/analysis/index.html
@@ -1,6 +1,7 @@
 <html>
   <head>
     <script src="plot_index_bundle.min.js" defer></script>
+    <link rel="stylesheet" href="styles.css">
   </head>
   <body>
   </body>
diff --git a/frc971/analysis/log_reader_test.py b/frc971/analysis/log_reader_test.py
index b41aa38..7af08e6 100644
--- a/frc971/analysis/log_reader_test.py
+++ b/frc971/analysis/log_reader_test.py
@@ -6,6 +6,7 @@
 
 
 class LogReaderTest(unittest.TestCase):
+
     def setUp(self):
         self.reader = LogReader("external/sample_logfile/file/log.fbs")
         # A list of all the channels in the logfile--this is used to confirm that
@@ -13,16 +14,14 @@
         self.all_channels = [
             ("/aos", "aos.JoystickState"), ("/aos", "aos.RobotState"),
             ("/aos", "aos.timing.Report"), ("/aos", "frc971.PDPValues"),
-            ("/aos",
-             "frc971.wpilib.PneumaticsToLog"), ("/autonomous",
-                                                "aos.common.actions.Status"),
+            ("/aos", "frc971.wpilib.PneumaticsToLog"),
+            ("/autonomous", "aos.common.actions.Status"),
             ("/autonomous", "frc971.autonomous.AutonomousMode"),
-            ("/autonomous", "frc971.autonomous.Goal"), ("/camera",
-                                                        "y2019.CameraLog"),
+            ("/autonomous", "frc971.autonomous.Goal"),
+            ("/camera", "y2019.CameraLog"),
             ("/camera", "y2019.control_loops.drivetrain.CameraFrame"),
-            ("/drivetrain",
-             "frc971.IMUValues"), ("/drivetrain",
-                                   "frc971.control_loops.drivetrain.Goal"),
+            ("/drivetrain", "frc971.IMUValues"),
+            ("/drivetrain", "frc971.control_loops.drivetrain.Goal"),
             ("/drivetrain",
              "frc971.control_loops.drivetrain.LocalizerControl"),
             ("/drivetrain", "frc971.control_loops.drivetrain.Output"),
@@ -31,9 +30,8 @@
             ("/drivetrain", "frc971.sensors.GyroReading"),
             ("/drivetrain",
              "y2019.control_loops.drivetrain.TargetSelectorHint"),
-            ("/superstructure",
-             "y2019.StatusLight"), ("/superstructure",
-                                    "y2019.control_loops.superstructure.Goal"),
+            ("/superstructure", "y2019.StatusLight"),
+            ("/superstructure", "y2019.control_loops.superstructure.Goal"),
             ("/superstructure", "y2019.control_loops.superstructure.Output"),
             ("/superstructure", "y2019.control_loops.superstructure.Position"),
             ("/superstructure", "y2019.control_loops.superstructure.Status")
diff --git a/frc971/analysis/plot_data_utils.ts b/frc971/analysis/plot_data_utils.ts
index a93a2ae..1362a09 100644
--- a/frc971/analysis/plot_data_utils.ts
+++ b/frc971/analysis/plot_data_utils.ts
@@ -42,8 +42,17 @@
         for (let ii = 0; ii < plotFb.figuresLength(); ++ii) {
           const figure = plotFb.figures(ii);
           const figureDiv = document.createElement('div');
-          figureDiv.style.width = figure.position().width().toString() + "px";
-          figureDiv.style.height = figure.position().height().toString() + "px";
+          if (figure.position().width() == 0) {
+            figureDiv.style.width = '100%';
+          } else {
+            figureDiv.style.width = figure.position().width().toString() + 'px';
+          }
+          if (figure.position().height() == 0) {
+            figureDiv.style.height = '100%';
+          } else {
+            figureDiv.style.height =
+                figure.position().height().toString() + 'px';
+          }
           figureDiv.style.position = 'relative';
           div.appendChild(figureDiv);
           const plot = new Plot(figureDiv);
@@ -90,5 +99,12 @@
             line.setPoints(points);
           }
         }
+
+        // If this is the first new element (ignoring the placeholder up top),
+        // select it by default.
+        if (plotSelect.length == 2) {
+          plotSelect.value = name;
+          plotSelect.dispatchEvent(new Event('input'));
+        }
       });
 }
diff --git a/frc971/analysis/plot_index.ts b/frc971/analysis/plot_index.ts
index 150407e..af9bd73 100644
--- a/frc971/analysis/plot_index.ts
+++ b/frc971/analysis/plot_index.ts
@@ -55,9 +55,9 @@
 import {plotVision as plot2022Vision} from
     'org_frc971/y2022/vision/vision_plotter'
 import {plotDemo} from 'org_frc971/aos/network/www/demo_plot';
-import {plotData} from 'org_frc971/frc971/analysis/plot_data_utils';
 
 const rootDiv = document.createElement('div');
+rootDiv.style.width = '100%';
 document.body.appendChild(rootDiv);
 
 const helpDiv = document.createElement('div');
@@ -96,9 +96,10 @@
 rootDiv.appendChild(plotSelect);
 
 const plotDiv = document.createElement('div');
-plotDiv.style.top = (plotSelect.getBoundingClientRect().bottom + 10).toString();
+plotDiv.style.marginTop = '10px';
 plotDiv.style.left = '0';
 plotDiv.style.position = 'absolute';
+plotDiv.style.width = '100%';
 rootDiv.appendChild(plotDiv);
 
 // The master list of all the plots that we provide. For a given config, it
@@ -124,7 +125,6 @@
   ['2022 Intake Back', new PlotState(plotDiv, plot2022IntakeBack)],
   ['2022 Climber', new PlotState(plotDiv, plot2022Climber)],
   ['2022 Turret', new PlotState(plotDiv, plot2022Turret)],
-  ['C++ Plotter', new PlotState(plotDiv, plotData)],
   ['Y2021 3rd Robot Superstructure', new PlotState(plotDiv, plot2021Superstructure)],
 ]);
 
diff --git a/frc971/analysis/plotter_config.json b/frc971/analysis/plotter_config.json
index 49266ee..fef4a50 100644
--- a/frc971/analysis/plotter_config.json
+++ b/frc971/analysis/plotter_config.json
@@ -3,7 +3,7 @@
     {
       "name": "/analysis",
       "type": "frc971.analysis.Plot",
-      "max_size": 10000000
+      "max_size": 1000000000
     }
   ],
   "imports": [
diff --git a/frc971/codelab/README.md b/frc971/codelab/README.md
index 962af02..e4cfb4b 100644
--- a/frc971/codelab/README.md
+++ b/frc971/codelab/README.md
@@ -1,7 +1,7 @@
 # FRC971 "Codelab"
 
 Welcome! This folder contains a "codelab" where you can go through the process
-of fleshing out a basic control-loop using the same infrastructure as we do for
+of fleshing out a basic control loop using the same infrastructure as we do for
 the control loops that normally run on our robots. Currently, this just consists
 of a single codelab; the instructions can be found below.
 
@@ -11,12 +11,12 @@
 
 ## Flatbuffers tutorial
 
-Our code uses flatbuffers extensively. If you're unfamiliar with them, you can take a look at these [tutorials](https://google.github.io/flatbuffers/flatbuffers_guide_tutorial.html) for how to use them.  This is optional but reommended if you are looking for more background on flatbuffers, and can be done before or after the codelab.
+Our code uses flatbuffers extensively. If you're unfamiliar with them, you can take a look at these [tutorials](https://google.github.io/flatbuffers/flatbuffers_guide_tutorial.html) for how to use them.  This is optional but recommended if you are looking for more background on flatbuffers, and can be done before or after the codelab.
 
 ## Instructions
 
 This codelab helps build basic knowledge of how to use 971 control loop
-primatives.
+primitives.
 
 When this codelab is run, it performs a series of tests to check whether the code is working properly. Your job is to add or make changes to the code to get the tests to pass.
 
@@ -33,7 +33,7 @@
 
 ### Control loops
 
-A control loop is a piece of code that is repeatedly executed while the robot is running, recieiving input from the robot controllers and sensors and sending intructions to the motors that control the robot.
+A control loop is a piece of code that is repeatedly executed while the robot is running, receiving input from the robot controllers and sensors and sending instructions to the motors that control the robot.
 
 Control loops all follow the same structure:
 There are 4 channels that send and recieve instructions. These channels are goal, position, output, and status. Goal and position are input channels, which recieve messages from the robot's sensors and input from the controller. Output and status are output channels, which send messages to the motors.
@@ -52,12 +52,12 @@
 implementation in basic.cc so that it uses the input goal/position to
 meaningfully populate the output/status messages. You can find descriptions
 of exactly what the fields of the messages mean by reading all the *.fbs
-files, and the tests below can be reviewed to help understand exactly what
-behavior is expected.
+files.  The tests in basic_test.cc can be reviewed to help understand exactly
+what behavior is expected.
 
 ### Submitting a code review
 
 Once you can get the tests to pass, follow the directions in [this file](https://software.frc971.org/gerrit/plugins/gitiles/971-Robot-Code/+/refs/heads/master/documentation/tutorials/submitting-code-for-a-review.md) for creating a
 code review of the change. We will not actually *submit* the change (since
-that  would remove the challenge for future students), but we will go through
-the code review process.
\ No newline at end of file
+that would remove the challenge for future students), but we will go through
+the code review process.
diff --git a/frc971/codelab/basic.cc b/frc971/codelab/basic.cc
index 325db73..66f08f9 100644
--- a/frc971/codelab/basic.cc
+++ b/frc971/codelab/basic.cc
@@ -10,17 +10,16 @@
 void Basic::RunIteration(const Goal *goal, const Position *position,
                          aos::Sender<Output>::Builder *output,
                          aos::Sender<Status>::Builder *status) {
-
   // FIX HERE: Set the intake_voltage to 12 Volts when
   // intake is requested (via intake in goal). Make sure not to set
   // the motor to anything but 0 V when the limit_sensor is pressed.
 
-  // This line tells the compiler to to ignore the fact that goal and
+  // This line tells the compiler to ignore the fact that goal and
   // position are not used in the code. You will need to read these messages
   // and use their values to determine the necessary output and status.
   (void)goal, (void)position;
 
-  if (output) {
+  if (output != nullptr) {
     Output::Builder builder = output->MakeBuilder<Output>();
 
     // FIX HERE: As of now, this sets the intake voltage to 0 in
@@ -33,7 +32,7 @@
     (void)output->Send(builder.Finish());
   }
 
-  if (status) {
+  if (status != nullptr) {
     Status::Builder builder = status->MakeBuilder<Status>();
     // FIX HERE: Fill out the Status message! In order to fill the
     // information in the message, use the add_<name of the field>() method
diff --git a/frc971/codelab/basic_status.fbs b/frc971/codelab/basic_status.fbs
index 58a29db..d03c785 100644
--- a/frc971/codelab/basic_status.fbs
+++ b/frc971/codelab/basic_status.fbs
@@ -5,8 +5,6 @@
   // finished. There is one field, intake_complete, which should be set to
   // true by the intake subsystem when the Goal message is requesting the intake
   // to be on and the limit sensor from the position message has been enabled.
-
-
   intake_complete:bool (id: 0);
 }
 
diff --git a/frc971/control_loops/drivetrain/wheel_nonlinearity_plot.py b/frc971/control_loops/drivetrain/wheel_nonlinearity_plot.py
index 9caa9dc..0c21957 100755
--- a/frc971/control_loops/drivetrain/wheel_nonlinearity_plot.py
+++ b/frc971/control_loops/drivetrain/wheel_nonlinearity_plot.py
@@ -7,14 +7,14 @@
 import numpy
 
 if __name__ == '__main__':
-  x = numpy.arange(-1, 1, 0.01)
+    x = numpy.arange(-1, 1, 0.01)
 
-  for nonlin in numpy.arange(0.2, 1.0, 0.1):
-    angular_range = numpy.pi * nonlin / 2.0
-    newx1 = numpy.sin(x * angular_range) / numpy.sin(angular_range)
-    newx2 = numpy.sin(newx1 * angular_range) / numpy.sin(angular_range)
+    for nonlin in numpy.arange(0.2, 1.0, 0.1):
+        angular_range = numpy.pi * nonlin / 2.0
+        newx1 = numpy.sin(x * angular_range) / numpy.sin(angular_range)
+        newx2 = numpy.sin(newx1 * angular_range) / numpy.sin(angular_range)
 
-    pylab.plot(x, newx2, label="nonlin %f" % nonlin)
+        pylab.plot(x, newx2, label="nonlin %f" % nonlin)
 
-  pylab.legend()
-  pylab.show()
+    pylab.legend()
+    pylab.show()
diff --git a/frc971/control_loops/python/BUILD b/frc971/control_loops/python/BUILD
index 8679374..f3dcb48 100644
--- a/frc971/control_loops/python/BUILD
+++ b/frc971/control_loops/python/BUILD
@@ -156,8 +156,8 @@
     srcs = [
         "color.py",
         "graph.py",
+        "multispline.py",
         "path_edit.py",
-        "points.py",
         "spline_drawing.py",
         "spline_graph.py",
         "spline_writer.py",
diff --git a/frc971/control_loops/python/angular_system.py b/frc971/control_loops/python/angular_system.py
index df6c0f6..6772e71 100755
--- a/frc971/control_loops/python/angular_system.py
+++ b/frc971/control_loops/python/angular_system.py
@@ -9,6 +9,7 @@
 
 
 class AngularSystemParams(object):
+
     def __init__(self,
                  name,
                  motor,
@@ -20,7 +21,7 @@
                  kalman_q_vel,
                  kalman_q_voltage,
                  kalman_r_position,
-                 radius = None,
+                 radius=None,
                  dt=0.00505):
         """Constructs an AngularSystemParams object.
 
@@ -44,6 +45,7 @@
 
 
 class AngularSystem(control_loop.ControlLoop):
+
     def __init__(self, params, name="AngularSystem"):
         super(AngularSystem, self).__init__(name)
         self.params = params
@@ -61,8 +63,8 @@
 
         # State is [position, velocity]
         # Input is [Voltage]
-        C1 = self.motor.Kt / (
-            self.G * self.G * self.motor.resistance * self.J * self.motor.Kv)
+        C1 = self.motor.Kt / (self.G * self.G * self.motor.resistance *
+                              self.J * self.motor.Kv)
         C2 = self.motor.Kt / (self.G * self.J * self.motor.resistance)
 
         self.A_continuous = numpy.matrix([[0, 1], [0, -C1]])
@@ -86,8 +88,9 @@
         if self.params.radius is not None:
             glog.debug('Stall force: %f (N)',
                        self.motor.stall_torque / self.G / self.params.radius)
-            glog.debug('Stall force: %f (lbf)',
-                       self.motor.stall_torque / self.G / self.params.radius * 0.224809)
+            glog.debug(
+                'Stall force: %f (lbf)', self.motor.stall_torque / self.G /
+                self.params.radius * 0.224809)
 
         glog.debug('Stall acceleration: %f (rad/s^2)',
                    self.motor.stall_torque / self.G / self.J)
@@ -117,8 +120,11 @@
 
         self.R = numpy.matrix([[(self.params.kalman_r_position**2.0)]])
 
-        self.KalmanGain, self.Q_steady = controls.kalman(
-            A=self.A, B=self.B, C=self.C, Q=self.Q, R=self.R)
+        self.KalmanGain, self.Q_steady = controls.kalman(A=self.A,
+                                                         B=self.B,
+                                                         C=self.C,
+                                                         Q=self.Q,
+                                                         R=self.R)
 
         glog.debug('Kal %s', repr(self.KalmanGain))
 
@@ -131,6 +137,7 @@
 
 
 class IntegralAngularSystem(AngularSystem):
+
     def __init__(self, params, name="IntegralAngularSystem"):
         super(IntegralAngularSystem, self).__init__(params, name=name)
 
@@ -153,13 +160,16 @@
 
         self.Q = numpy.matrix([[(self.params.kalman_q_pos**2.0), 0.0, 0.0],
                                [0.0, (self.params.kalman_q_vel**2.0), 0.0],
-                               [0.0, 0.0, (self.params.kalman_q_voltage
-                                           **2.0)]])
+                               [0.0, 0.0,
+                                (self.params.kalman_q_voltage**2.0)]])
 
         self.R = numpy.matrix([[(self.params.kalman_r_position**2.0)]])
 
-        self.KalmanGain, self.Q_steady = controls.kalman(
-            A=self.A, B=self.B, C=self.C, Q=self.Q, R=self.R)
+        self.KalmanGain, self.Q_steady = controls.kalman(A=self.A,
+                                                         B=self.B,
+                                                         C=self.C,
+                                                         Q=self.Q,
+                                                         R=self.R)
 
         self.K_unaugmented = self.K
         self.K = numpy.matrix(numpy.zeros((1, 3)))
@@ -232,10 +242,10 @@
         offset_plot.append(observer.X_hat[2, 0])
         x_hat_plot.append(observer.X_hat[0, 0])
 
-        next_goal = numpy.concatenate(
-            (profile.Update(end_goal[0, 0], end_goal[1, 0]),
-             numpy.matrix(numpy.zeros((1, 1)))),
-            axis=0)
+        next_goal = numpy.concatenate((profile.Update(
+            end_goal[0, 0], end_goal[1, 0]), numpy.matrix(numpy.zeros(
+                (1, 1)))),
+                                      axis=0)
 
         ff_U = controller.Kff * (next_goal - observer.A * goal)
 
@@ -252,8 +262,8 @@
 
         U[0, 0] = numpy.clip(U[0, 0], -vbat, vbat)
 
-        motor_current = (U[0, 0] - plant.X[1, 0] / plant.G / plant.motor.Kv
-                         ) / plant.motor.resistance
+        motor_current = (U[0, 0] - plant.X[1, 0] / plant.G /
+                         plant.motor.Kv) / plant.motor.resistance
         motor_current_plot.append(motor_current)
         battery_current = U[0, 0] * motor_current / 12.0
         battery_current_plot.append(battery_current)
@@ -281,8 +291,8 @@
         goal = controller.A * goal + controller.B * ff_U
 
         if U[0, 0] != U_uncapped[0, 0]:
-            profile.MoveCurrentState(
-                numpy.matrix([[goal[0, 0]], [goal[1, 0]]]))
+            profile.MoveCurrentState(numpy.matrix([[goal[0, 0]], [goal[1,
+                                                                       0]]]))
 
     glog.debug('Time: %f', t_plot[-1])
     glog.debug('goal_error %s', repr(end_goal - goal))
@@ -330,15 +340,14 @@
     initial_X = numpy.matrix([[0.0], [0.0]])
     augmented_R = numpy.matrix(numpy.zeros((3, 1)))
     augmented_R[0:2, :] = R
-    RunTest(
-        plant,
-        end_goal=augmented_R,
-        controller=controller,
-        observer=observer,
-        duration=2.0,
-        use_profile=False,
-        kick_time=1.0,
-        kick_magnitude=0.0)
+    RunTest(plant,
+            end_goal=augmented_R,
+            controller=controller,
+            observer=observer,
+            duration=2.0,
+            use_profile=False,
+            kick_time=1.0,
+            kick_magnitude=0.0)
 
 
 def PlotKick(params, R, plant_params=None):
@@ -357,15 +366,14 @@
     initial_X = numpy.matrix([[0.0], [0.0]])
     augmented_R = numpy.matrix(numpy.zeros((3, 1)))
     augmented_R[0:2, :] = R
-    RunTest(
-        plant,
-        end_goal=augmented_R,
-        controller=controller,
-        observer=observer,
-        duration=2.0,
-        use_profile=False,
-        kick_time=1.0,
-        kick_magnitude=2.0)
+    RunTest(plant,
+            end_goal=augmented_R,
+            controller=controller,
+            observer=observer,
+            duration=2.0,
+            use_profile=False,
+            kick_time=1.0,
+            kick_magnitude=2.0)
 
 
 def PlotMotion(params,
@@ -391,15 +399,14 @@
     initial_X = numpy.matrix([[0.0], [0.0]])
     augmented_R = numpy.matrix(numpy.zeros((3, 1)))
     augmented_R[0:2, :] = R
-    RunTest(
-        plant,
-        end_goal=augmented_R,
-        controller=controller,
-        observer=observer,
-        duration=2.0,
-        use_profile=True,
-        max_velocity=max_velocity,
-        max_acceleration=max_acceleration)
+    RunTest(plant,
+            end_goal=augmented_R,
+            controller=controller,
+            observer=observer,
+            duration=2.0,
+            use_profile=True,
+            max_velocity=max_velocity,
+            max_acceleration=max_acceleration)
 
 
 def WriteAngularSystem(params, plant_files, controller_files, year_namespaces):
@@ -431,8 +438,9 @@
         integral_angular_systems.append(
             IntegralAngularSystem(params, 'Integral' + params.name))
 
-    loop_writer = control_loop.ControlLoopWriter(
-        name, angular_systems, namespaces=year_namespaces)
+    loop_writer = control_loop.ControlLoopWriter(name,
+                                                 angular_systems,
+                                                 namespaces=year_namespaces)
     loop_writer.AddConstant(
         control_loop.Constant('kOutputRatio', '%f', angular_systems[0].G))
     loop_writer.AddConstant(
diff --git a/frc971/control_loops/python/basic_window.py b/frc971/control_loops/python/basic_window.py
index b8d49a9..44e4f49 100755
--- a/frc971/control_loops/python/basic_window.py
+++ b/frc971/control_loops/python/basic_window.py
@@ -1,5 +1,6 @@
 #!/usr/bin/python3
 import gi
+
 gi.require_version('Gtk', '3.0')
 from gi.repository import Gtk
 from gi.repository import GLib
@@ -13,6 +14,7 @@
 
 # Override the matrix of a cairo context.
 class OverrideMatrix(object):
+
     def __init__(self, cr, matrix):
         self.cr = cr
         self.matrix = matrix
diff --git a/frc971/control_loops/python/cim.py b/frc971/control_loops/python/cim.py
index ac527de..f8a6e9a 100644
--- a/frc971/control_loops/python/cim.py
+++ b/frc971/control_loops/python/cim.py
@@ -5,6 +5,7 @@
 
 
 class CIM(control_loop.ControlLoop):
+
     def __init__(self):
         super(CIM, self).__init__("CIM")
         # Stall Torque in N m
diff --git a/frc971/control_loops/python/color.py b/frc971/control_loops/python/color.py
index 5634042..a66040c 100644
--- a/frc971/control_loops/python/color.py
+++ b/frc971/control_loops/python/color.py
@@ -1,4 +1,5 @@
 class Color:
+
     def __init__(self, r, g, b, a=1.0):
         self.r = r
         self.g = g
diff --git a/frc971/control_loops/python/constants.py b/frc971/control_loops/python/constants.py
index e038cb2..b2d9d57 100644
--- a/frc971/control_loops/python/constants.py
+++ b/frc971/control_loops/python/constants.py
@@ -17,8 +17,7 @@
 
 FieldType = namedtuple(
     'Field', ['name', 'tags', 'year', 'width', 'length', 'robot', 'field_id'])
-RobotType = namedtuple(
-        "Robot", ['width', 'length'])
+RobotType = namedtuple("Robot", ['width', 'length'])
 
 GALACTIC_SEARCH = "Galactic Search"
 ARED = "A Red"
@@ -31,101 +30,91 @@
 BARREL = "Barrel"
 
 Robot2019 = RobotType(width=0.65, length=0.8)
-Robot2020 = RobotType(width=0.8128, length=0.8636) # 32 in x 34 in
+Robot2020 = RobotType(width=0.8128, length=0.8636)  # 32 in x 34 in
 Robot2021 = Robot2020
 Robot2022 = RobotType(width=0.8763, length=0.96647)
 
 FIELDS = {
     "2019 Field":
-    FieldType(
-        "2019 Field",
-        tags=[],
-        year=2019,
-        width=8.258302,
-        length=8.258302,
-        robot=Robot2019,
-        field_id="2019"),
+    FieldType("2019 Field",
+              tags=[],
+              year=2019,
+              width=8.258302,
+              length=8.258302,
+              robot=Robot2019,
+              field_id="2019"),
     "2020 Field":
-    FieldType(
-        "2020 Field",
-        tags=[],
-        year=2020,
-        width=15.98295,
-        length=8.21055,
-        robot=Robot2020,
-        field_id="2020"),
+    FieldType("2020 Field",
+              tags=[],
+              year=2020,
+              width=15.98295,
+              length=8.21055,
+              robot=Robot2020,
+              field_id="2020"),
     "2021 Galactic Search BRed":
-    FieldType(
-        "2021 Galactic Search BRed",
-        tags=[GALACTIC_SEARCH, BRED],
-        year=2021,
-        width=9.144,
-        length=4.572,
-        robot=Robot2021,
-        field_id="red_b"),
+    FieldType("2021 Galactic Search BRed",
+              tags=[GALACTIC_SEARCH, BRED],
+              year=2021,
+              width=9.144,
+              length=4.572,
+              robot=Robot2021,
+              field_id="red_b"),
     "2021 Galactic Search ARed":
-    FieldType(
-        "2021 Galactic Search ARed",
-        tags=[GALACTIC_SEARCH, ARED],
-        year=2021,
-        width=9.144,
-        length=4.572,
-        robot=Robot2021,
-        field_id="red_a"),
+    FieldType("2021 Galactic Search ARed",
+              tags=[GALACTIC_SEARCH, ARED],
+              year=2021,
+              width=9.144,
+              length=4.572,
+              robot=Robot2021,
+              field_id="red_a"),
     "2021 Galactic Search BBlue":
-    FieldType(
-        "2021 Galactic Search BBlue",
-        tags=[GALACTIC_SEARCH, BBLUE],
-        year=2021,
-        width=9.144,
-        length=4.572,
-        robot=Robot2021,
-        field_id="blue_b"),
+    FieldType("2021 Galactic Search BBlue",
+              tags=[GALACTIC_SEARCH, BBLUE],
+              year=2021,
+              width=9.144,
+              length=4.572,
+              robot=Robot2021,
+              field_id="blue_b"),
     "2021 Galactic Search ABlue":
-    FieldType(
-        "2021 Galactic Search ABlue",
-        tags=[GALACTIC_SEARCH, ABLUE],
-        year=2021,
-        width=9.144,
-        length=4.572,
-        robot=Robot2021,
-        field_id="blue_a"),
+    FieldType("2021 Galactic Search ABlue",
+              tags=[GALACTIC_SEARCH, ABLUE],
+              year=2021,
+              width=9.144,
+              length=4.572,
+              robot=Robot2021,
+              field_id="blue_a"),
     "2021 AutoNav Barrel":
-    FieldType(
-        "2021 AutoNav Barrel",
-        tags=[AUTONAV, BARREL],
-        year=2021,
-        width=9.144,
-        length=4.572,
-        robot=Robot2021,
-        field_id="autonav_barrel"),
+    FieldType("2021 AutoNav Barrel",
+              tags=[AUTONAV, BARREL],
+              year=2021,
+              width=9.144,
+              length=4.572,
+              robot=Robot2021,
+              field_id="autonav_barrel"),
     "2021 AutoNav Slalom":
-    FieldType(
-        "2021 AutoNav Slalom",
-        tags=[AUTONAV, SLALOM],
-        year=2021,
-        width=9.144,
-        length=4.572,
-        robot=Robot2021,
-        field_id="autonav_slalom"),
+    FieldType("2021 AutoNav Slalom",
+              tags=[AUTONAV, SLALOM],
+              year=2021,
+              width=9.144,
+              length=4.572,
+              robot=Robot2021,
+              field_id="autonav_slalom"),
     "2021 AutoNav Bounce":
-    FieldType(
-        "2021 AutoNav Bounce",
-        tags=[AUTONAV, BOUNCE],
-        year=2021,
-        width=9.144,
-        length=4.572,
-        robot=Robot2021,
-        field_id="autonav_bounce"),
+    FieldType("2021 AutoNav Bounce",
+              tags=[AUTONAV, BOUNCE],
+              year=2021,
+              width=9.144,
+              length=4.572,
+              robot=Robot2021,
+              field_id="autonav_bounce"),
     "2022 Field":
-    FieldType(
-        "2022 Field",
-        tags=[],
-        year=2022,
-        width=16.4592,
-        length=8.2296,
-        robot=Robot2022,
-        field_id="2022"),
+    FieldType("2022 Field",
+              tags=[],
+              year=2022,
+              width=16.4592,
+              length=8.2296,
+              robot=Robot2022,
+              field_id="2022"),
 }
 
 FIELD = FIELDS["2022 Field"]
@@ -139,5 +128,6 @@
     else:
         return "frc971/control_loops/python/spline_jsons"
 
+
 def inToM(i):
     return (i * 0.0254)
diff --git a/frc971/control_loops/python/control_loop.py b/frc971/control_loops/python/control_loop.py
index 1649dd2..e836d1a 100644
--- a/frc971/control_loops/python/control_loop.py
+++ b/frc971/control_loops/python/control_loop.py
@@ -4,6 +4,7 @@
 
 
 class Constant(object):
+
     def __init__(self, name, formatt, value, comment=None):
         self.name = name
         self.formatt = formatt
@@ -25,6 +26,7 @@
 
 
 class ControlLoopWriter(object):
+
     def __init__(self,
                  gain_schedule_name,
                  loops,
@@ -76,8 +78,9 @@
         return self._namespaces[0]
 
     def _HeaderGuard(self, header_file):
-        return ('_'.join([namespace.upper() for namespace in self._namespaces])
-                + '_' + os.path.basename(header_file).upper().replace(
+        return ('_'.join([namespace.upper()
+                          for namespace in self._namespaces]) + '_' +
+                os.path.basename(header_file).upper().replace(
                     '.', '_').replace('/', '_') + '_')
 
     def Write(self, header_file, cc_file):
@@ -163,17 +166,17 @@
                 fd.write(loop.DumpObserverHeader(self._ObserverCoeffType()))
                 fd.write('\n')
 
-            fd.write('%s Make%sPlant();\n\n' % (self._PlantType(),
-                                                self._gain_schedule_name))
+            fd.write('%s Make%sPlant();\n\n' %
+                     (self._PlantType(), self._gain_schedule_name))
 
-            fd.write('%s Make%sController();\n\n' % (self._ControllerType(),
-                                                     self._gain_schedule_name))
+            fd.write('%s Make%sController();\n\n' %
+                     (self._ControllerType(), self._gain_schedule_name))
 
-            fd.write('%s Make%sObserver();\n\n' % (self._ObserverType(),
-                                                   self._gain_schedule_name))
+            fd.write('%s Make%sObserver();\n\n' %
+                     (self._ObserverType(), self._gain_schedule_name))
 
-            fd.write('%s Make%sLoop();\n\n' % (self._LoopType(),
-                                               self._gain_schedule_name))
+            fd.write('%s Make%sLoop();\n\n' %
+                     (self._LoopType(), self._gain_schedule_name))
 
             fd.write(self._namespace_end)
             fd.write('\n\n')
@@ -182,8 +185,8 @@
     def WriteCC(self, header_file_name, cc_file):
         """Writes the cc file to the file named cc_file."""
         with open(cc_file, 'w') as fd:
-            fd.write('#include \"%s/%s\"\n' % (os.path.join(*self._namespaces),
-                                               header_file_name))
+            fd.write('#include \"%s/%s\"\n' %
+                     (os.path.join(*self._namespaces), header_file_name))
             fd.write('\n')
             fd.write('#include <chrono>\n')
             fd.write('#include <vector>\n')
@@ -208,19 +211,20 @@
                                       self._scalar_type))
                 fd.write('\n')
 
-            fd.write('%s Make%sPlant() {\n' % (self._PlantType(),
-                                               self._gain_schedule_name))
+            fd.write('%s Make%sPlant() {\n' %
+                     (self._PlantType(), self._gain_schedule_name))
             fd.write('  ::std::vector< ::std::unique_ptr<%s>> plants(%d);\n' %
                      (self._PlantCoeffType(), len(self._loops)))
             for index, loop in enumerate(self._loops):
-                fd.write('  plants[%d] = ::std::unique_ptr<%s>(new %s(%s));\n'
-                         % (index, self._PlantCoeffType(),
-                            self._PlantCoeffType(), loop.PlantFunction()))
+                fd.write(
+                    '  plants[%d] = ::std::unique_ptr<%s>(new %s(%s));\n' %
+                    (index, self._PlantCoeffType(), self._PlantCoeffType(),
+                     loop.PlantFunction()))
             fd.write('  return %s(std::move(plants));\n' % self._PlantType())
             fd.write('}\n\n')
 
-            fd.write('%s Make%sController() {\n' % (self._ControllerType(),
-                                                    self._gain_schedule_name))
+            fd.write('%s Make%sController() {\n' %
+                     (self._ControllerType(), self._gain_schedule_name))
             fd.write(
                 '  ::std::vector< ::std::unique_ptr<%s>> controllers(%d);\n' %
                 (self._ControllerCoeffType(), len(self._loops)))
@@ -233,21 +237,22 @@
                      self._ControllerType())
             fd.write('}\n\n')
 
-            fd.write('%s Make%sObserver() {\n' % (self._ObserverType(),
-                                                  self._gain_schedule_name))
-            fd.write('  ::std::vector< ::std::unique_ptr<%s>> observers(%d);\n'
-                     % (self._ObserverCoeffType(), len(self._loops)))
+            fd.write('%s Make%sObserver() {\n' %
+                     (self._ObserverType(), self._gain_schedule_name))
+            fd.write(
+                '  ::std::vector< ::std::unique_ptr<%s>> observers(%d);\n' %
+                (self._ObserverCoeffType(), len(self._loops)))
             for index, loop in enumerate(self._loops):
                 fd.write(
-                    '  observers[%d] = ::std::unique_ptr<%s>(new %s(%s));\n'
-                    % (index, self._ObserverCoeffType(),
-                       self._ObserverCoeffType(), loop.ObserverFunction()))
-            fd.write(
-                '  return %s(std::move(observers));\n' % self._ObserverType())
+                    '  observers[%d] = ::std::unique_ptr<%s>(new %s(%s));\n' %
+                    (index, self._ObserverCoeffType(),
+                     self._ObserverCoeffType(), loop.ObserverFunction()))
+            fd.write('  return %s(std::move(observers));\n' %
+                     self._ObserverType())
             fd.write('}\n\n')
 
-            fd.write('%s Make%sLoop() {\n' % (self._LoopType(),
-                                              self._gain_schedule_name))
+            fd.write('%s Make%sLoop() {\n' %
+                     (self._LoopType(), self._gain_schedule_name))
             fd.write(
                 '  return %s(Make%sPlant(), Make%sController(), Make%sObserver());\n'
                 % (self._LoopType(), self._gain_schedule_name,
@@ -259,6 +264,7 @@
 
 
 class ControlLoop(object):
+
     def __init__(self, name):
         """Constructs a control loop object.
 
@@ -291,7 +297,8 @@
         self.X = numpy.matrix(numpy.zeros((self.A.shape[0], 1)))
         self.Y = self.C * self.X
         self.X_hat = numpy.matrix(numpy.zeros((self.A.shape[0], 1)))
-        self.last_U = numpy.matrix(numpy.zeros((self.B.shape[1], max(1, self.delayed_u))))
+        self.last_U = numpy.matrix(
+            numpy.zeros((self.B.shape[1], max(1, self.delayed_u))))
 
     def PlaceControllerPoles(self, poles):
         """Places the controller poles.
@@ -368,8 +375,8 @@
                     if '.' not in write_type and 'e' not in write_type:
                         write_type += '.0'
                     write_type += 'f'
-                ans.append(
-                    '  %s(%d, %d) = %s;\n' % (matrix_name, x, y, write_type))
+                ans.append('  %s(%d, %d) = %s;\n' %
+                           (matrix_name, x, y, write_type))
 
         return ''.join(ans)
 
@@ -389,8 +396,8 @@
             string, The function which will create the object.
         """
         ans = [
-            '%s Make%sPlantCoefficients() {\n' % (plant_coefficient_type,
-                                                  self._name)
+            '%s Make%sPlantCoefficients() {\n' %
+            (plant_coefficient_type, self._name)
         ]
 
         ans.append(self._DumpMatrix('C', self.C, scalar_type))
@@ -402,11 +409,11 @@
         if plant_coefficient_type.startswith('StateFeedbackPlant'):
             ans.append(self._DumpMatrix('A', self.A, scalar_type))
             ans.append(self._DumpMatrix('B', self.B, scalar_type))
-            ans.append(
-                '  const std::chrono::nanoseconds dt(%d);\n' % (self.dt * 1e9))
-            ans.append(
-                '  return %s'
-                '(A, B, C, D, U_max, U_min, dt, %s);\n' % (plant_coefficient_type, delayed_u_string))
+            ans.append('  const std::chrono::nanoseconds dt(%d);\n' %
+                       (self.dt * 1e9))
+            ans.append('  return %s'
+                       '(A, B, C, D, U_max, U_min, dt, %s);\n' %
+                       (plant_coefficient_type, delayed_u_string))
         elif plant_coefficient_type.startswith('StateFeedbackHybridPlant'):
             ans.append(
                 self._DumpMatrix('A_continuous', self.A_continuous,
@@ -414,9 +421,10 @@
             ans.append(
                 self._DumpMatrix('B_continuous', self.B_continuous,
                                  scalar_type))
-            ans.append('  return %s'
-                       '(A_continuous, B_continuous, C, D, U_max, U_min, %s);\n' %
-                       (plant_coefficient_type, delayed_u_string))
+            ans.append(
+                '  return %s'
+                '(A_continuous, B_continuous, C, D, U_max, U_min, %s);\n' %
+                (plant_coefficient_type, delayed_u_string))
         else:
             glog.fatal('Unsupported plant type %s', plant_coefficient_type)
 
@@ -521,8 +529,8 @@
                 self._DumpMatrix('P_steady_state', self.P_steady_state,
                                  scalar_type))
             ans.append(
-                '  return %s(Q_continuous, R_continuous, P_steady_state, %s);\n' %
-                (observer_coefficient_type, delayed_u_string))
+                '  return %s(Q_continuous, R_continuous, P_steady_state, %s);\n'
+                % (observer_coefficient_type, delayed_u_string))
         else:
             glog.fatal('Unsupported observer type %s',
                        observer_coefficient_type)
@@ -532,6 +540,7 @@
 
 
 class HybridControlLoop(ControlLoop):
+
     def __init__(self, name):
         super(HybridControlLoop, self).__init__(name=name)
 
@@ -569,6 +578,7 @@
 
 
 class CIM(object):
+
     def __init__(self):
         # Stall Torque in N m
         self.stall_torque = 2.42
@@ -581,13 +591,14 @@
         # Resistance of the motor
         self.resistance = 12.0 / self.stall_current
         # Motor velocity constant
-        self.Kv = (
-            self.free_speed / (12.0 - self.resistance * self.free_current))
+        self.Kv = (self.free_speed /
+                   (12.0 - self.resistance * self.free_current))
         # Torque constant
         self.Kt = self.stall_torque / self.stall_current
 
 
 class MiniCIM(object):
+
     def __init__(self):
         # Stall Torque in N m
         self.stall_torque = 1.41
@@ -600,8 +611,8 @@
         # Resistance of the motor
         self.resistance = 12.0 / self.stall_current
         # Motor velocity constant
-        self.Kv = (
-            self.free_speed / (12.0 - self.resistance * self.free_current))
+        self.Kv = (self.free_speed /
+                   (12.0 - self.resistance * self.free_current))
         # Torque constant
         self.Kt = self.stall_torque / self.stall_current
 
@@ -610,6 +621,7 @@
 
 
 class NMotor(object):
+
     def __init__(self, motor, n):
         """Gangs together n motors."""
         self.motor = motor
@@ -625,6 +637,7 @@
 
 
 class Vex775Pro(object):
+
     def __init__(self):
         # Stall Torque in N m
         self.stall_torque = 0.71
@@ -637,8 +650,8 @@
         # Resistance of the motor
         self.resistance = 12.0 / self.stall_current
         # Motor velocity constant
-        self.Kv = (
-            self.free_speed / (12.0 - self.resistance * self.free_current))
+        self.Kv = (self.free_speed /
+                   (12.0 - self.resistance * self.free_current))
         # Torque constant
         self.Kt = self.stall_torque / self.stall_current
         # Motor inertia in kg m^2
@@ -659,8 +672,8 @@
         # Resistance of the motor (Ohms)
         self.resistance = 12.0 / self.stall_current
         # Motor velocity constant (radians / (sec * volt))
-        self.Kv = (
-            self.free_speed / (12.0 - self.resistance * self.free_current))
+        self.Kv = (self.free_speed /
+                   (12.0 - self.resistance * self.free_current))
         # Torque constant (N * m / A)
         self.Kt = self.stall_torque / self.stall_current
         # Motor inertia in kg m^2
@@ -668,6 +681,7 @@
 
 
 class MN3510(object):
+
     def __init__(self):
         # http://www.robotshop.com/en/t-motor-navigator-mn3510-360kv-brushless-motor.html#Specifications
         # Free Current in Amps
@@ -702,8 +716,8 @@
         # Resistance of the motor, divided by 2 to account for the 2 motors
         self.resistance = 12.0 / self.stall_current
         # Motor velocity constant
-        self.Kv = (
-            self.free_speed / (12.0 - self.resistance * self.free_current))
+        self.Kv = (self.free_speed /
+                   (12.0 - self.resistance * self.free_current))
         # Torque constant
         self.Kt = self.stall_torque / self.stall_current
         # Motor inertia in kg m^2
diff --git a/frc971/control_loops/python/controls.py b/frc971/control_loops/python/controls.py
index 1dfd061..4cb10a2 100644
--- a/frc971/control_loops/python/controls.py
+++ b/frc971/control_loops/python/controls.py
@@ -33,8 +33,8 @@
   Returns:
     numpy.matrix(m x n), K
   """
-    return scipy.signal.place_poles(
-        A=A, B=B, poles=numpy.array(poles)).gain_matrix
+    return scipy.signal.place_poles(A=A, B=B,
+                                    poles=numpy.array(poles)).gain_matrix
 
 
 def c2d(A, B, dt):
@@ -48,8 +48,8 @@
     em_upper = numpy.hstack((a, b))
 
     # Need to stack zeros under the a and b matrices
-    em_lower = numpy.hstack((numpy.zeros((b.shape[1], a.shape[0])),
-                             numpy.zeros((b.shape[1], b.shape[1]))))
+    em_lower = numpy.hstack((numpy.zeros(
+        (b.shape[1], a.shape[0])), numpy.zeros((b.shape[1], b.shape[1]))))
 
     em = numpy.vstack((em_upper, em_lower))
     ms = scipy.linalg.expm(dt * em)
@@ -164,8 +164,8 @@
         axis=0)
     phi = numpy.matrix(scipy.linalg.expm(M * dt))
     phi12 = phi[0:number_of_states, number_of_states:(2 * number_of_states)]
-    phi22 = phi[number_of_states:2 * number_of_states, number_of_states:2 *
-                number_of_states]
+    phi22 = phi[number_of_states:2 * number_of_states,
+                number_of_states:2 * number_of_states]
     Q_discrete = phi22.T * phi12
     Q_discrete = (Q_discrete + Q_discrete.T) / 2.0
     R_discrete = R_continuous / dt
diff --git a/frc971/control_loops/python/down_estimator.py b/frc971/control_loops/python/down_estimator.py
index 84cdfa9..956a2b3 100644
--- a/frc971/control_loops/python/down_estimator.py
+++ b/frc971/control_loops/python/down_estimator.py
@@ -18,6 +18,7 @@
 
 
 class DownEstimator(control_loop.ControlLoop):
+
     def __init__(self, name='DownEstimator'):
         super(DownEstimator, self).__init__(name)
         self.dt = 0.005
@@ -47,8 +48,11 @@
         self.U_min = numpy.matrix([[-numpy.pi]])
         self.K = numpy.matrix(numpy.zeros((1, 2)))
 
-        self.KalmanGain, self.Q_steady = controls.kalman(
-            A=self.A, B=self.B, C=self.C, Q=self.Q, R=self.R)
+        self.KalmanGain, self.Q_steady = controls.kalman(A=self.A,
+                                                         B=self.B,
+                                                         C=self.C,
+                                                         Q=self.Q,
+                                                         R=self.R)
 
         self.L = self.A * self.KalmanGain
 
@@ -93,8 +97,8 @@
         angle = math.pi / 2
         velocity = 1
         for i in range(100):
-            measured_velocity = velocity + (
-                random.random() - 0.5) * 0.01 + 0.05
+            measured_velocity = velocity + (random.random() -
+                                            0.5) * 0.01 + 0.05
             estimator.Predict(measured_velocity)
             estimator.Update(
                 math.sin(angle) + (random.random() - 0.5) * 0.02, 0,
@@ -115,8 +119,9 @@
         glog.error("Expected .h file name and .cc file name")
     else:
         namespaces = ['frc971', 'control_loops', 'drivetrain']
-        kf_loop_writer = control_loop.ControlLoopWriter(
-            "DownEstimator", [estimator], namespaces=namespaces)
+        kf_loop_writer = control_loop.ControlLoopWriter("DownEstimator",
+                                                        [estimator],
+                                                        namespaces=namespaces)
         kf_loop_writer.Write(argv[1], argv[2])
 
 
diff --git a/frc971/control_loops/python/drawing_constants.py b/frc971/control_loops/python/drawing_constants.py
index 644e449..431abd7 100644
--- a/frc971/control_loops/python/drawing_constants.py
+++ b/frc971/control_loops/python/drawing_constants.py
@@ -58,7 +58,8 @@
     cr.show_text(text)
     cr.scale(widthb, -heightb)
 
+
 def draw_points(cr, p, size):
     for i in range(0, len(p)):
-        draw_px_cross(cr, p[i][0], p[i][1], size, Color(
-            0, np.sqrt(0.2 * i), 0))
+        draw_px_cross(cr, p[i][0], p[i][1], size,
+                      Color(0, np.sqrt(0.2 * i), 0))
diff --git a/frc971/control_loops/python/drivetrain.py b/frc971/control_loops/python/drivetrain.py
index 264b4a6..80a4a53 100644
--- a/frc971/control_loops/python/drivetrain.py
+++ b/frc971/control_loops/python/drivetrain.py
@@ -9,6 +9,7 @@
 
 
 class DrivetrainParams(object):
+
     def __init__(self,
                  J,
                  mass,
@@ -109,6 +110,7 @@
 
 
 class Drivetrain(control_loop.ControlLoop):
+
     def __init__(self,
                  drivetrain_params,
                  name="Drivetrain",
@@ -192,8 +194,8 @@
 
         # Resistance of the motor, divided by the number of motors.
         # Motor velocity constant
-        self.Kv = (
-            self.free_speed / (12.0 - self.resistance * self.free_current))
+        self.Kv = (self.free_speed /
+                   (12.0 - self.resistance * self.free_current))
         # Torque constant
         self.Kt = self.stall_torque / self.stall_current
 
@@ -206,10 +208,10 @@
         self.msnr = self.robot_radius_l / ( self.robot_radius_r * self.mass ) - \
             self.robot_radius_l * self.robot_radius_r / self.J
         # The calculations which we will need for A and B.
-        self.tcl = self.Kt / self.Kv / (
-            self.Gl * self.Gl * self.resistance * self.r * self.r)
-        self.tcr = self.Kt / self.Kv / (
-            self.Gr * self.Gr * self.resistance * self.r * self.r)
+        self.tcl = self.Kt / self.Kv / (self.Gl * self.Gl * self.resistance *
+                                        self.r * self.r)
+        self.tcr = self.Kt / self.Kv / (self.Gr * self.Gr * self.resistance *
+                                        self.r * self.r)
         self.mpl = self.Kt / (self.Gl * self.resistance * self.r)
         self.mpr = self.Kt / (self.Gr * self.resistance * self.r)
 
@@ -268,6 +270,7 @@
 
 
 class KFDrivetrain(Drivetrain):
+
     def __init__(self,
                  drivetrain_params,
                  name="KFDrivetrain",
@@ -323,31 +326,36 @@
                                                    self.B_continuous, self.dt)
 
         if self.has_imu:
-            self.C = numpy.matrix(
-                [[1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0],
-                 [
-                     0, -0.5 / drivetrain_params.robot_radius, 0,
-                     0.5 / drivetrain_params.robot_radius, 0, 0, 0
-                 ], [0, 0, 0, 0, 0, 0, 0]])
+            self.C = numpy.matrix([[1, 0, 0, 0, 0, 0, 0],
+                                   [0, 0, 1, 0, 0, 0, 0],
+                                   [
+                                       0,
+                                       -0.5 / drivetrain_params.robot_radius,
+                                       0, 0.5 / drivetrain_params.robot_radius,
+                                       0, 0, 0
+                                   ], [0, 0, 0, 0, 0, 0, 0]])
             gravity = 9.8
             self.C[3, 0:6] = 0.5 * (self.A_continuous[1, 0:6] +
                                     self.A_continuous[3, 0:6]) / gravity
 
-            self.D = numpy.matrix(
-                [[0, 0], [0, 0], [0, 0],
-                 [
-                     0.5 * (self.B_continuous[1, 0] + self.B_continuous[3, 0])
-                     / gravity,
-                     0.5 * (self.B_continuous[1, 1] + self.B_continuous[3, 1])
-                     / gravity
-                 ]])
+            self.D = numpy.matrix([
+                [0, 0], [0, 0], [0, 0],
+                [
+                    0.5 * (self.B_continuous[1, 0] + self.B_continuous[3, 0]) /
+                    gravity,
+                    0.5 * (self.B_continuous[1, 1] + self.B_continuous[3, 1]) /
+                    gravity
+                ]
+            ])
         else:
-            self.C = numpy.matrix(
-                [[1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0],
-                 [
-                     0, -0.5 / drivetrain_params.robot_radius, 0,
-                     0.5 / drivetrain_params.robot_radius, 0, 0, 0
-                 ]])
+            self.C = numpy.matrix([[1, 0, 0, 0, 0, 0, 0],
+                                   [0, 0, 1, 0, 0, 0, 0],
+                                   [
+                                       0,
+                                       -0.5 / drivetrain_params.robot_radius,
+                                       0, 0.5 / drivetrain_params.robot_radius,
+                                       0, 0, 0
+                                   ]])
 
             self.D = numpy.matrix([[0, 0], [0, 0], [0, 0]])
 
@@ -378,14 +386,17 @@
                                    [0.0, 0.0, (r_gyro**2.0)]])
 
         # Solving for kf gains.
-        self.KalmanGain, self.Q_steady = controls.kalman(
-            A=self.A, B=self.B, C=self.C, Q=self.Q, R=self.R)
+        self.KalmanGain, self.Q_steady = controls.kalman(A=self.A,
+                                                         B=self.B,
+                                                         C=self.C,
+                                                         Q=self.Q,
+                                                         R=self.R)
 
         # If we don't have an IMU, pad various matrices with zeros so that
         # we can still have 4 measurement outputs.
         if not self.has_imu:
-            self.KalmanGain = numpy.hstack((self.KalmanGain,
-                                            numpy.matrix(numpy.zeros((7, 1)))))
+            self.KalmanGain = numpy.hstack(
+                (self.KalmanGain, numpy.matrix(numpy.zeros((7, 1)))))
             self.C = numpy.vstack((self.C, numpy.matrix(numpy.zeros((1, 7)))))
             self.D = numpy.vstack((self.D, numpy.matrix(numpy.zeros((1, 2)))))
             Rtmp = numpy.zeros((4, 4))
@@ -415,8 +426,8 @@
         self.Qff[2, 2] = 1.0 / qff_pos**2.0
         self.Qff[3, 3] = 1.0 / qff_vel**2.0
         self.Kff = numpy.matrix(numpy.zeros((2, 7)))
-        self.Kff[0:2, 0:4] = controls.TwoStateFeedForwards(
-            self.B[0:4, :], self.Qff)
+        self.Kff[0:2,
+                 0:4] = controls.TwoStateFeedForwards(self.B[0:4, :], self.Qff)
 
         self.InitializeState()
 
@@ -428,59 +439,50 @@
                     scalar_type='double'):
 
     # Write the generated constants out to a file.
-    drivetrain_low_low = Drivetrain(
-        name="DrivetrainLowLow",
-        left_low=True,
-        right_low=True,
-        drivetrain_params=drivetrain_params)
-    drivetrain_low_high = Drivetrain(
-        name="DrivetrainLowHigh",
-        left_low=True,
-        right_low=False,
-        drivetrain_params=drivetrain_params)
-    drivetrain_high_low = Drivetrain(
-        name="DrivetrainHighLow",
-        left_low=False,
-        right_low=True,
-        drivetrain_params=drivetrain_params)
-    drivetrain_high_high = Drivetrain(
-        name="DrivetrainHighHigh",
-        left_low=False,
-        right_low=False,
-        drivetrain_params=drivetrain_params)
+    drivetrain_low_low = Drivetrain(name="DrivetrainLowLow",
+                                    left_low=True,
+                                    right_low=True,
+                                    drivetrain_params=drivetrain_params)
+    drivetrain_low_high = Drivetrain(name="DrivetrainLowHigh",
+                                     left_low=True,
+                                     right_low=False,
+                                     drivetrain_params=drivetrain_params)
+    drivetrain_high_low = Drivetrain(name="DrivetrainHighLow",
+                                     left_low=False,
+                                     right_low=True,
+                                     drivetrain_params=drivetrain_params)
+    drivetrain_high_high = Drivetrain(name="DrivetrainHighHigh",
+                                      left_low=False,
+                                      right_low=False,
+                                      drivetrain_params=drivetrain_params)
 
-    kf_drivetrain_low_low = KFDrivetrain(
-        name="KFDrivetrainLowLow",
-        left_low=True,
-        right_low=True,
-        drivetrain_params=drivetrain_params)
-    kf_drivetrain_low_high = KFDrivetrain(
-        name="KFDrivetrainLowHigh",
-        left_low=True,
-        right_low=False,
-        drivetrain_params=drivetrain_params)
-    kf_drivetrain_high_low = KFDrivetrain(
-        name="KFDrivetrainHighLow",
-        left_low=False,
-        right_low=True,
-        drivetrain_params=drivetrain_params)
-    kf_drivetrain_high_high = KFDrivetrain(
-        name="KFDrivetrainHighHigh",
-        left_low=False,
-        right_low=False,
-        drivetrain_params=drivetrain_params)
+    kf_drivetrain_low_low = KFDrivetrain(name="KFDrivetrainLowLow",
+                                         left_low=True,
+                                         right_low=True,
+                                         drivetrain_params=drivetrain_params)
+    kf_drivetrain_low_high = KFDrivetrain(name="KFDrivetrainLowHigh",
+                                          left_low=True,
+                                          right_low=False,
+                                          drivetrain_params=drivetrain_params)
+    kf_drivetrain_high_low = KFDrivetrain(name="KFDrivetrainHighLow",
+                                          left_low=False,
+                                          right_low=True,
+                                          drivetrain_params=drivetrain_params)
+    kf_drivetrain_high_high = KFDrivetrain(name="KFDrivetrainHighHigh",
+                                           left_low=False,
+                                           right_low=False,
+                                           drivetrain_params=drivetrain_params)
 
     if isinstance(year_namespace, list):
         namespaces = year_namespace
     else:
         namespaces = [year_namespace, 'control_loops', 'drivetrain']
-    dog_loop_writer = control_loop.ControlLoopWriter(
-        "Drivetrain", [
-            drivetrain_low_low, drivetrain_low_high, drivetrain_high_low,
-            drivetrain_high_high
-        ],
-        namespaces=namespaces,
-        scalar_type=scalar_type)
+    dog_loop_writer = control_loop.ControlLoopWriter("Drivetrain", [
+        drivetrain_low_low, drivetrain_low_high, drivetrain_high_low,
+        drivetrain_high_high
+    ],
+                                                     namespaces=namespaces,
+                                                     scalar_type=scalar_type)
     dog_loop_writer.AddConstant(
         control_loop.Constant("kDt", "%f", drivetrain_low_low.dt))
     dog_loop_writer.AddConstant(
@@ -522,20 +524,20 @@
 
     dog_loop_writer.Write(drivetrain_files[0], drivetrain_files[1])
 
-    kf_loop_writer = control_loop.ControlLoopWriter(
-        "KFDrivetrain", [
-            kf_drivetrain_low_low, kf_drivetrain_low_high,
-            kf_drivetrain_high_low, kf_drivetrain_high_high
-        ],
-        namespaces=namespaces,
-        scalar_type=scalar_type)
+    kf_loop_writer = control_loop.ControlLoopWriter("KFDrivetrain", [
+        kf_drivetrain_low_low, kf_drivetrain_low_high, kf_drivetrain_high_low,
+        kf_drivetrain_high_high
+    ],
+                                                    namespaces=namespaces,
+                                                    scalar_type=scalar_type)
     kf_loop_writer.Write(kf_drivetrain_files[0], kf_drivetrain_files[1])
 
 
 def PlotDrivetrainMotions(drivetrain_params):
     # Test out the voltage error.
-    drivetrain = KFDrivetrain(
-        left_low=False, right_low=False, drivetrain_params=drivetrain_params)
+    drivetrain = KFDrivetrain(left_low=False,
+                              right_low=False,
+                              drivetrain_params=drivetrain_params)
     close_loop_left = []
     close_loop_right = []
     left_power = []
@@ -562,8 +564,9 @@
     pylab.show()
 
     # Simulate the response of the system to a step input.
-    drivetrain = KFDrivetrain(
-        left_low=False, right_low=False, drivetrain_params=drivetrain_params)
+    drivetrain = KFDrivetrain(left_low=False,
+                              right_low=False,
+                              drivetrain_params=drivetrain_params)
     simulated_left = []
     simulated_right = []
     for _ in range(100):
@@ -579,8 +582,9 @@
     pylab.show()
 
     # Simulate forwards motion.
-    drivetrain = KFDrivetrain(
-        left_low=False, right_low=False, drivetrain_params=drivetrain_params)
+    drivetrain = KFDrivetrain(left_low=False,
+                              right_low=False,
+                              drivetrain_params=drivetrain_params)
     close_loop_left = []
     close_loop_right = []
     left_power = []
diff --git a/frc971/control_loops/python/graph.py b/frc971/control_loops/python/graph.py
index c974d68..178b63d 100644
--- a/frc971/control_loops/python/graph.py
+++ b/frc971/control_loops/python/graph.py
@@ -1,41 +1,121 @@
 import gi
+
 gi.require_version('Gtk', '3.0')
 from gi.repository import Gtk
 import numpy as np
 import queue
 import threading
 import copy
-from points import Points
+from multispline import Multispline
 from libspline import Spline, DistanceSpline, Trajectory
 
 from matplotlib.backends.backend_gtk3agg import (FigureCanvasGTK3Agg as
                                                  FigureCanvas)
 from matplotlib.figure import Figure
 
+
 class Graph(Gtk.Bin):
+
     def __init__(self):
         super(Graph, self).__init__()
         fig = Figure(figsize=(5, 4), dpi=100)
         self.axis = fig.add_subplot(111)
-        canvas = FigureCanvas(fig)  # a Gtk.DrawingArea
-        canvas.set_vexpand(True)
-        canvas.set_size_request(800, 250)
-        self.add(canvas)
-        self.queue = queue.Queue(maxsize=1)
+        self.canvas = FigureCanvas(fig)  # a Gtk.DrawingArea
+        self.canvas.set_vexpand(True)
+        self.canvas.set_size_request(800, 250)
+        self.callback_id = self.canvas.mpl_connect('motion_notify_event',
+                                                   self.on_mouse_move)
+        self.add(self.canvas)
 
+        # The current graph data
+        self.data = None
+        # The size of a timestep
+        self.dt = 0.00505
+        # The position of the cursor in seconds
+        self.cursor = 0
+
+        # Reference to the parent gtk Widget that wants to get redrawn
+        # when user moves the cursor
+        self.cursor_watcher = None
+        self.cursor_line = None
+
+        self.queue = queue.Queue(maxsize=1)
         thread = threading.Thread(target=self.worker)
         thread.daemon = True
         thread.start()
 
-    def schedule_recalculate(self, points):
-        if not points.getLibsplines() or self.queue.full(): return
-        new_copy = copy.deepcopy(points)
+    def find_cursor(self):
+        """Gets the cursor position as a distance along the spline"""
+        if self.data is None:
+            return None
+        cursor_index = int(self.cursor / self.dt)
+        if cursor_index > self.data.size:
+            return None
+        # use the time to index into the position data
+        distance_at_cursor = self.data[0][cursor_index - 1]
+        multispline_index = int(self.data[5][cursor_index - 1])
+        return (multispline_index, distance_at_cursor)
+
+    def place_cursor(self, multispline_index, distance):
+        """Places the cursor at a certain distance along the spline"""
+        if self.data is None:
+            return
+
+        # find the section that is the current multispline
+        start_of_multispline = np.searchsorted(self.data[5],
+                                               multispline_index,
+                                               side='left')
+        end_of_multispline = np.searchsorted(self.data[5],
+                                             multispline_index,
+                                             side='right')
+        multispline_region = self.data[0][
+            start_of_multispline:end_of_multispline]
+
+        # convert distance along this multispline to time along trajectory
+        index = np.searchsorted(multispline_region, distance,
+                                side='left') + start_of_multispline
+        time = index * self.dt
+
+        self.cursor = time
+        self.redraw_cursor()
+
+    def on_mouse_move(self, event):
+        """Updates the cursor and all the canvases that watch it on mouse move"""
+
+        if self.data is None:
+            return
+        total_steps_taken = self.data.shape[1]
+        total_time = self.dt * total_steps_taken
+        if event.xdata is not None:
+            # clip the position if still on the canvas, but off the graph
+            self.cursor = np.clip(event.xdata, 0, total_time)
+
+            self.redraw_cursor()
+
+            # tell the field to update too
+            if self.cursor_watcher is not None:
+                self.cursor_watcher.queue_draw()
+
+    def redraw_cursor(self):
+        """Redraws the cursor line"""
+        # TODO: This redraws the entire graph and isn't very snappy
+        if self.cursor_line: self.cursor_line.remove()
+        self.cursor_line = self.axis.axvline(self.cursor)
+        self.canvas.draw_idle()
+
+    def schedule_recalculate(self, multisplines):
+        """Submits points to be graphed
+
+        Can be superseded by newer points if an old one isn't finished processing.
+        """
+
+        new_copy = copy.deepcopy(multisplines)
 
         # empty the queue
         try:
             self.queue.get_nowait()
         except queue.Empty:
-            pass # was already empty
+            pass  # was already empty
 
         # replace with new request
         self.queue.put_nowait(new_copy)
@@ -44,32 +124,50 @@
         while True:
             self.recalculate_graph(self.queue.get())
 
-    def recalculate_graph(self, points):
-        if not points.getLibsplines(): return
-        # set the size of a timestep
-        dt = 0.00505
+    def recalculate_graph(self, multisplines):
+        if len(multisplines) == 0: return
 
         # call C++ wrappers to calculate the trajectory
-        distanceSpline = DistanceSpline(points.getLibsplines())
-        traj = Trajectory(distanceSpline)
-        points.addConstraintsToTrajectory(traj)
-        traj.Plan()
-        XVA = traj.GetPlanXVA(dt)
-        if XVA is None: return
+        full_data = None
+
+        for multispline_index, multispline in enumerate(multisplines):
+            multispline.update_lib_spline()
+            if len(multispline.getLibsplines()) == 0: continue
+            distanceSpline = DistanceSpline(multispline.getLibsplines())
+            traj = Trajectory(distanceSpline)
+            multispline.addConstraintsToTrajectory(traj)
+            traj.Plan()
+            XVA = traj.GetPlanXVA(self.dt)
+            if XVA is None: continue
+            position, _, _ = XVA
+
+            voltages = np.transpose([traj.Voltage(x) for x in position])
+
+            data = np.append(XVA, voltages, axis=0)
+
+            indicies = np.full((1, XVA.shape[1]), multispline_index, dtype=int)
+            data = np.append(data, indicies, axis=0)
+
+            if full_data is not None:
+                full_data = np.append(full_data, data, axis=1)
+            else:
+                full_data = data
+
+        if full_data is None: return
+        self.data = full_data
 
         # extract values to be graphed
-        total_steps_taken = XVA.shape[1]
-        total_time = dt * total_steps_taken
-        time = np.linspace(0, total_time, num=total_steps_taken)
-        position, velocity, acceleration = XVA
-        left_voltage, right_voltage = zip(*(traj.Voltage(x) for x in position))
+        total_steps_taken = full_data.shape[1]
+        total_time = self.dt * total_steps_taken
+        times = np.linspace(0, total_time, num=total_steps_taken)
+        position, velocity, acceleration, left_voltage, right_voltage, _ = full_data
 
         # update graph
         self.axis.clear()
-        self.axis.plot(time, velocity)
-        self.axis.plot(time, acceleration)
-        self.axis.plot(time, left_voltage)
-        self.axis.plot(time, right_voltage)
+        self.axis.plot(times, velocity)
+        self.axis.plot(times, acceleration)
+        self.axis.plot(times, left_voltage)
+        self.axis.plot(times, right_voltage)
         self.axis.legend(
             ["Velocity", "Acceleration", "Left Voltage", "Right Voltage"])
         self.axis.xaxis.set_label_text("Time (sec)")
@@ -78,5 +176,5 @@
         # the total time to drive the spline
         self.axis.xaxis.set_ticks(np.linspace(0, total_time, num=8))
 
-        # ask to be redrawn
-        self.queue_draw()
+        # redraw
+        self.canvas.draw_idle()
diff --git a/frc971/control_loops/python/haptic_wheel.py b/frc971/control_loops/python/haptic_wheel.py
index 7221870..99a2d60 100755
--- a/frc971/control_loops/python/haptic_wheel.py
+++ b/frc971/control_loops/python/haptic_wheel.py
@@ -22,6 +22,7 @@
 
 
 class SystemParams(object):
+
     def __init__(self, J, G, kP, kD, kCompensationTimeconstant, q_pos, q_vel,
                  q_torque, current_limit):
         self.J = J
@@ -45,29 +46,28 @@
         #[0.25, 0.025],
 
 
-kWheel = SystemParams(
-    J=0.0008,
-    G=(1.25 + 0.02) / 0.35,
-    q_pos=0.001,
-    q_vel=0.20,
-    q_torque=0.005,
-    kP=7.0,
-    kD=0.0,
-    kCompensationTimeconstant=0.95,
-    current_limit=4.5)
-kTrigger = SystemParams(
-    J=0.00025,
-    G=(0.925 * 2.0 + 0.02) / 0.35,
-    q_pos=0.001,
-    q_vel=0.1,
-    q_torque=0.005,
-    kP=120.0,
-    kD=1.8,
-    kCompensationTimeconstant=0.95,
-    current_limit=3.0)
+kWheel = SystemParams(J=0.0008,
+                      G=(1.25 + 0.02) / 0.35,
+                      q_pos=0.001,
+                      q_vel=0.20,
+                      q_torque=0.005,
+                      kP=7.0,
+                      kD=0.0,
+                      kCompensationTimeconstant=0.95,
+                      current_limit=4.5)
+kTrigger = SystemParams(J=0.00025,
+                        G=(0.925 * 2.0 + 0.02) / 0.35,
+                        q_pos=0.001,
+                        q_vel=0.1,
+                        q_torque=0.005,
+                        kP=120.0,
+                        kD=1.8,
+                        kCompensationTimeconstant=0.95,
+                        current_limit=3.0)
 
 
 class HapticInput(control_loop.ControlLoop):
+
     def __init__(self, params=None, name='HapticInput'):
         # The defaults are for the steering wheel.
         super(HapticInput, self).__init__(name)
@@ -107,6 +107,7 @@
 
 
 class IntegralHapticInput(HapticInput):
+
     def __init__(self, params=None, name="IntegralHapticInput"):
         super(IntegralHapticInput, self).__init__(name=name, params=params)
 
@@ -133,8 +134,11 @@
 
         self.R = numpy.matrix([[(params.r_pos**2.0)]])
 
-        self.KalmanGain, self.Q_steady = controls.kalman(
-            A=self.A, B=self.B, C=self.C, Q=self.Q, R=self.R)
+        self.KalmanGain, self.Q_steady = controls.kalman(A=self.A,
+                                                         B=self.B,
+                                                         C=self.C,
+                                                         Q=self.Q,
+                                                         R=self.R)
         self.L = self.A * self.KalmanGain
 
         self.K_unaugmented = self.K
@@ -238,9 +242,8 @@
         return p
 
     haptic_observers = [
-        IntegralHapticInput(params=DupParamsWithJ(params, j))
-        for j in numpy.logspace(
-            numpy.log10(min_J), numpy.log10(max_J), num=num_kf)
+        IntegralHapticInput(params=DupParamsWithJ(params, j)) for j in
+        numpy.logspace(numpy.log10(min_J), numpy.log10(max_J), num=num_kf)
     ]
     # Initialize all the KF's.
     haptic_observer.X_hat[1, 0] = initial_velocity
@@ -292,10 +295,9 @@
     ax2.plot(data_time, data_request_current, label='request_current')
 
     for i, kf_torque in enumerate(kf_torques):
-        ax2.plot(
-            data_time,
-            kf_torque,
-            label='-kf_torque[%f]' % haptic_observers[i].J)
+        ax2.plot(data_time,
+                 kf_torque,
+                 label='-kf_torque[%f]' % haptic_observers[i].J)
     fig.tight_layout()
     ax1.legend(loc=3)
     ax2.legend(loc=4)
@@ -374,20 +376,18 @@
                 kind="zero")(trigger_data_time)
 
             if FLAGS.rerun_kf:
-                rerun_and_plot_kf(
-                    trigger_data_time,
-                    trigger_radians,
-                    trigger_current,
-                    resampled_trigger_request_current,
-                    kTrigger,
-                    run_correct=True)
-                rerun_and_plot_kf(
-                    wheel_data_time,
-                    wheel_radians,
-                    wheel_current,
-                    resampled_wheel_request_current,
-                    kWheel,
-                    run_correct=True)
+                rerun_and_plot_kf(trigger_data_time,
+                                  trigger_radians,
+                                  trigger_current,
+                                  resampled_trigger_request_current,
+                                  kTrigger,
+                                  run_correct=True)
+                rerun_and_plot_kf(wheel_data_time,
+                                  wheel_radians,
+                                  wheel_current,
+                                  resampled_wheel_request_current,
+                                  kWheel,
+                                  run_correct=True)
             else:
                 plot_input(trigger_data_time, trigger_radians,
                            trigger_velocity, trigger_torque, trigger_current,
@@ -403,17 +403,16 @@
     else:
         namespaces = ['frc971', 'control_loops', 'drivetrain']
         for name, params, filenames in [('HapticWheel', kWheel, argv[1:5]),
-                                        ('HapticTrigger', kTrigger,
-                                         argv[5:9])]:
+                                        ('HapticTrigger', kTrigger, argv[5:9])
+                                        ]:
             haptic_input = HapticInput(params=params, name=name)
-            loop_writer = control_loop.ControlLoopWriter(
-                name, [haptic_input],
-                namespaces=namespaces,
-                scalar_type='float')
+            loop_writer = control_loop.ControlLoopWriter(name, [haptic_input],
+                                                         namespaces=namespaces,
+                                                         scalar_type='float')
             loop_writer.Write(filenames[0], filenames[1])
 
-            integral_haptic_input = IntegralHapticInput(
-                params=params, name='Integral' + name)
+            integral_haptic_input = IntegralHapticInput(params=params,
+                                                        name='Integral' + name)
             integral_loop_writer = control_loop.ControlLoopWriter(
                 'Integral' + name, [integral_haptic_input],
                 namespaces=namespaces,
diff --git a/frc971/control_loops/python/lib_spline_test.py b/frc971/control_loops/python/lib_spline_test.py
index de44d72..7413896 100644
--- a/frc971/control_loops/python/lib_spline_test.py
+++ b/frc971/control_loops/python/lib_spline_test.py
@@ -9,6 +9,7 @@
 
 
 class TestSpline(unittest.TestCase):
+
     def testSimpleSpline(self):
         points = np.array([[1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
                            [2.0, 3.0, 4.0, 5.0, 6.0, 7.0]])
@@ -18,6 +19,7 @@
 
 
 class TestDistanceSpline(unittest.TestCase):
+
     def testDistanceSpline(self):
         points = np.array([[1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
                            [2.0, 3.0, 4.0, 5.0, 6.0, 7.0]])
@@ -27,6 +29,7 @@
 
 
 class TestTrajectory(unittest.TestCase):
+
     def testTrajectory(self):
         points = np.array([[1.0, 2.0, 3.0, 4.0, 5.0, 5.0],
                            [2.0, 3.0, 4.0, 5.0, 6.0, 7.0]])
diff --git a/frc971/control_loops/python/libspline.py b/frc971/control_loops/python/libspline.py
index 9ba6f4f..5aded23 100755
--- a/frc971/control_loops/python/libspline.py
+++ b/frc971/control_loops/python/libspline.py
@@ -46,9 +46,8 @@
         assert points.shape == (2, 6)
         self.__points = points
         self.__spline = ct.c_void_p(
-            libSpline.NewSpline(
-                np.ctypeslib.as_ctypes(self.__points[0]),
-                np.ctypeslib.as_ctypes(self.__points[1])))
+            libSpline.NewSpline(np.ctypeslib.as_ctypes(self.__points[0]),
+                                np.ctypeslib.as_ctypes(self.__points[1])))
 
     def __del__(self):
         libSpline.deleteSpline(self.__spline)
@@ -58,9 +57,8 @@
         self.__points[1, index] = y
         libSpline.deleteSpline(self.__spline)
         self.__spline = ct.c_void_p(
-            libSpline.newSpline(
-                np.ctypeslib.as_ctypes(self.__points[0]),
-                np.ctypeslib.as_ctypes(self.__points[1])))
+            libSpline.newSpline(np.ctypeslib.as_ctypes(self.__points[0]),
+                                np.ctypeslib.as_ctypes(self.__points[1])))
 
     def Point(self, alpha):
         result = np.zeros(2)
diff --git a/frc971/control_loops/python/linear_system.py b/frc971/control_loops/python/linear_system.py
index 0391d93..3bda4e1 100755
--- a/frc971/control_loops/python/linear_system.py
+++ b/frc971/control_loops/python/linear_system.py
@@ -9,6 +9,7 @@
 
 
 class LinearSystemParams(object):
+
     def __init__(self,
                  name,
                  motor,
@@ -37,6 +38,7 @@
 
 
 class LinearSystem(control_loop.ControlLoop):
+
     def __init__(self, params, name='LinearSystem'):
         super(LinearSystem, self).__init__(name)
         self.params = params
@@ -56,11 +58,11 @@
 
         # State is [position, velocity]
         # Input is [Voltage]
-        C1 = self.motor.Kt / (
-            self.G * self.G * self.radius * self.radius * self.motor.resistance
-            * self.mass * self.motor.Kv)
-        C2 = self.motor.Kt / (
-            self.G * self.radius * self.motor.resistance * self.mass)
+        C1 = self.motor.Kt / (self.G * self.G * self.radius * self.radius *
+                              self.motor.resistance * self.mass *
+                              self.motor.Kv)
+        C2 = self.motor.Kt / (self.G * self.radius * self.motor.resistance *
+                              self.mass)
 
         self.A_continuous = numpy.matrix([[0, 1], [0, -C1]])
 
@@ -109,8 +111,11 @@
 
         self.R = numpy.matrix([[(self.params.kalman_r_position**2.0)]])
 
-        self.KalmanGain, self.Q_steady = controls.kalman(
-            A=self.A, B=self.B, C=self.C, Q=self.Q, R=self.R)
+        self.KalmanGain, self.Q_steady = controls.kalman(A=self.A,
+                                                         B=self.B,
+                                                         C=self.C,
+                                                         Q=self.Q,
+                                                         R=self.R)
 
         glog.debug('Kal %s', repr(self.KalmanGain))
 
@@ -123,6 +128,7 @@
 
 
 class IntegralLinearSystem(LinearSystem):
+
     def __init__(self, params, name='IntegralLinearSystem'):
         super(IntegralLinearSystem, self).__init__(params, name=name)
 
@@ -145,13 +151,16 @@
 
         self.Q = numpy.matrix([[(self.params.kalman_q_pos**2.0), 0.0, 0.0],
                                [0.0, (self.params.kalman_q_vel**2.0), 0.0],
-                               [0.0, 0.0, (self.params.kalman_q_voltage
-                                           **2.0)]])
+                               [0.0, 0.0,
+                                (self.params.kalman_q_voltage**2.0)]])
 
         self.R = numpy.matrix([[(self.params.kalman_r_position**2.0)]])
 
-        self.KalmanGain, self.Q_steady = controls.kalman(
-            A=self.A, B=self.B, C=self.C, Q=self.Q, R=self.R)
+        self.KalmanGain, self.Q_steady = controls.kalman(A=self.A,
+                                                         B=self.B,
+                                                         C=self.C,
+                                                         Q=self.Q,
+                                                         R=self.R)
 
         self.K_unaugmented = self.K
         self.K = numpy.matrix(numpy.zeros((1, 3)))
@@ -222,10 +231,10 @@
         offset_plot.append(observer.X_hat[2, 0])
         x_hat_plot.append(observer.X_hat[0, 0])
 
-        next_goal = numpy.concatenate(
-            (profile.Update(end_goal[0, 0], end_goal[1, 0]),
-             numpy.matrix(numpy.zeros((1, 1)))),
-            axis=0)
+        next_goal = numpy.concatenate((profile.Update(
+            end_goal[0, 0], end_goal[1, 0]), numpy.matrix(numpy.zeros(
+                (1, 1)))),
+                                      axis=0)
 
         ff_U = controller.Kff * (next_goal - observer.A * goal)
 
@@ -264,8 +273,8 @@
         goal = controller.A * goal + controller.B * ff_U
 
         if U[0, 0] != U_uncapped[0, 0]:
-            profile.MoveCurrentState(
-                numpy.matrix([[goal[0, 0]], [goal[1, 0]]]))
+            profile.MoveCurrentState(numpy.matrix([[goal[0, 0]], [goal[1,
+                                                                       0]]]))
 
     glog.debug('Time: %f', t_plot[-1])
     glog.debug('goal_error %s', repr(end_goal - goal))
@@ -305,15 +314,14 @@
     initial_X = numpy.matrix([[0.0], [0.0]])
     augmented_R = numpy.matrix(numpy.zeros((3, 1)))
     augmented_R[0:2, :] = R
-    RunTest(
-        plant,
-        end_goal=augmented_R,
-        controller=controller,
-        observer=observer,
-        duration=2.0,
-        use_profile=False,
-        kick_time=1.0,
-        kick_magnitude=0.0)
+    RunTest(plant,
+            end_goal=augmented_R,
+            controller=controller,
+            observer=observer,
+            duration=2.0,
+            use_profile=False,
+            kick_time=1.0,
+            kick_magnitude=0.0)
 
 
 def PlotKick(params, R, plant_params=None):
@@ -332,15 +340,14 @@
     initial_X = numpy.matrix([[0.0], [0.0]])
     augmented_R = numpy.matrix(numpy.zeros((3, 1)))
     augmented_R[0:2, :] = R
-    RunTest(
-        plant,
-        end_goal=augmented_R,
-        controller=controller,
-        observer=observer,
-        duration=2.0,
-        use_profile=False,
-        kick_time=1.0,
-        kick_magnitude=2.0)
+    RunTest(plant,
+            end_goal=augmented_R,
+            controller=controller,
+            observer=observer,
+            duration=2.0,
+            use_profile=False,
+            kick_time=1.0,
+            kick_magnitude=2.0)
 
 
 def PlotMotion(params,
@@ -366,15 +373,14 @@
     initial_X = numpy.matrix([[0.0], [0.0]])
     augmented_R = numpy.matrix(numpy.zeros((3, 1)))
     augmented_R[0:2, :] = R
-    RunTest(
-        plant,
-        end_goal=augmented_R,
-        controller=controller,
-        observer=observer,
-        duration=2.0,
-        use_profile=True,
-        max_velocity=max_velocity,
-        max_acceleration=max_acceleration)
+    RunTest(plant,
+            end_goal=augmented_R,
+            controller=controller,
+            observer=observer,
+            duration=2.0,
+            use_profile=True,
+            max_velocity=max_velocity,
+            max_acceleration=max_acceleration)
 
 
 def WriteLinearSystem(params, plant_files, controller_files, year_namespaces):
@@ -405,8 +411,9 @@
         integral_linear_systems.append(
             IntegralLinearSystem(params, 'Integral' + params.name))
 
-    loop_writer = control_loop.ControlLoopWriter(
-        name, linear_systems, namespaces=year_namespaces)
+    loop_writer = control_loop.ControlLoopWriter(name,
+                                                 linear_systems,
+                                                 namespaces=year_namespaces)
     loop_writer.AddConstant(
         control_loop.Constant('kFreeSpeed', '%f',
                               linear_systems[0].motor.free_speed))
diff --git a/frc971/control_loops/python/multispline.py b/frc971/control_loops/python/multispline.py
new file mode 100644
index 0000000..5551587
--- /dev/null
+++ b/frc971/control_loops/python/multispline.py
@@ -0,0 +1,351 @@
+from constants import *
+import numpy as np
+import scipy.optimize
+from libspline import Spline, DistanceSpline, Trajectory
+import copy
+from dataclasses import dataclass
+
+
+@dataclass
+class ControlPointIndex:
+    """Addresses one control point within a list of multisplines."""
+
+    # The index of the multispline in the list of multisplines
+    multispline_index: int
+
+    # The index of the spline in the multispline
+    spline_index: int
+
+    # The index of the control point in the spline [0-5]
+    control_point_index: int
+
+
+class Multispline():
+    """A connected sequence of splines plus the constraints used to turn
+    them into a trajectory.
+
+    Holds three parallel representations: `staged_points` (points the user
+    has placed that don't yet form a full spline), `splines` (numpy arrays
+    of control points, used for drawing/editing), and `libsplines` (Spline
+    objects for the libspline library).
+    """
+
+    def __init__(self):
+        self.staged_points = []  # Holds all points not yet in spline
+        self.libsplines = []  # Formatted for libspline library usage
+        self.splines = []  # Formatted for drawing
+        self.constraints = [  # default constraints
+            {
+                "constraint_type": "LONGITUDINAL_ACCELERATION",
+                "value": 3
+            }, {
+                "constraint_type": "LATERAL_ACCELERATION",
+                "value": 2
+            }, {
+                "constraint_type": "VOLTAGE",
+                "value": 10
+            }
+        ]
+
+    def __deepcopy__(self, memo):
+        """Copies the python-side state and rebuilds `libsplines`, which
+        wrap C pointers and therefore cannot be deep-copied directly."""
+        new_copy = Multispline()
+        new_copy.staged_points = copy.deepcopy(self.staged_points, memo)
+        new_copy.splines = copy.deepcopy(self.splines, memo)
+        new_copy.constraints = copy.deepcopy(self.constraints, memo)
+        new_copy.update_lib_spline()
+        return new_copy
+
+    def getLibsplines(self):
+        """Returns the list of libspline Spline objects."""
+        return self.libsplines
+
+    def setConstraint(self, id, value):
+        """Sets the value of the constraint whose type matches `id`.
+        Does nothing if no constraint has that type."""
+        for constraint in self.constraints:
+            if constraint["constraint_type"] == id:
+                constraint["value"] = value
+                break
+
+    def getConstraint(self, id):
+        """Returns the value of the constraint of type `id`, or None
+        (implicitly) if there is no such constraint."""
+        for constraint in self.constraints:
+            if constraint["constraint_type"] == id:
+                return constraint["value"]
+
+    def addConstraintsToTrajectory(self, trajectory):
+        """Applies every stored constraint to a libspline Trajectory."""
+        for constraint in self.constraints:
+            if constraint["constraint_type"] == "VOLTAGE":
+                trajectory.SetVoltageLimit(constraint["value"])
+            elif constraint["constraint_type"] == "LATERAL_ACCELERATION":
+                trajectory.SetLateralAcceleration(constraint["value"])
+            elif constraint["constraint_type"] == "LONGITUDINAL_ACCELERATION":
+                trajectory.SetLongitudinalAcceleration(constraint["value"])
+
+    @staticmethod
+    def splineExtrapolate(multisplines, index, *, snap=True):
+        """Adjusts the splines adjacent to the one at `index` so the path
+        stays smooth after that spline's control points moved.
+
+        Within one multispline the neighbor's first/last three control
+        points are fully determined by this spline; across a multispline
+        boundary only the shared endpoint is forced (and, with
+        `snap=True`, the headings are aligned via snapSplines).
+        """
+        multispline = multisplines[index.multispline_index]
+        spline = multispline.splines[index.spline_index]
+
+        # find the adjacent splines that will be affected by this spline
+
+        prev_multispline = None
+        prev_spline = None
+        if index.spline_index != 0:
+            prev_spline = multispline.splines[index.spline_index - 1]
+        elif index.multispline_index != 0:
+            prev_multispline = multisplines[index.multispline_index - 1]
+            prev_spline = prev_multispline.splines[-1]
+
+        next_multispline = None
+        next_spline = None
+        if spline is not multispline.splines[-1]:
+            next_spline = multispline.splines[index.spline_index + 1]
+        elif multispline is not multisplines[-1]:
+            next_multispline = multisplines[index.multispline_index + 1]
+            if next_multispline.splines:
+                next_spline = next_multispline.splines[0]
+
+        # adjust the adjacent splines according to our smoothness constraints
+        # the three points on either side are entirely constrained by this spline
+
+        if next_spline is not None:
+            f = spline[5]  # the end of the spline
+            e = spline[4]  # determines the heading
+            d = spline[3]
+            if next_multispline is None:
+                next_spline[0] = f
+                next_spline[1] = f * 2 + e * -1
+                next_spline[2] = d + f * 4 + e * -4
+            else:
+                if snap:
+                    Multispline.snapSplines(spline,
+                                            next_spline,
+                                            match_first_to_second=False)
+
+                next_spline[0] = f
+                next_multispline.update_lib_spline()
+
+        if prev_spline is not None:
+            a = spline[0]
+            b = spline[1]
+            c = spline[2]
+            if prev_multispline is None:
+                prev_spline[5] = a
+                prev_spline[4] = a * 2 + b * -1
+                prev_spline[3] = c + a * 4 + b * -4
+            else:
+                if snap:
+                    Multispline.snapSplines(prev_spline,
+                                            spline,
+                                            match_first_to_second=True)
+
+                prev_spline[5] = a
+                prev_multispline.update_lib_spline()
+
+    # NOTE(review): not decorated @staticmethod, but it is only ever called
+    # through the class (Multispline.snapSplines(...)), which works in
+    # Python 3.  Consider adding @staticmethod for clarity.
+    def snapSplines(first_spline, second_spline, *, match_first_to_second):
+        """Snaps two adjacent splines together, preserving the heading from one to another.
+
+        The end of `first_spline` connects to the beginning of `second_spline`
+        The user will have their mouse on one of these splines, so we
+        only want to snap the spline that they're not holding.
+
+        They can have the same heading, or they can have opposite headings
+        which represents the robot driving that spline backwards.
+        """
+
+        # Represent the position of the second control point (controls the heading)
+        # as a vector from the end of the spline to the control point
+        first_vector = first_spline[-2] - first_spline[-1]
+        second_vector = second_spline[1] - second_spline[0]
+
+        # we want to preserve the distance
+        first_magnitude = np.linalg.norm(first_vector, ord=2)
+        second_magnitude = np.linalg.norm(second_vector, ord=2)
+
+        # NOTE(review): divides by zero if a heading control point coincides
+        # with its endpoint -- presumably the editor prevents that; confirm.
+        normalized_first = first_vector / first_magnitude
+        normalized_second = second_vector / second_magnitude
+
+        # the proposed new vector if we were to make them point the same direction
+        swapped_second = normalized_first * second_magnitude
+        swapped_first = normalized_second * first_magnitude
+
+        # they were pointing in opposite directions
+        if np.dot(first_vector, second_vector) < 0:
+
+            # rotate the new vectors 180 degrees
+            # to keep them pointing in opposite directions
+            swapped_first = -swapped_first
+            swapped_second = -swapped_second
+
+        # Calculate how far we ended up moving the second control point
+        # so we can move the third control point with it
+        change_in_first = swapped_first - first_vector
+        change_in_second = swapped_second - second_vector
+
+        # apply the changes and discard the other proposed snap
+        if match_first_to_second:
+            first_spline[-2] = swapped_first + first_spline[-1]
+            first_spline[-3] += change_in_first
+            # swapped_second doesn't get used
+        else:
+            second_spline[1] = swapped_second + second_spline[0]
+            second_spline[2] += change_in_second
+            # swapped_first doesn't get used
+
+    def updates_for_mouse_move(self, multisplines, index, mouse):
+        """Moves the control point and adjacent points to follow the mouse"""
+        if index == None: return  # NOTE(review): prefer `index is None`
+        spline_edit = index.spline_index
+        index_of_edit = index.control_point_index
+
+        spline = self.splines[spline_edit]
+
+        # we want to move it to be on the mouse
+        diffs = mouse - spline[index_of_edit]
+
+        spline[index_of_edit] = mouse
+
+        # all three points move together with the endpoint
+        if index_of_edit == 5:
+            spline[3] += diffs
+            spline[4] += diffs
+
+            # check if the next multispline exists and has a spline
+            if index.spline_index == len(
+                    self.splines) - 1 and not index.multispline_index == len(
+                        multisplines) - 1 and len(
+                            multisplines[index.multispline_index +
+                                         1].splines) > 0:
+                # move the points that lay across the multispline boundary
+                other_spline = multisplines[index.multispline_index +
+                                            1].splines[0]
+
+                other_spline[1] += diffs
+                other_spline[2] += diffs
+
+        # mirror of the endpoint case above, for the start of the spline
+        if index_of_edit == 0:
+            spline[2] += diffs
+            spline[1] += diffs
+
+            # check if previous multispline exists
+            if index.spline_index == 0 and not index.multispline_index == 0:
+                other_spline = multisplines[index.multispline_index -
+                                            1].splines[-1]
+
+                other_spline[3] += diffs
+                other_spline[4] += diffs
+
+        # the third point moves with the second point
+        if index_of_edit == 4:
+            spline[3] += diffs
+
+        if index_of_edit == 1:
+            spline[2] += diffs
+
+        # fix up the neighbors without re-snapping the spline being dragged
+        Multispline.splineExtrapolate(multisplines, index, snap=False)
+
+    def update_lib_spline(self):
+        """Rebuilds `libsplines` from the control points in `splines`."""
+        self.libsplines = []
+        array = np.zeros(shape=(6, 2), dtype=float)
+        for points in self.splines:
+            for j, point in enumerate(points):
+                array[j, 0] = point[0]
+                array[j, 1] = point[1]
+            # Spline expects a contiguous 2x6 array of [xs; ys]
+            spline = Spline(np.ascontiguousarray(np.transpose(array)))
+            self.libsplines.append(spline)
+
+    @staticmethod
+    def nearest_distance(multisplines, point):
+        """Finds the spot along the multisplines that is closest to a
+        given point on the field
+
+        Returns the closest multispline and the distance along that
+        multispline (a scipy OptimizeResult); both are None when no
+        minimization succeeded (e.g. `multisplines` is empty).
+        """
+
+        def distance(t, distance_spline, point):
+            return np.sum((distance_spline.XY(t) - point)**2)
+
+        # We know the derivative of the function,
+        # so scipy doesn't need to compute it every time
+        def ddistance(t, distance_spline, point):
+            return np.sum(2 * (distance_spline.XY(t) - point) *
+                          distance_spline.DXY(t))
+
+        best_result = None
+        best_multispline = None
+
+        for multispline_index, multispline in enumerate(multisplines):
+            distance_spline = DistanceSpline(multispline.getLibsplines())
+
+            # The optimizer finds local minima that often aren't what we want,
+            # so try from multiple locations to find a better minimum.
+            guess_points = np.linspace(0, distance_spline.Length(), num=5)
+
+            for guess in guess_points:
+                result = scipy.optimize.minimize(
+                    distance,
+                    guess,
+                    args=(distance_spline, point),
+                    bounds=((0, distance_spline.Length()), ),
+                    jac=ddistance,
+                )
+
+                if result.success and (best_result == None
+                                       or result.fun < best_result.fun):
+                    best_result = result
+                    best_multispline = multispline
+
+        return (best_multispline, best_result)
+
+    def toJsonObject(self):
+        """Serializes the splines and constraints into a JSON-ready dict.
+
+        The endpoint shared by two consecutive splines is stored once."""
+        multi_spline = {
+            "spline_count": 0,
+            "spline_x": [],
+            "spline_y": [],
+            "constraints": self.constraints,
+        }
+        for points in self.splines:
+            multi_spline["spline_count"] += 1
+            for j, point in enumerate(points):
+                if j == 0 and multi_spline["spline_count"] > 1:
+                    continue  # skip overlapping points
+                multi_spline["spline_x"].append(point[0])
+                multi_spline["spline_y"].append(point[1])
+        return multi_spline
+
+    @staticmethod
+    def fromJsonObject(multi_spline):
+        """Builds a Multispline from the dict format written by
+        toJsonObject, re-duplicating the shared endpoints."""
+        multispline = Multispline()
+        multispline.constraints = multi_spline["constraints"]
+        multispline.splines = []
+        multispline.staged_points = []
+
+        i = 0
+        for j in range(multi_spline["spline_count"]):
+            # get the last point of the last spline
+            # and read in another 6 points
+            # (`i` persists between iterations, so each spline after the
+            # first starts on the previous spline's final point)
+            for i in range(i, i + 6):
+                multispline.staged_points.append(
+                    [multi_spline["spline_x"][i], multi_spline["spline_y"][i]])
+            multispline.splines.append(np.array(multispline.staged_points))
+            multispline.staged_points = []
+        multispline.update_lib_spline()
+
+        return multispline
+
+    def getSplines(self):
+        """Returns the control-point arrays used for drawing."""
+        return self.splines
+
+    def setControlPoint(self, index, x, y):
+        """Overwrites the control point addressed by a ControlPointIndex."""
+        self.splines[index.spline_index][index.control_point_index] = [x, y]
+
+    def addPoint(self, x, y):
+        """Stages a point; completes a spline once 6 points are staged.
+
+        Returns True when a spline was just completed, otherwise returns
+        None implicitly."""
+        if (len(self.staged_points) < 6):
+            self.staged_points.append([x, y])
+        if (len(self.staged_points) == 6):
+            self.splines.append(np.array(self.staged_points))
+            self.staged_points = []
+            self.update_lib_spline()
+            return True
+
+    def extrapolate(self, spline):
+        """Stages 3 points extrapolated from the end of the multispline"""
+
+        self.staged_points = []
+
+        point1 = spline[5]
+        point2 = spline[4]
+        point3 = spline[3]
+
+        # mirror the last three points through the endpoint so the new
+        # spline continues with the same heading and curvature
+        self.staged_points.append(point1)
+        self.staged_points.append(point1 * 2 - point2)
+        self.staged_points.append(point3 + point1 * 4 - point2 * 4)
diff --git a/frc971/control_loops/python/path_edit.py b/frc971/control_loops/python/path_edit.py
index 5070988..b66a668 100755
--- a/frc971/control_loops/python/path_edit.py
+++ b/frc971/control_loops/python/path_edit.py
@@ -6,17 +6,19 @@
 from graph import Graph
 import gi
 import numpy as np
+
 gi.require_version('Gtk', '3.0')
 from gi.repository import Gdk, Gtk, GLib
 import cairo
-from libspline import Spline
+from libspline import Spline, DistanceSpline
 import enum
 import json
+import copy
 from constants import FIELD
 from constants import get_json_folder
 from constants import ROBOT_SIDE_TO_BALL_CENTER, ROBOT_SIDE_TO_HATCH_PANEL, HATCH_PANEL_WIDTH, BALL_RADIUS
 from drawing_constants import set_color, draw_px_cross, draw_px_x, display_text, draw_control_points
-from points import Points
+from multispline import Multispline, ControlPointIndex
 import time
 
 
@@ -32,15 +34,15 @@
     def __init__(self):
         super(FieldWidget, self).__init__()
         self.set_field(FIELD)
-        self.set_size_request(
-            self.mToPx(self.field.width), self.mToPx(self.field.length))
+        self.set_size_request(self.mToPx(self.field.width),
+                              self.mToPx(self.field.length))
 
-        self.points = Points()
+        self.multisplines = []
         self.graph = Graph()
+        self.graph.cursor_watcher = self
         self.set_vexpand(True)
         self.set_hexpand(True)
-        # list of multisplines
-        self.multispline_stack = []
+        self.undo_history = []
         # init field drawing
         # add default spline for testing purposes
         # init editing / viewing modes and pointer location
@@ -52,9 +54,8 @@
                                            'points_for_pathedit.json')
 
         # For the editing mode
-        self.index_of_edit = -1  # Can't be zero beause array starts at 0
-        self.held_x = 0
-        self.spline_edit = -1
+        self.control_point_index = None
+        self.active_multispline_index = 0
 
         self.zoom_transform = cairo.Matrix()
 
@@ -64,6 +65,15 @@
                         | Gdk.EventMask.POINTER_MOTION_MASK
                         | Gdk.EventMask.SCROLL_MASK)
 
+    @property
+    def active_multispline(self):
+        """Get the current active multispline or create a new one"""
+        if not self.multisplines:
+            self.multisplines.append(Multispline())
+            self.active_multispline_index = len(self.multisplines) - 1
+
+        return self.multisplines[self.active_multispline_index]
+
     def set_field(self, field):
         self.field = field
         try:
@@ -90,9 +100,10 @@
     @property
     def field_transform(self):
         field_transform = cairo.Matrix()
-        field_transform.scale(1, -1) # flipped y-axis
+        field_transform.scale(1, -1)  # flipped y-axis
         field_transform.scale(1 / self.pxToM_scale(), 1 / self.pxToM_scale())
-        field_transform.translate(self.field.width / 2,  -1 * self.field.length / 2)
+        field_transform.translate(self.field.width / 2,
+                                  -1 * self.field.length / 2)
         return field_transform
 
     # returns the scale from pixels in field space to meters in field space
@@ -107,120 +118,55 @@
     def mToPx(self, m):
         return m / self.pxToM_scale()
 
-    def draw_robot_at_point(self, cr, i, p, spline):
-        p1 = [spline.Point(i)[0], spline.Point(i)[1]]
-        p2 = [
-            spline.Point(i + p)[0],
-            spline.Point(i + p)[1]
-        ]
+    def draw_robot_at_point(self, cr, spline, t):
+        """Draws the robot at a point along a Spline or DistanceSpline"""
 
-        #Calculate Robot
-        distance = np.sqrt((p2[1] - p1[1])**2 + (p2[0] - p1[0])**2)
-        x_difference_o = p2[0] - p1[0]
-        y_difference_o = p2[1] - p1[1]
-        x_difference = x_difference_o * (
-            self.field.robot.length / 2) / distance
-        y_difference = y_difference_o * (
-            self.field.robot.length / 2) / distance
+        # we accept both Spline and DistanceSpline
+        if type(spline) is Spline:
+            point = spline.Point(t)
+            theta = spline.Theta(t)
+        elif type(spline) is DistanceSpline:
+            point = spline.XY(t)
+            theta = spline.Theta(t)
+        else:
+            raise TypeError(
+                f"expected Spline or DistanceSpline (got {type(spline)})")
 
-        front_middle = []
-        front_middle.append(p1[0] + x_difference)
-        front_middle.append(p1[1] + y_difference)
+        # Transform so that +y is forward along the spline
+        transform = cairo.Matrix()
+        transform.translate(*point)
+        transform.rotate(theta - np.pi / 2)
 
-        back_middle = []
-        back_middle.append(p1[0] - x_difference)
-        back_middle.append(p1[1] - y_difference)
+        cr.save()
+        cr.set_matrix(transform.multiply(cr.get_matrix()))
 
-        slope = [-(1 / x_difference_o) / (1 / y_difference_o)]
-        angle = np.arctan(slope)
-
-        x_difference = np.sin(angle[0]) * (
-            self.field.robot.width / 2)
-        y_difference = np.cos(angle[0]) * (
-            self.field.robot.width / 2)
-
-        front_1 = []
-        front_1.append(front_middle[0] - x_difference)
-        front_1.append(front_middle[1] - y_difference)
-
-        front_2 = []
-        front_2.append(front_middle[0] + x_difference)
-        front_2.append(front_middle[1] + y_difference)
-
-        back_1 = []
-        back_1.append(back_middle[0] - x_difference)
-        back_1.append(back_middle[1] - y_difference)
-
-        back_2 = []
-        back_2.append(back_middle[0] + x_difference)
-        back_2.append(back_middle[1] + y_difference)
-
-        x_difference = x_difference_o * (
-            self.field.robot.length / 2 + ROBOT_SIDE_TO_BALL_CENTER) / distance
-        y_difference = y_difference_o * (
-            self.field.robot.length / 2 + ROBOT_SIDE_TO_BALL_CENTER) / distance
-
-        #Calculate Ball
-        ball_center = []
-        ball_center.append(p1[0] + x_difference)
-        ball_center.append(p1[1] + y_difference)
-
-        x_difference = x_difference_o * (
-            self.field.robot.length / 2 + ROBOT_SIDE_TO_HATCH_PANEL) / distance
-        y_difference = y_difference_o * (
-            self.field.robot.length / 2 + ROBOT_SIDE_TO_HATCH_PANEL) / distance
-
-        #Calculate Panel
-        panel_center = []
-        panel_center.append(p1[0] + x_difference)
-        panel_center.append(p1[1] + y_difference)
-
-        x_difference = np.sin(angle[0]) * (HATCH_PANEL_WIDTH / 2)
-        y_difference = np.cos(angle[0]) * (HATCH_PANEL_WIDTH / 2)
-
-        panel_1 = []
-        panel_1.append(panel_center[0] + x_difference)
-        panel_1.append(panel_center[1] + y_difference)
-
-        panel_2 = []
-        panel_2.append(panel_center[0] - x_difference)
-        panel_2.append(panel_center[1] - y_difference)
-
-        #Draw Robot
-        cr.move_to(front_1[0], front_1[1])
-        cr.line_to(back_1[0], back_1[1])
-        cr.line_to(back_2[0], back_2[1])
-        cr.line_to(front_2[0], front_2[1])
-        cr.line_to(front_1[0], front_1[1])
-
+        # Draw Robot
+        set_color(cr, palette["BLACK"])
+        cr.rectangle(-self.field.robot.width / 2, -self.field.robot.length / 2,
+                     self.field.robot.width, self.field.robot.length)
         cr.stroke()
 
         #Draw Ball
         set_color(cr, palette["ORANGE"], 0.5)
-        cr.move_to(back_middle[0], back_middle[1])
-        cr.line_to(ball_center[0], ball_center[1])
-        cr.arc(ball_center[0], ball_center[1], BALL_RADIUS, 0,
+        cr.arc(0, self.field.robot.length / 2 + BALL_RADIUS, BALL_RADIUS, 0,
                2 * np.pi)
         cr.stroke()
 
-        #Draw Panel
-        set_color(cr, palette["YELLOW"], 0.5)
-        cr.move_to(panel_1[0], panel_1[1])
-        cr.line_to(panel_2[0], panel_2[1])
-
-        cr.stroke()
-        cr.set_source_rgba(0, 0, 0, 1)
+        # undo the transform
+        cr.restore()
 
     def do_draw(self, cr):  # main
-        cr.set_matrix(self.field_transform.multiply(self.zoom_transform).multiply(cr.get_matrix()))
+        cr.set_matrix(
+            self.field_transform.multiply(self.zoom_transform).multiply(
+                cr.get_matrix()))
 
         cr.save()
 
         set_color(cr, palette["BLACK"])
 
         cr.set_line_width(self.pxToM(1))
-        cr.rectangle(-0.5 * self.field.width, -0.5 * self.field.length, self.field.width,
-                     self.field.length)
+        cr.rectangle(-0.5 * self.field.width, -0.5 * self.field.length,
+                     self.field.width, self.field.length)
         cr.set_line_join(cairo.LINE_JOIN_ROUND)
         cr.stroke()
 
@@ -240,58 +186,88 @@
         cr.set_line_width(self.pxToM(1))
         if self.mode == Mode.kPlacing or self.mode == Mode.kViewing:
             set_color(cr, palette["BLACK"])
-            for i, point in enumerate(self.points.getPoints()):
-                draw_px_x(cr, point[0], point[1], self.pxToM(2))
-            set_color(cr, palette["WHITE"])
+            for multispline in self.multisplines:
+                for i, point in enumerate(multispline.staged_points):
+                    draw_px_x(cr, point[0], point[1], self.pxToM(2))
         elif self.mode == Mode.kEditing:
-            set_color(cr, palette["BLACK"])
-            if self.points.getSplines():
+            if len(self.multisplines) != 0 and self.multisplines[0].getSplines(
+            ):
                 self.draw_splines(cr)
-                for i, points in enumerate(self.points.getSplines()):
-                    points = [
-                        np.array([x, y])
-                        for (x, y) in points
-                    ]
-                    draw_control_points(cr, points, width=self.pxToM(5), radius=self.pxToM(2))
+
+                for i, points in enumerate(
+                        self.active_multispline.getSplines()):
+                    points = [np.array([x, y]) for (x, y) in points]
+                    draw_control_points(cr,
+                                        points,
+                                        width=self.pxToM(5),
+                                        radius=self.pxToM(2))
 
                     p0, p1, p2, p3, p4, p5 = points
                     first_tangent = p0 + 2.0 * (p1 - p0)
                     second_tangent = p5 + 2.0 * (p4 - p5)
                     cr.set_source_rgb(0, 0.5, 0)
-                    cr.move_to(p0[0], p0[1])
+                    cr.move_to(*p0)
                     cr.set_line_width(self.pxToM(1.0))
-                    cr.line_to(first_tangent[0], first_tangent[1])
-                    cr.move_to(first_tangent[0], first_tangent[1])
-                    cr.line_to(p2[0], p2[1])
+                    cr.line_to(*first_tangent)
+                    cr.move_to(*first_tangent)
+                    cr.line_to(*p2)
 
-                    cr.move_to(p5[0], p5[1])
-                    cr.line_to(second_tangent[0], second_tangent[1])
+                    cr.move_to(*p5)
+                    cr.line_to(*second_tangent)
 
-                    cr.move_to(second_tangent[0], second_tangent[1])
-                    cr.line_to(p3[0], p3[1])
+                    cr.move_to(*second_tangent)
+                    cr.line_to(*p3)
 
                     cr.stroke()
                     cr.set_line_width(self.pxToM(2))
-            set_color(cr, palette["WHITE"])
 
+        set_color(cr, palette["WHITE"])
         cr.paint_with_alpha(0.2)
 
         draw_px_cross(cr, self.mousex, self.mousey, self.pxToM(2))
         cr.restore()
 
     def draw_splines(self, cr):
-        for i, spline in enumerate(self.points.getLibsplines()):
-            for k in np.linspace(0.02, 1, 200):
-                cr.move_to(
-                    spline.Point(k - 0.008)[0],
-                    spline.Point(k - 0.008)[1])
-                cr.line_to(
-                    spline.Point(k)[0],
-                    spline.Point(k)[1])
-                cr.stroke()
-            if i == 0:
-                self.draw_robot_at_point(cr, 0, 0.008, spline)
-            self.draw_robot_at_point(cr, 1, 0.008, spline)
+        mouse = np.array((self.mousex, self.mousey))
+
+        multispline, result = Multispline.nearest_distance(
+            self.multisplines, mouse)
+
+        if self.graph.cursor is not None and self.graph.data is not None:
+            multispline_index, x = self.graph.find_cursor()
+            distance_spline = DistanceSpline(
+                self.multisplines[multispline_index].getLibsplines())
+
+            self.draw_robot_at_point(cr, distance_spline, x)
+
+            # clear the cursor each draw so it doesn't persist
+            # after you move off the spline
+            self.graph.cursor = None
+        elif result is not None and result.fun < 2:
+            distance_spline = DistanceSpline(multispline.getLibsplines())
+            x = result.x[0]
+
+            # draw the robot to show its width
+            self.draw_robot_at_point(cr, distance_spline, x)
+
+            multispline_index = self.multisplines.index(multispline)
+            self.graph.place_cursor(multispline_index, distance=result.x[0])
+
+        for multispline in self.multisplines:
+            for i, spline in enumerate(multispline.getLibsplines()):
+                alpha = 1 if multispline == self.active_multispline else 0.2
+                set_color(cr, palette["BLACK"], alpha)
+
+                # draw lots of really small line segments to
+                # approximate the shape of the spline
+                for k in np.linspace(0.005, 1, 200):
+                    cr.move_to(*spline.Point(k - 0.008))
+                    cr.line_to(*spline.Point(k))
+                    cr.stroke()
+
+                if i == 0:
+                    self.draw_robot_at_point(cr, spline, 0)
+                self.draw_robot_at_point(cr, spline, 1)
 
     def export_json(self, file_name):
         self.path_to_export = os.path.join(
@@ -302,10 +278,12 @@
         )
 
         # Will export to json file
-        multi_spline = self.points.toMultiSpline()
-        print(multi_spline)
+        multisplines_object = [
+            multispline.toJsonObject() for multispline in self.multisplines
+        ]
+        print(multisplines_object)
         with open(self.path_to_export, mode='w') as points_file:
-            json.dump(multi_spline, points_file)
+            json.dump(multisplines_object, points_file)
 
     def import_json(self, file_name):
         self.path_to_export = os.path.join(
@@ -318,41 +296,55 @@
         # import from json file
         print("LOADING LOAD FROM " + file_name)  # Load takes a few seconds
         with open(self.path_to_export) as points_file:
-            multi_spline = json.load(points_file)
+            multisplines_object = json.load(points_file)
+
+        self.attempt_append_multisplines()
+
+        # TODO: Export multisplines in different files
+        if type(multisplines_object) is dict:
+            multisplines_object = [multisplines_object]
+        else:
+            self.multisplines = []
 
         # if people messed with the spline json,
         # it might not be the right length
         # so give them a nice error message
-        try:  # try to salvage as many segments of the spline as possible
-            self.points.fromMultiSpline(multi_spline)
-        except IndexError:
-            # check if they're both 6+5*(k-1) long
-            expected_length = 6 + 5 * (multi_spline["spline_count"] - 1)
-            x_len = len(multi_spline["spline_x"])
-            y_len = len(multi_spline["spline_x"])
-            if x_len is not expected_length:
-                print(
-                    "Error: spline x values were not the expected length; expected {} got {}"
-                    .format(expected_length, x_len))
-            elif y_len is not expected_length:
-                print(
-                    "Error: spline y values were not the expected length; expected {} got {}"
-                    .format(expected_length, y_len))
+        for multispline_object in multisplines_object:
+            print(multispline_object)
+            try:  # try to salvage as many segments of the spline as possible
+                self.multisplines.append(
+                    Multispline.fromJsonObject(multispline_object))
+            except IndexError:
+                # check if they're both 6+5*(k-1) long
+                expected_length = 6 + 5 * (multispline_object["spline_count"] -
+                                           1)
+                x_len = len(multispline_object["spline_x"])
+                y_len = len(multispline_object["spline_y"])
+                if x_len != expected_length:
+                    print(
+                        "Error: spline x values were not the expected length; expected {} got {}"
+                        .format(expected_length, x_len))
+                elif y_len != expected_length:
+                    print(
+                        "Error: spline y values were not the expected length; expected {} got {}"
+                        .format(expected_length, y_len))
 
         print("SPLINES LOADED")
         self.mode = Mode.kEditing
         self.queue_draw()
-        self.graph.schedule_recalculate(self.points)
+        self.graph.schedule_recalculate(self.multisplines)
 
-    def attempt_append_multispline(self):
-        if (len(self.multispline_stack) == 0 or
-        self.points.toMultiSpline() != self.multispline_stack[-1]):
-            self.multispline_stack.append(self.points.toMultiSpline())
+    def attempt_append_multisplines(self):
+        if len(self.undo_history
+               ) == 0 or self.multisplines != self.undo_history[-1]:
+            self.undo_history.append(copy.deepcopy(self.multisplines))
 
-    def clear_graph(self, should_attempt_append=True):
+    def clear(self, should_attempt_append=True):
         if should_attempt_append:
-            self.attempt_append_multispline()
-        self.points = Points()
+            self.attempt_append_multisplines()
+        self.multisplines = []
+        self.active_multispline_index = 0
+        self.control_point_index = None
         #recalulate graph using new points
         self.graph.axis.clear()
         self.graph.queue_draw()
@@ -361,26 +353,24 @@
         #redraw entire graph
         self.queue_draw()
 
-
     def undo(self):
         try:
-            self.multispline_stack.pop()
+            self.undo_history.pop()
         except IndexError:
             return
-        if len(self.multispline_stack) == 0:
-            self.clear_graph(should_attempt_append=False) #clear, don't do anything
+        if len(self.undo_history) == 0:
+            self.clear(should_attempt_append=False)  #clear, don't do anything
             return
-        multispline = self.multispline_stack[-1]
-        if multispline['spline_count'] > 0:
-            self.points.fromMultiSpline(multispline)
-            self.mode= Mode.kEditing
+        if len(self.multisplines) > 0 and not any(
+                multispline.staged_points
+                for multispline in self.multisplines):
+            self.mode = Mode.kEditing
         else:
             self.mode = Mode.kPlacing
-            self.clear_graph(should_attempt_append=False)
+            self.clear(should_attempt_append=False)
+        self.multisplines = copy.deepcopy(self.undo_history[-1])
         self.queue_draw()
 
-
-
     def do_key_press_event(self, event):
         keyval = Gdk.keyval_to_lower(event.keyval)
         if keyval == Gdk.KEY_z and event.state & Gdk.ModifierType.CONTROL_MASK:
@@ -391,85 +381,101 @@
             # F0 = A1
             # B1 = 2F0 - E0
             # C1= d0 + 4F0 - 4E0
-            spline_index = len(self.points.getSplines()) - 1
-            self.points.resetPoints()
-            self.points.extrapolate(
-                self.points.getSplines()[len(self.points.getSplines()) - 1][5],
-                self.points.getSplines()[len(self.points.getSplines()) - 1][4],
-                self.points.getSplines()[len(self.points.getSplines()) - 1][3])
+            multispline = self.active_multispline
+            multispline.extrapolate()
+            self.queue_draw()
+        elif keyval == Gdk.KEY_m:
+            self.mode = Mode.kPlacing
+            self.active_multispline_index += 1
+            self.multisplines.insert(self.active_multispline_index,
+                                     Multispline())
+
+            prev_multispline = self.multisplines[self.active_multispline_index
+                                                 - 1]
+            if prev_multispline:
+                self.active_multispline.extrapolate(
+                    prev_multispline.getSplines()[-1])
             self.queue_draw()
 
     def do_button_release_event(self, event):
-        self.attempt_append_multispline()
+        self.attempt_append_multisplines()
         self.mousex, self.mousey = self.input_transform.transform_point(
             event.x, event.y)
         if self.mode == Mode.kEditing:
-            if self.index_of_edit > -1:
-                self.points.setSplines(self.spline_edit, self.index_of_edit,
-                                       self.mousex,
-                                       self.mousey)
+            if self.control_point_index != None:
+                multispline = self.multisplines[
+                    self.control_point_index.multispline_index]
 
-                self.points.splineExtrapolate(self.spline_edit)
+                multispline.setControlPoint(self.control_point_index,
+                                            self.mousex, self.mousey)
 
-                self.points.update_lib_spline()
-                self.graph.schedule_recalculate(self.points)
+                Multispline.splineExtrapolate(self.multisplines,
+                                              self.control_point_index)
 
-                self.index_of_edit = -1
-                self.spline_edit = -1
+                multispline.update_lib_spline()
+                self.graph.schedule_recalculate(self.multisplines)
+                self.queue_draw()
+
+                self.control_point_index = None
 
     def do_button_press_event(self, event):
         self.mousex, self.mousey = self.input_transform.transform_point(
             event.x, event.y)
 
         if self.mode == Mode.kPlacing:
-            if self.points.add_point(
-                    self.mousex, self.mousey):
+            if self.active_multispline.addPoint(self.mousex, self.mousey):
                 self.mode = Mode.kEditing
-                self.graph.schedule_recalculate(self.points)
+                self.graph.schedule_recalculate(self.multisplines)
         elif self.mode == Mode.kEditing:
-            # Now after index_of_edit is not -1, the point is selected, so
-            # user can click for new point
-            if self.index_of_edit == -1:
+            # Now after we have no control point index,
+            # the user can click for new point
+            if self.control_point_index == None:
                 # Get clicked point
                 # Find nearest
                 # Move nearest to clicked
                 cur_p = [self.mousex, self.mousey]
+
                 # Get the distance between each for x and y
                 # Save the index of the point closest
-                nearest = 1  # Max distance away a the selected point can be in meters
+                nearest = 0.4  # Max distance away the selected point can be in meters
                 index_of_closest = 0
+                index_multisplines = self.active_multispline_index
+                multispline = self.active_multispline
+
                 for index_splines, points in enumerate(
-                        self.points.getSplines()):
+                        multispline.getSplines()):
                     for index_points, val in enumerate(points):
                         distance = np.sqrt((cur_p[0] - val[0])**2 +
                                            (cur_p[1] - val[1])**2)
                         if distance < nearest:
                             nearest = distance
                             index_of_closest = index_points
-                            print("Nearest: " + str(nearest))
-                            print("Index: " + str(index_of_closest))
-                            self.index_of_edit = index_of_closest
-                            self.spline_edit = index_splines
-                            self.held_x = self.mousex
+                            self.control_point_index = ControlPointIndex(
+                                index_multisplines, index_splines,
+                                index_points)
+
+            multispline, result = Multispline.nearest_distance(
+                self.multisplines, cur_p)
+            if result and result.fun < 0.1:
+                self.active_multispline_index = self.multisplines.index(
+                    multispline)
+
         self.queue_draw()
 
     def do_motion_notify_event(self, event):
-        old_x = self.mousex
-        old_y = self.mousey
         self.mousex, self.mousey = self.input_transform.transform_point(
             event.x, event.y)
-        dif_x = self.mousex - old_x
-        dif_y = self.mousey - old_y
-        difs = np.array([dif_x, dif_y])
+        mouse = np.array([self.mousex, self.mousey])
 
-        if self.mode == Mode.kEditing and self.spline_edit != -1:
-            self.points.updates_for_mouse_move(self.index_of_edit,
-                                               self.spline_edit,
-                                               self.mousex,
-                                               self.mousey, difs)
+        if self.mode == Mode.kEditing and self.control_point_index != None:
+            multispline = self.multisplines[
+                self.control_point_index.multispline_index]
 
-            self.points.update_lib_spline()
-            self.graph.schedule_recalculate(self.points)
+            multispline.updates_for_mouse_move(self.multisplines,
+                                               self.control_point_index, mouse)
+
+            multispline.update_lib_spline()
+            self.graph.schedule_recalculate(self.multisplines)
         self.queue_draw()
 
     def do_scroll_event(self, event):
@@ -497,7 +503,8 @@
             scale = min(scale, 1)
 
         # undo the scaled translation that the old zoom transform did
-        x, y = self.invert(self.zoom_transform).transform_point(event.x, event.y)
+        x, y = self.invert(self.zoom_transform).transform_point(
+            event.x, event.y)
 
         # move the origin to point
         self.zoom_transform.translate(x, y)
diff --git a/frc971/control_loops/python/points.py b/frc971/control_loops/python/points.py
deleted file mode 100644
index 5fc4c4a..0000000
--- a/frc971/control_loops/python/points.py
+++ /dev/null
@@ -1,172 +0,0 @@
-from constants import *
-import numpy as np
-from libspline import Spline, DistanceSpline, Trajectory
-import copy
-
-class Points():
-    def __init__(self):
-        self.points = []  # Holds all points not yet in spline
-        self.libsplines = []  # Formatted for libspline library usage
-        self.splines = []  # Formatted for drawing
-        self.constraints = [  # default constraints
-            {
-                "constraint_type": "LONGITUDINAL_ACCELERATION",
-                "value": 3
-            }, {
-                "constraint_type": "LATERAL_ACCELERATION",
-                "value": 2
-            }, {
-                "constraint_type": "VOLTAGE",
-                "value": 10
-            }
-        ]
-
-    def __deepcopy__(self, memo):
-        new_copy = Points()
-        new_copy.points = copy.deepcopy(self.points, memo)
-        new_copy.splines = copy.deepcopy(self.splines, memo)
-        new_copy.constraints = copy.deepcopy(self.constraints, memo)
-        new_copy.update_lib_spline()
-        return new_copy
-
-    def getPoints(self):
-        return self.points
-
-    def resetPoints(self):
-        self.points = []
-
-    def getLibsplines(self):
-        return self.libsplines
-
-    def setConstraint(self, id, value):
-        for constraint in self.constraints:
-            if constraint["constraint_type"] == id:
-                constraint["value"] = value
-                break
-
-    def getConstraint(self, id):
-        for constraint in self.constraints:
-            if constraint["constraint_type"] == id:
-                return constraint["value"]
-
-    def addConstraintsToTrajectory(self, trajectory):
-        for constraint in self.constraints:
-            if constraint["constraint_type"] == "VOLTAGE":
-                trajectory.SetVoltageLimit(constraint["value"])
-            elif constraint["constraint_type"] == "LATERAL_ACCELERATION":
-                trajectory.SetLateralAcceleration(constraint["value"])
-            elif constraint["constraint_type"] == "LONGITUDINAL_ACCELERATION":
-                trajectory.SetLongitudinalAcceleration(constraint["value"])
-
-    def splineExtrapolate(self, o_spline_edit):
-        spline_edit = o_spline_edit
-        if not spline_edit == len(self.splines) - 1:
-            f = self.splines[spline_edit][5]
-            e = self.splines[spline_edit][4]
-            d = self.splines[spline_edit][3]
-            self.splines[spline_edit + 1][0] = f
-            self.splines[spline_edit + 1][1] = f * 2 + e * -1
-            self.splines[spline_edit + 1][2] = d + f * 4 + e * -4
-
-        if not spline_edit == 0:
-            a = self.splines[spline_edit][0]
-            b = self.splines[spline_edit][1]
-            c = self.splines[spline_edit][2]
-            self.splines[spline_edit - 1][5] = a
-            self.splines[spline_edit - 1][4] = a * 2 + b * -1
-            self.splines[spline_edit - 1][3] = c + a * 4 + b * -4
-
-    def updates_for_mouse_move(self, index_of_edit, spline_edit, x, y, difs):
-        if index_of_edit > -1:
-            self.splines[spline_edit][index_of_edit] = [x, y]
-
-            if index_of_edit == 5:
-                self.splines[spline_edit][
-                    index_of_edit -
-                    2] = self.splines[spline_edit][index_of_edit - 2] + difs
-                self.splines[spline_edit][
-                    index_of_edit -
-                    1] = self.splines[spline_edit][index_of_edit - 1] + difs
-
-            if index_of_edit == 0:
-                self.splines[spline_edit][
-                    index_of_edit +
-                    2] = self.splines[spline_edit][index_of_edit + 2] + difs
-                self.splines[spline_edit][
-                    index_of_edit +
-                    1] = self.splines[spline_edit][index_of_edit + 1] + difs
-
-            if index_of_edit == 4:
-                self.splines[spline_edit][
-                    index_of_edit -
-                    1] = self.splines[spline_edit][index_of_edit - 1] + difs
-
-            if index_of_edit == 1:
-                self.splines[spline_edit][
-                    index_of_edit +
-                    1] = self.splines[spline_edit][index_of_edit + 1] + difs
-
-            self.splineExtrapolate(spline_edit)
-
-    def update_lib_spline(self):
-        self.libsplines = []
-        array = np.zeros(shape=(6, 2), dtype=float)
-        for points in self.splines:
-            for j, point in enumerate(points):
-                array[j, 0] = point[0]
-                array[j, 1] = point[1]
-            spline = Spline(np.ascontiguousarray(np.transpose(array)))
-            self.libsplines.append(spline)
-
-    def toMultiSpline(self):
-        multi_spline = {
-            "spline_count": 0,
-            "spline_x": [],
-            "spline_y": [],
-            "constraints": self.constraints,
-        }
-        for points in self.splines:
-            multi_spline["spline_count"] += 1
-            for j, point in enumerate(points):
-                if j == 0 and multi_spline["spline_count"] > 1:
-                    continue  # skip overlapping points
-                multi_spline["spline_x"].append(point[0])
-                multi_spline["spline_y"].append(point[1])
-        return multi_spline
-
-    def fromMultiSpline(self, multi_spline):
-        self.constraints = multi_spline["constraints"]
-        self.splines = []
-        self.points = []
-
-        i = 0
-        for j in range(multi_spline["spline_count"]):
-            # get the last point of the last spline
-            # and read in another 6 points
-            for i in range(i, i + 6):
-                self.points.append(
-                    [multi_spline["spline_x"][i], multi_spline["spline_y"][i]])
-            self.splines.append(np.array(self.points))
-            self.points = []
-        self.update_lib_spline()
-
-    def getSplines(self):
-        return self.splines
-
-    def setSplines(self, spline_edit, index_of_edit, x, y):
-        self.splines[spline_edit][index_of_edit] = [x, y]
-
-    def add_point(self, x, y):
-        if (len(self.points) < 6):
-            self.points.append([x, y])
-        if (len(self.points) == 6):
-            self.splines.append(np.array(self.points))
-            self.points = []
-            self.update_lib_spline()
-            return True
-
-    def extrapolate(self, point1, point2,
-                    point3):  # where point3 is 3rd to last point
-        self.points.append(point1)
-        self.points.append(point1 * 2 - point2)
-        self.points.append(point3 + point1 * 4 - point2 * 4)
diff --git a/frc971/control_loops/python/polydrivetrain.py b/frc971/control_loops/python/polydrivetrain.py
index 39fa33d..a45d738 100644
--- a/frc971/control_loops/python/polydrivetrain.py
+++ b/frc971/control_loops/python/polydrivetrain.py
@@ -98,6 +98,7 @@
 
 
 class VelocityDrivetrainModel(control_loop.ControlLoop):
+
     def __init__(self,
                  drivetrain_params,
                  left_low=True,
@@ -109,25 +110,27 @@
             right_low=right_low,
             drivetrain_params=drivetrain_params)
         self.dt = drivetrain_params.dt
-        self.A_continuous = numpy.matrix(
-            [[
-                self._drivetrain.A_continuous[1, 1],
-                self._drivetrain.A_continuous[1, 3]
-            ],
-             [
-                 self._drivetrain.A_continuous[3, 1],
-                 self._drivetrain.A_continuous[3, 3]
-             ]])
+        self.A_continuous = numpy.matrix([[
+            self._drivetrain.A_continuous[1, 1],
+            self._drivetrain.A_continuous[1, 3]
+        ],
+                                          [
+                                              self._drivetrain.A_continuous[3,
+                                                                            1],
+                                              self._drivetrain.A_continuous[3,
+                                                                            3]
+                                          ]])
 
-        self.B_continuous = numpy.matrix(
-            [[
-                self._drivetrain.B_continuous[1, 0],
-                self._drivetrain.B_continuous[1, 1]
-            ],
-             [
-                 self._drivetrain.B_continuous[3, 0],
-                 self._drivetrain.B_continuous[3, 1]
-             ]])
+        self.B_continuous = numpy.matrix([[
+            self._drivetrain.B_continuous[1, 0],
+            self._drivetrain.B_continuous[1, 1]
+        ],
+                                          [
+                                              self._drivetrain.B_continuous[3,
+                                                                            0],
+                                              self._drivetrain.B_continuous[3,
+                                                                            1]
+                                          ]])
         self.C = numpy.matrix(numpy.eye(2))
         self.D = numpy.matrix(numpy.zeros((2, 2)))
 
@@ -141,20 +144,22 @@
         # Build a kalman filter for the velocity.  We don't care what the gains
         # are, but the hybrid kalman filter that we want to write to disk to get
         # access to A_continuous and B_continuous needs this for completeness.
-        self.Q_continuous = numpy.matrix([[(0.5**2.0), 0.0], [0.0, (0.5
-                                                                    **2.0)]])
-        self.R_continuous = numpy.matrix([[(0.1**2.0), 0.0], [0.0, (0.1
-                                                                    **2.0)]])
+        self.Q_continuous = numpy.matrix([[(0.5**2.0), 0.0], [0.0,
+                                                              (0.5**2.0)]])
+        self.R_continuous = numpy.matrix([[(0.1**2.0), 0.0], [0.0,
+                                                              (0.1**2.0)]])
         self.PlaceObserverPoles(drivetrain_params.observer_poles)
-        _, _, self.Q, self.R = controls.kalmd(
-            A_continuous=self.A_continuous,
-            B_continuous=self.B_continuous,
-            Q_continuous=self.Q_continuous,
-            R_continuous=self.R_continuous,
-            dt=self.dt)
+        _, _, self.Q, self.R = controls.kalmd(A_continuous=self.A_continuous,
+                                              B_continuous=self.B_continuous,
+                                              Q_continuous=self.Q_continuous,
+                                              R_continuous=self.R_continuous,
+                                              dt=self.dt)
 
-        self.KalmanGain, self.P_steady_state = controls.kalman(
-            A=self.A, B=self.B, C=self.C, Q=self.Q, R=self.R)
+        self.KalmanGain, self.P_steady_state = controls.kalman(A=self.A,
+                                                               B=self.B,
+                                                               C=self.C,
+                                                               Q=self.Q,
+                                                               R=self.R)
 
         self.G_high = self._drivetrain.G_high
         self.G_low = self._drivetrain.G_low
@@ -277,12 +282,12 @@
         low_omega = (wheel_velocity / self.CurrentDrivetrain().G_low /
                      self.CurrentDrivetrain().r)
         #print gear_name, "Motor Energy Difference.", 0.5 * 0.000140032647 * (low_omega * low_omega - high_omega * high_omega), "joules"
-        high_torque = (
-            (12.0 - high_omega / self.CurrentDrivetrain().Kv) *
-            self.CurrentDrivetrain().Kt / self.CurrentDrivetrain().resistance)
-        low_torque = (
-            (12.0 - low_omega / self.CurrentDrivetrain().Kv) *
-            self.CurrentDrivetrain().Kt / self.CurrentDrivetrain().resistance)
+        high_torque = ((12.0 - high_omega / self.CurrentDrivetrain().Kv) *
+                       self.CurrentDrivetrain().Kt /
+                       self.CurrentDrivetrain().resistance)
+        low_torque = ((12.0 - low_omega / self.CurrentDrivetrain().Kv) *
+                      self.CurrentDrivetrain().Kt /
+                      self.CurrentDrivetrain().resistance)
         high_power = high_torque * high_omega
         low_power = low_torque * low_omega
         #if should_print:
@@ -357,19 +362,18 @@
 
         self.left_gear = self.right_gear = True
         if True:
-            self.left_gear = self.ComputeGear(
-                self.X[0, 0],
-                should_print=True,
-                current_gear=self.left_gear,
-                gear_name="left")
-            self.right_gear = self.ComputeGear(
-                self.X[1, 0],
-                should_print=True,
-                current_gear=self.right_gear,
-                gear_name="right")
+            self.left_gear = self.ComputeGear(self.X[0, 0],
+                                              should_print=True,
+                                              current_gear=self.left_gear,
+                                              gear_name="left")
+            self.right_gear = self.ComputeGear(self.X[1, 0],
+                                               should_print=True,
+                                               current_gear=self.right_gear,
+                                               gear_name="right")
             if self.IsInGear(self.left_gear):
-                self.left_cim.X[0, 0] = self.MotorRPM(
-                    self.left_shifter_position, self.X[0, 0])
+                self.left_cim.X[0,
+                                0] = self.MotorRPM(self.left_shifter_position,
+                                                   self.X[0, 0])
 
             if self.IsInGear(self.right_gear):
                 self.right_cim.X[0, 0] = self.MotorRPM(
@@ -422,23 +426,23 @@
             self.boxed_R = CoerceGoal(R_poly, equality_k, equality_w, self.R)
 
             FF_volts = self.CurrentDrivetrain().FF * self.boxed_R
-            self.U_ideal = self.CurrentDrivetrain().K * (
-                self.boxed_R - self.X) + FF_volts
+            self.U_ideal = self.CurrentDrivetrain().K * (self.boxed_R -
+                                                         self.X) + FF_volts
         else:
             glog.debug('Not all in gear')
             if not self.IsInGear(self.left_gear) and not self.IsInGear(
                     self.right_gear):
                 # TODO(austin): Use battery volts here.
-                R_left = self.MotorRPM(self.left_shifter_position,
-                                       self.X[0, 0])
+                R_left = self.MotorRPM(self.left_shifter_position, self.X[0,
+                                                                          0])
                 self.U_ideal[0, 0] = numpy.clip(
                     self.left_cim.K * (R_left - self.left_cim.X) +
                     R_left / self.left_cim.Kv, self.left_cim.U_min,
                     self.left_cim.U_max)
                 self.left_cim.Update(self.U_ideal[0, 0])
 
-                R_right = self.MotorRPM(self.right_shifter_position,
-                                        self.X[1, 0])
+                R_right = self.MotorRPM(self.right_shifter_position, self.X[1,
+                                                                            0])
                 self.U_ideal[1, 0] = numpy.clip(
                     self.right_cim.K * (R_right - self.right_cim.X) +
                     R_right / self.right_cim.Kv, self.right_cim.U_min,
@@ -473,19 +477,18 @@
                         drivetrain_params,
                         scalar_type='double'):
     vdrivetrain = VelocityDrivetrain(drivetrain_params)
-    hybrid_vdrivetrain = VelocityDrivetrain(
-        drivetrain_params, name="HybridVelocityDrivetrain")
+    hybrid_vdrivetrain = VelocityDrivetrain(drivetrain_params,
+                                            name="HybridVelocityDrivetrain")
     if isinstance(year_namespace, list):
         namespaces = year_namespace
     else:
         namespaces = [year_namespace, 'control_loops', 'drivetrain']
-    dog_loop_writer = control_loop.ControlLoopWriter(
-        "VelocityDrivetrain", [
-            vdrivetrain.drivetrain_low_low, vdrivetrain.drivetrain_low_high,
-            vdrivetrain.drivetrain_high_low, vdrivetrain.drivetrain_high_high
-        ],
-        namespaces=namespaces,
-        scalar_type=scalar_type)
+    dog_loop_writer = control_loop.ControlLoopWriter("VelocityDrivetrain", [
+        vdrivetrain.drivetrain_low_low, vdrivetrain.drivetrain_low_high,
+        vdrivetrain.drivetrain_high_low, vdrivetrain.drivetrain_high_high
+    ],
+                                                     namespaces=namespaces,
+                                                     scalar_type=scalar_type)
 
     dog_loop_writer.Write(drivetrain_files[0], drivetrain_files[1])
 
@@ -503,8 +506,8 @@
 
     hybrid_loop_writer.Write(hybrid_files[0], hybrid_files[1])
 
-    cim_writer = control_loop.ControlLoopWriter(
-        "CIM", [CIM()], scalar_type=scalar_type)
+    cim_writer = control_loop.ControlLoopWriter("CIM", [CIM()],
+                                                scalar_type=scalar_type)
 
     cim_writer.Write(motor_files[0], motor_files[1])
 
diff --git a/frc971/control_loops/python/polytope_test.py b/frc971/control_loops/python/polytope_test.py
index e5b8d5f..e0a8f61 100755
--- a/frc971/control_loops/python/polytope_test.py
+++ b/frc971/control_loops/python/polytope_test.py
@@ -15,6 +15,7 @@
 
 
 class TestHPolytope(unittest.TestCase):
+
     def setUp(self):
         """Builds a simple box polytope."""
         self.H = numpy.matrix([[1, 0], [-1, 0], [0, 1], [0, -1]])
@@ -43,14 +44,12 @@
         ]
 
         for inside_point in inside_points:
-            self.assertTrue(
-                self.p.IsInside(inside_point),
-                msg='Point is' + str(inside_point))
+            self.assertTrue(self.p.IsInside(inside_point),
+                            msg='Point is' + str(inside_point))
 
         for outside_point in outside_points:
-            self.assertFalse(
-                self.p.IsInside(outside_point),
-                msg='Point is' + str(outside_point))
+            self.assertFalse(self.p.IsInside(outside_point),
+                             msg='Point is' + str(outside_point))
 
     def AreVertices(self, p, vertices):
         """Checks that all the vertices are on corners of the set."""
@@ -80,10 +79,10 @@
                     found_points.add(actual_index)
                     break
 
-        self.assertEqual(
-            len(found_points),
-            actual.shape[0],
-            msg="Expected:\n" + str(expected) + "\nActual:\n" + str(actual))
+        self.assertEqual(len(found_points),
+                         actual.shape[0],
+                         msg="Expected:\n" + str(expected) + "\nActual:\n" +
+                         str(actual))
 
     def test_Skewed_Nonsym_Vertices(self):
         """Tests the vertices of a severely skewed space."""
diff --git a/frc971/control_loops/python/spline_drawing.py b/frc971/control_loops/python/spline_drawing.py
index 4e8b04b..7bfae29 100644
--- a/frc971/control_loops/python/spline_drawing.py
+++ b/frc971/control_loops/python/spline_drawing.py
@@ -3,6 +3,7 @@
 
 import numpy as np
 from basic_window import OverrideMatrix, identity, quit_main_loop, set_color
+
 WIDTH_OF_FIELD_IN_METERS = 8.258302
 PIXELS_ON_SCREEN = 800
 
diff --git a/frc971/control_loops/python/spline_graph.py b/frc971/control_loops/python/spline_graph.py
index 2e67480..b7bc5ab 100755
--- a/frc971/control_loops/python/spline_graph.py
+++ b/frc971/control_loops/python/spline_graph.py
@@ -6,6 +6,7 @@
 from path_edit import FieldWidget
 from basic_window import RunApp
 from constants import FIELDS, FIELD, SCREEN_SIZE
+
 gi.require_version('Gtk', '3.0')
 from gi.repository import Gdk, Gtk, GLib
 import cairo
@@ -14,14 +15,16 @@
 
 
 class GridWindow(Gtk.Window):
+
     def method_connect(self, event, cb):
+
         def handler(obj, *args):
             cb(*args)
 
         self.connect(event, handler)
 
     def clear_clicked(self, button):
-        self.field.clear_graph()
+        self.field.clear()
 
     def output_json_clicked(self, button):
         self.field.export_json(self.file_name_box.get_text())
@@ -29,30 +32,36 @@
     def input_json_clicked(self, button):
         self.field.import_json(self.file_name_box.get_text())
         self.long_input.set_value(
-            self.field.points.getConstraint("LONGITUDINAL_ACCELERATION"))
+            self.field.active_multispline.getConstraint(
+                "LONGITUDINAL_ACCELERATION"))
         self.lat_input.set_value(
-            self.field.points.getConstraint("LATERAL_ACCELERATION"))
-        self.vol_input.set_value(self.field.points.getConstraint("VOLTAGE"))
+            self.field.active_multispline.getConstraint(
+                "LATERAL_ACCELERATION"))
+        self.vol_input.set_value(
+            self.field.active_multispline.getConstraint("VOLTAGE"))
 
     def undo_func(self, *args):
         self.field.undo()
+
     def long_changed(self, button):
         value = self.long_input.get_value()
-        self.field.points.setConstraint("LONGITUDINAL_ACCELERATION", value)
-        self.field.graph.schedule_recalculate(self.field.points)
+        self.field.active_multispline.setConstraint(
+            "LONGITUDINAL_ACCELERATION", value)
+        self.field.graph.schedule_recalculate(self.field.multisplines)
 
     def lat_changed(self, button):
         value = self.lat_input.get_value()
-        self.field.points.setConstraint("LATERAL_ACCELERATION", value)
-        self.field.graph.schedule_recalculate(self.field.points)
+        self.field.active_multispline.setConstraint("LATERAL_ACCELERATION",
+                                                    value)
+        self.field.graph.schedule_recalculate(self.field.multisplines)
 
     def vel_changed(self, button):
         value = self.vel_input.get_value()
 
     def vol_changed(self, button):
         value = self.vol_input.get_value()
-        self.field.points.setConstraint("VOLTAGE", value)
-        self.field.graph.schedule_recalculate(self.field.points)
+        self.field.active_multispline.setConstraint("VOLTAGE", value)
+        self.field.graph.schedule_recalculate(self.field.multisplines)
 
     def input_combobox_choice(self, combo):
         text = combo.get_active_text()
@@ -89,7 +98,8 @@
         self.long_label = Gtk.Label()
         self.long_label.set_text("Longitudinal Acceleration Restriction")
         self.long_input.set_value(
-            self.field.points.getConstraint("LONGITUDINAL_ACCELERATION"))
+            self.field.active_multispline.getConstraint(
+                "LONGITUDINAL_ACCELERATION"))
 
         self.lat_input = Gtk.SpinButton()
         self.lat_input.set_size_request(100, 20)
@@ -101,7 +111,8 @@
         self.lat_label = Gtk.Label()
         self.lat_label.set_text("Lateral Acceleration Restriction")
         self.lat_input.set_value(
-            self.field.points.getConstraint("LATERAL_ACCELERATION"))
+            self.field.active_multispline.getConstraint(
+                "LATERAL_ACCELERATION"))
 
         self.vel_input = Gtk.SpinButton()
         self.vel_input.set_size_request(100, 20)
@@ -124,7 +135,8 @@
         self.vol_input.connect("value-changed", self.vol_changed)
         self.vol_label = Gtk.Label()
         self.vol_label.set_text("Voltage Restriction")
-        self.vol_input.set_value(self.field.points.getConstraint("VOLTAGE"))
+        self.vol_input.set_value(
+            self.field.active_multispline.getConstraint("VOLTAGE"))
 
         self.output_json = Gtk.Button.new_with_label("Export")
         self.output_json.set_size_request(100, 40)
diff --git a/frc971/control_loops/python/spline_writer.py b/frc971/control_loops/python/spline_writer.py
index df622f3..a3d27cc 100644
--- a/frc971/control_loops/python/spline_writer.py
+++ b/frc971/control_loops/python/spline_writer.py
@@ -3,6 +3,7 @@
 
 
 class SplineWriter(object):
+
     def __init__(self, namespaces=None, filename="auto_splines.cc"):
         if namespaces:
             self._namespaces = namespaces
diff --git a/frc971/downloader/downloader.py b/frc971/downloader/downloader.py
index 4314845..bed72b9 100644
--- a/frc971/downloader/downloader.py
+++ b/frc971/downloader/downloader.py
@@ -31,19 +31,19 @@
 
 def main(argv):
     parser = argparse.ArgumentParser()
-    parser.add_argument(
-        "--target",
-        type=str,
-        default="roborio-971-frc.local",
-        help="Target to deploy code to.")
-    parser.add_argument(
-        "--type",
-        type=str,
-        choices=["roborio", "pi"],
-        required=True,
-        help="Target type for deployment")
-    parser.add_argument(
-        "srcs", type=str, nargs='+', help="List of files to copy over")
+    parser.add_argument("--target",
+                        type=str,
+                        default="roborio-971-frc.local",
+                        help="Target to deploy code to.")
+    parser.add_argument("--type",
+                        type=str,
+                        choices=["roborio", "pi"],
+                        required=True,
+                        help="Target type for deployment")
+    parser.add_argument("srcs",
+                        type=str,
+                        nargs='+',
+                        help="List of files to copy over")
     args = parser.parse_args(argv[1:])
 
     srcs = args.srcs
@@ -52,9 +52,8 @@
 
     result = re.match("(?:([^:@]+)@)?([^:@]+)(?::([^:@]+))?", destination)
     if not result:
-        print(
-            "Not sure how to parse destination \"%s\"!" % destination,
-            file=sys.stderr)
+        print("Not sure how to parse destination \"%s\"!" % destination,
+              file=sys.stderr)
         return 1
     user = None
     if result.group(1):
diff --git a/frc971/vision_codelab/codelab_test.py b/frc971/vision_codelab/codelab_test.py
index e14d1cd..2ccf943 100755
--- a/frc971/vision_codelab/codelab_test.py
+++ b/frc971/vision_codelab/codelab_test.py
@@ -8,6 +8,7 @@
 
 # TODO(milind): this should be integrated with bazel
 class TestCodelab(unittest.TestCase):
+
     def codelab_test(self, alliance, letter=None, img_path=None):
         if img_path is None:
             img_path = "%s_%s.png" % (alliance.name.lower(),
diff --git a/motors/decode_dump.py b/motors/decode_dump.py
index 27e1757..5d44694 100755
--- a/motors/decode_dump.py
+++ b/motors/decode_dump.py
@@ -12,51 +12,56 @@
 
 data = bytes()
 while len(data) < TOTAL_SIZE:
-  read_now = sys.stdin.buffer.read(TOTAL_SIZE - len(data))
-  if not read_now:
-    print('EOF before data finished', file=sys.stderr)
-    sys.exit(1)
-  data += read_now
+    read_now = sys.stdin.buffer.read(TOTAL_SIZE - len(data))
+    if not read_now:
+        print('EOF before data finished', file=sys.stderr)
+        sys.exit(1)
+    data += read_now
 print('%s' % len(data))
 
 readable, _, _ = select.select([sys.stdin.buffer], [], [], 1)
 if readable:
-  print('Extra bytes', file=sys.stderr)
-  sys.exit(1)
+    print('Extra bytes', file=sys.stderr)
+    sys.exit(1)
 
 decoded = []
 for i in range(DATAPOINTS):
-  datapoint = DatapointStruct.unpack_from(data, i * DatapointStruct.size)
-  decoded.append(datapoint)
+    datapoint = DatapointStruct.unpack_from(data, i * DatapointStruct.size)
+    decoded.append(datapoint)
+
 
 def current(reading, ref):
-  reading_voltage = reading / 4096 * 3.3  / 1.47 * (0.768 + 1.47)
-  reading_voltage = reading / 4096 * 3.3  / 18.0 * (18.0 + 10.0)
-  #reading_ref = ref / 4096 * 3.3
-  reading_ref = 2.5
-  #reading_ref = 0
-  #return (reading_voltage - reading_ref) / 50 / 0.0003
-  return (reading_voltage - reading_ref) / 0.195
+    reading_voltage = reading / 4096 * 3.3 / 1.47 * (0.768 + 1.47)
+    reading_voltage = reading / 4096 * 3.3 / 18.0 * (18.0 + 10.0)
+    #reading_ref = ref / 4096 * 3.3
+    reading_ref = 2.5
+    #reading_ref = 0
+    #return (reading_voltage - reading_ref) / 50 / 0.0003
+    return (reading_voltage - reading_ref) / 0.195
+
 
 with open(sys.argv[1], 'w') as out:
-  out.write('balanced0,balanced1,balanced2,current0.0,current1.0,current2.0,current0.1,current1.1,current2.1,count\n')
-  #for point in decoded[2000:7200]:
-  for point in decoded:
-    out.write(','.join(str(d) for d in (
-        current(point[0], point[6]),
-        current(point[1], point[6]),
-        current(point[2], point[6]),
-        current(point[3], point[6]),
-        current(point[4], point[6]),
-        current(point[5], point[6]),
-        current(point[6], point[6]),
-        current(point[7], point[6]),
-        current(point[8], point[6]),
-        #point[6] / 100.0,
-        #point[7] / 100.0,
-        #point[8] / 100.0,
-        point[9] / 100.0,
-        point[10] / 100.0,
-        )) + '\n')
+    out.write(
+        'balanced0,balanced1,balanced2,current0.0,current1.0,current2.0,current0.1,current1.1,current2.1,count\n'
+    )
+    #for point in decoded[2000:7200]:
+    for point in decoded:
+        out.write(','.join(
+            str(d) for d in (
+                current(point[0], point[6]),
+                current(point[1], point[6]),
+                current(point[2], point[6]),
+                current(point[3], point[6]),
+                current(point[4], point[6]),
+                current(point[5], point[6]),
+                current(point[6], point[6]),
+                current(point[7], point[6]),
+                current(point[8], point[6]),
+                #point[6] / 100.0,
+                #point[7] / 100.0,
+                #point[8] / 100.0,
+                point[9] / 100.0,
+                point[10] / 100.0,
+            )) + '\n')
 
 print('all done')
diff --git a/motors/fet12/calib_sensors.py b/motors/fet12/calib_sensors.py
index 7d882de..16c0ef7 100755
--- a/motors/fet12/calib_sensors.py
+++ b/motors/fet12/calib_sensors.py
@@ -6,8 +6,9 @@
 # calib_data_60*.csv has each output channel set at a constant value of 60.
 # calib_data_6030*.csv actuates two channels.
 
+
 def calibrate(fnames):
-  """Do fitting to calibrate ADC data given csv files.
+    """Do fitting to calibrate ADC data given csv files.
 
   CSVs should be of format:
   command_a, command_b, command_c, reading0, reading1, reading2
@@ -17,24 +18,25 @@
   only care about the averaged samples because otherwise the solution matrix
   can't be solved for in a stable manner.
   """
-  data = np.zeros((1, 6))
-  for fname in fnames:
-    data = np.vstack((data, np.genfromtxt(fname, delimiter=',')))
-  data = data[1:, :]
+    data = np.zeros((1, 6))
+    for fname in fnames:
+        data = np.vstack((data, np.genfromtxt(fname, delimiter=',')))
+    data = data[1:, :]
 
-  data = data[:, :6]
+    data = data[:, :6]
 
-  b = data[:, 0:3]
-  b = b - np.tile(np.mean(b, axis=1), (3, 1)).T
-  # Vcc / 3000 / R
-  # 3000 converts duty cycle in FTM ticks to fraction of full.
-  b *= 20.9 / 3000.0 / 0.0079
-  A = data[:, 3:]
+    b = data[:, 0:3]
+    b = b - np.tile(np.mean(b, axis=1), (3, 1)).T
+    # Vcc / 3000 / R
+    # 3000 converts duty cycle in FTM ticks to fraction of full.
+    b *= 20.9 / 3000.0 / 0.0079
+    A = data[:, 3:]
 
-  return np.linalg.lstsq(A, b[:])[0].T
+    return np.linalg.lstsq(A, b[:])[0].T
+
 
 if __name__ == "__main__":
-  if len(sys.argv) < 2:
-    print("Need filenames for data")
-    sys.exit(1)
-  print(calibrate(sys.argv[1:]))
+    if len(sys.argv) < 2:
+        print("Need filenames for data")
+        sys.exit(1)
+    print(calibrate(sys.argv[1:]))
diff --git a/motors/fet12/current_equalize.py b/motors/fet12/current_equalize.py
index ea44916..114d53f 100755
--- a/motors/fet12/current_equalize.py
+++ b/motors/fet12/current_equalize.py
@@ -4,6 +4,7 @@
 import sys
 import calib_sensors
 
+
 def manual_calibrate():
     #  38  27 -84
     #  36 -64  39
@@ -20,11 +21,12 @@
     transform = I * numpy.linalg.inv(Is)
     return transform
 
+
 def main():
     transform = manual_calibrate()
 
     if len(sys.argv) > 1:
-      transform = calib_sensors.calibrate(sys.argv[1:])
+        transform = calib_sensors.calibrate(sys.argv[1:])
 
     print("#ifndef MOTORS_FET12_CURRENT_MATRIX_")
     print("#define MOTORS_FET12_CURRENT_MATRIX_")
@@ -35,7 +37,8 @@
     print("namespace motors {")
     print("")
     print(
-        "inline ::std::array<float, 3> DecoupleCurrents(int16_t currents[3]) {")
+        "inline ::std::array<float, 3> DecoupleCurrents(int16_t currents[3]) {"
+    )
     print("  ::std::array<float, 3> ans;")
 
     for i in range(3):
@@ -54,5 +57,6 @@
 
     return 0
 
+
 if __name__ == '__main__':
     sys.exit(main())
diff --git a/motors/pistol_grip/generate_cogging.py b/motors/pistol_grip/generate_cogging.py
index d889064..5b0a6e6 100644
--- a/motors/pistol_grip/generate_cogging.py
+++ b/motors/pistol_grip/generate_cogging.py
@@ -5,71 +5,73 @@
 
 # TODO(austin): Plot flag.
 
+
 def main(argv):
-  if len(argv) < 4:
-    print 'Args: input output.cc struct_name'
-    return 1
-  data_sum = [0.0] * 4096
-  data_count = [0] * 4096
-  data_list_absolute = []
-  data_list_current = []
+    if len(argv) < 4:
+        print 'Args: input output.cc struct_name'
+        return 1
+    data_sum = [0.0] * 4096
+    data_count = [0] * 4096
+    data_list_absolute = []
+    data_list_current = []
 
-  with open(argv[1], 'r') as fd:
-    for line in fd:
-      if line.startswith('reading'):
-        split_line = line.split()
-        data_absolute = int(split_line[1])
-        data_index = int(split_line[3][2:])
-        data_current = int(split_line[2]) / 10000.0
-        data_sum[data_index] += data_current
-        data_count[data_index] += 1
-        data_list_absolute.append(data_absolute)
-        data_list_current.append(data_current)
-  data = [0.0] * 4096
-  min_zero = 4096
-  max_zero = 0
-  for i in range(0, 4096):
-    if data_count[i] == 0:
-      min_zero = min(i, min_zero)
-      max_zero = max(i, min_zero)
-
-  for i in range(0, 4096):
-    if data_count[i] != 0:
-      data[i] = data_sum[i] / data_count[i]
-  if min_zero == 0 and max_zero == 4095:
+    with open(argv[1], 'r') as fd:
+        for line in fd:
+            if line.startswith('reading'):
+                split_line = line.split()
+                data_absolute = int(split_line[1])
+                data_index = int(split_line[3][2:])
+                data_current = int(split_line[2]) / 10000.0
+                data_sum[data_index] += data_current
+                data_count[data_index] += 1
+                data_list_absolute.append(data_absolute)
+                data_list_current.append(data_current)
+    data = [0.0] * 4096
+    min_zero = 4096
+    max_zero = 0
     for i in range(0, 4096):
-      if data_count[i] != 0:
-        while i > 0:
-          data[i - 1] = data[i]
-          i -= 1
-        break;
+        if data_count[i] == 0:
+            min_zero = min(i, min_zero)
+            max_zero = max(i, min_zero)
 
-    for i in reversed(range(0, 4096)):
-      if data_count[i] != 0:
-        while i < 4095:
-          data[i + 1] = data[i]
-          i += 1
-        break;
-  else:
     for i in range(0, 4096):
-      if data_count[i] == 0:
-        if i < (min_zero + max_zero) / 2:
-          data[i] = data[min_zero - 1]
-        else:
-          data[i] = data[max_zero + 1]
+        if data_count[i] != 0:
+            data[i] = data_sum[i] / data_count[i]
+    if min_zero == 0 and max_zero == 4095:
+        for i in range(0, 4096):
+            if data_count[i] != 0:
+                while i > 0:
+                    data[i - 1] = data[i]
+                    i -= 1
+                break
 
-  pylab.plot(range(0, 4096), data)
-  pylab.figure()
-  pylab.plot(data_list_absolute, data_list_current)
-  pylab.show()
-  with open(argv[2], 'w') as out_fd:
-    out_fd.write('extern const float %s[4096];\n' % argv[3])
-    out_fd.write('const float %s[4096] = {\n' % argv[3])
-    for datapoint in data:
-      out_fd.write('    %ff,\n' % datapoint)
-    out_fd.write('};')
+        for i in reversed(range(0, 4096)):
+            if data_count[i] != 0:
+                while i < 4095:
+                    data[i + 1] = data[i]
+                    i += 1
+                break
+    else:
+        for i in range(0, 4096):
+            if data_count[i] == 0:
+                if i < (min_zero + max_zero) / 2:
+                    data[i] = data[min_zero - 1]
+                else:
+                    data[i] = data[max_zero + 1]
 
-  return 0
+    pylab.plot(range(0, 4096), data)
+    pylab.figure()
+    pylab.plot(data_list_absolute, data_list_current)
+    pylab.show()
+    with open(argv[2], 'w') as out_fd:
+        out_fd.write('extern const float %s[4096];\n' % argv[3])
+        out_fd.write('const float %s[4096] = {\n' % argv[3])
+        for datapoint in data:
+            out_fd.write('    %ff,\n' % datapoint)
+        out_fd.write('};')
+
+    return 0
+
 
 if __name__ == '__main__':
-  sys.exit(main(sys.argv))
+    sys.exit(main(sys.argv))
diff --git a/motors/plot.py b/motors/plot.py
index 3d43080..c5b02a6 100755
--- a/motors/plot.py
+++ b/motors/plot.py
@@ -3,9 +3,7 @@
 import numpy
 from matplotlib import pylab
 
-data = numpy.loadtxt('/tmp/jkalsdjflsd.csv',
-                     delimiter=',',
-                     skiprows=1)
+data = numpy.loadtxt('/tmp/jkalsdjflsd.csv', delimiter=',', skiprows=1)
 x = range(len(data))
 
 pylab.subplot(1, 1, 1)
diff --git a/motors/print/itm_read.py b/motors/print/itm_read.py
index d616da4..ba12b90 100755
--- a/motors/print/itm_read.py
+++ b/motors/print/itm_read.py
@@ -11,97 +11,105 @@
 import os
 import sys
 
+
 def open_file_for_bytes(path):
-  '''Returns a file-like object which reads bytes without buffering.'''
-  # Not using `open` because it's unclear from the docs how (if it's possible at
-  # all) to get something that will only do one read call and return what that
-  # gets on a fifo.
-  try:
-    return io.FileIO(path, 'r')
-  except FileNotFoundError:
-    # If it wasn't found, try (once) to create it and then open again.
+    '''Returns a file-like object which reads bytes without buffering.'''
+    # Not using `open` because it's unclear from the docs how (if it's possible at
+    # all) to get something that will only do one read call and return what that
+    # gets on a fifo.
     try:
-      os.mkfifo(path)
-    except FileExistsError:
-      pass
-    return io.FileIO(path, 'r')
+        return io.FileIO(path, 'r')
+    except FileNotFoundError:
+        # If it wasn't found, try (once) to create it and then open again.
+        try:
+            os.mkfifo(path)
+        except FileExistsError:
+            pass
+        return io.FileIO(path, 'r')
+
 
 def read_bytes(path):
-  '''Reads bytes from a file. This is appropriate both for regular files and
+    '''Reads bytes from a file. This is appropriate both for regular files and
   fifos.
   Args:
     path: A path-like object to open.
   Yields:
     Individual bytes from the file, until hitting EOF.
   '''
-  with open_file_for_bytes(path) as f:
-    while True:
-      buf = f.read(1024)
-      if not buf:
-        return
-      for byte in buf:
-        yield byte
+    with open_file_for_bytes(path) as f:
+        while True:
+            buf = f.read(1024)
+            if not buf:
+                return
+            for byte in buf:
+                yield byte
+
 
 def parse_packets(source):
-  '''Parses a stream of bytes into packets.
+    '''Parses a stream of bytes into packets.
   Args:
     source: A generator of individual bytes.
   Generates:
     Packets as bytes objects.
   '''
-  try:
-    while True:
-      header = next(source)
-      if header == 0:
-        # Synchronization packets consist of a bunch of 0 bits (not necessarily
-        # a whole number of bytes), followed by a 128 byte. This is for hardware
-        # to synchronize on, but we're not in a position to do that, so
-        # presumably those should get filtered out before getting here?
-        raise 'Not sure how to handle synchronization packets'
-      packet = bytearray()
-      packet.append(header)
-      header_size = header & 3
-      if header_size == 0:
-        while packet[-1] & 128 and len(packet) < 7:
-          packet.append(next(source))
-      else:
-        if header_size == 3:
-          header_size = 4
-        for _ in range(header_size):
-          packet.append(next(source))
-      yield bytes(packet)
-  except StopIteration:
-    return
+    try:
+        while True:
+            header = next(source)
+            if header == 0:
+                # Synchronization packets consist of a bunch of 0 bits (not necessarily
+                # a whole number of bytes), followed by a 128 byte. This is for hardware
+                # to synchronize on, but we're not in a position to do that, so
+                # presumably those should get filtered out before getting here?
+                raise 'Not sure how to handle synchronization packets'
+            packet = bytearray()
+            packet.append(header)
+            header_size = header & 3
+            if header_size == 0:
+                while packet[-1] & 128 and len(packet) < 7:
+                    packet.append(next(source))
+            else:
+                if header_size == 3:
+                    header_size = 4
+                for _ in range(header_size):
+                    packet.append(next(source))
+            yield bytes(packet)
+    except StopIteration:
+        return
+
 
 class PacketParser(object):
-  def __init__(self):
-    self.stimulus_handlers = {}
 
-  def register_stimulus_handler(self, port_number, handler):
-    '''Registers a function to call on packets to the specified port.'''
-    self.stimulus_handlers[port_number] = handler
+    def __init__(self):
+        self.stimulus_handlers = {}
 
-  def process(self, path):
-    for packet in parse_packets(read_bytes(path)):
-      header = packet[0]
-      header_size = header & 3
-      if header_size == 0:
-        # TODO(Brian): At least handle overflow packets here.
-        pass
-      else:
-        port_number = header >> 3
-        if port_number in self.stimulus_handlers:
-          self.stimulus_handlers[port_number](packet[1:])
-        else:
-          print('Warning: unhandled stimulus port %d' % port_number,
-                file=sys.stderr)
-          self.stimulus_handlers[port_number] = lambda _: None
+    def register_stimulus_handler(self, port_number, handler):
+        '''Registers a function to call on packets to the specified port.'''
+        self.stimulus_handlers[port_number] = handler
+
+    def process(self, path):
+        for packet in parse_packets(read_bytes(path)):
+            header = packet[0]
+            header_size = header & 3
+            if header_size == 0:
+                # TODO(Brian): At least handle overflow packets here.
+                pass
+            else:
+                port_number = header >> 3
+                if port_number in self.stimulus_handlers:
+                    self.stimulus_handlers[port_number](packet[1:])
+                else:
+                    print('Warning: unhandled stimulus port %d' % port_number,
+                          file=sys.stderr)
+                    self.stimulus_handlers[port_number] = lambda _: None
+
 
 if __name__ == '__main__':
-  parser = PacketParser()
-  def print_byte(payload):
-    sys.stdout.write(payload.decode('ascii'))
-  parser.register_stimulus_handler(0, print_byte)
+    parser = PacketParser()
 
-  for path in sys.argv[1:]:
-    parser.process(path)
+    def print_byte(payload):
+        sys.stdout.write(payload.decode('ascii'))
+
+    parser.register_stimulus_handler(0, print_byte)
+
+    for path in sys.argv[1:]:
+        parser.process(path)
diff --git a/motors/python/big_phase_current.py b/motors/python/big_phase_current.py
index 8cbce3f..d31fc72 100755
--- a/motors/python/big_phase_current.py
+++ b/motors/python/big_phase_current.py
@@ -57,51 +57,62 @@
 #switching_pattern = 'centered front shifted'
 #switching_pattern = 'anticentered'
 
-Vconv = numpy.matrix([[2.0, -1.0, -1.0],
-                      [-1.0, 2.0, -1.0],
-                      [-1.0, -1.0, 2.0]]) / 3.0
+Vconv = numpy.matrix([[2.0, -1.0, -1.0], [-1.0, 2.0, -1.0], [-1.0, -1.0, 2.0]
+                      ]) / 3.0
+
 
 def f_single(theta):
-  return K1 * numpy.sin(theta) + K2 * numpy.sin(theta * 5)
+    return K1 * numpy.sin(theta) + K2 * numpy.sin(theta * 5)
+
 
 def g_single(theta):
-  return K1 * numpy.sin(theta) - K2 * numpy.sin(theta * 5)
+    return K1 * numpy.sin(theta) - K2 * numpy.sin(theta * 5)
+
 
 def gdot_single(theta):
-  """Derivitive of the current.
+    """Derivitive of the current.
 
   Must be multiplied by omega externally.
   """
-  return K1 * numpy.cos(theta) - 5.0 * K2 * numpy.cos(theta * 5.0)
+    return K1 * numpy.cos(theta) - 5.0 * K2 * numpy.cos(theta * 5.0)
 
-f = numpy.vectorize(f_single, otypes=(numpy.float,))
-g = numpy.vectorize(g_single, otypes=(numpy.float,))
-gdot = numpy.vectorize(gdot_single, otypes=(numpy.float,))
+
+f = numpy.vectorize(f_single, otypes=(numpy.float, ))
+g = numpy.vectorize(g_single, otypes=(numpy.float, ))
+gdot = numpy.vectorize(gdot_single, otypes=(numpy.float, ))
+
 
 def torque(theta):
-  return f(theta) * g(theta)
+    return f(theta) * g(theta)
+
 
 def phase_a(function, theta):
-  return function(theta)
+    return function(theta)
+
 
 def phase_b(function, theta):
-  return function(theta + 2 * numpy.pi / 3)
+    return function(theta + 2 * numpy.pi / 3)
+
 
 def phase_c(function, theta):
-  return function(theta + 4 * numpy.pi / 3)
+    return function(theta + 4 * numpy.pi / 3)
+
 
 def phases(function, theta):
-  return numpy.matrix([[phase_a(function, theta)],
-                       [phase_b(function, theta)],
-                       [phase_c(function, theta)]])
+    return numpy.matrix([[phase_a(function,
+                                  theta)], [phase_b(function, theta)],
+                         [phase_c(function, theta)]])
+
 
 def all_phases(function, theta_range):
-  return (phase_a(function, theta_range) +
-          phase_b(function, theta_range) +
-          phase_c(function, theta_range))
+    return (phase_a(function, theta_range) + phase_b(function, theta_range) +
+            phase_c(function, theta_range))
+
 
 theta_range = numpy.linspace(start=0, stop=4 * numpy.pi, num=10000)
-one_amp_driving_voltage = R * g(theta_range) + (L * gdot(theta_range) + M * gdot(theta_range + 2.0 / 3.0 * numpy.pi) + M * gdot(theta_range - 2.0 / 3.0 * numpy.pi)) * Kv * vcc / 2.0
+one_amp_driving_voltage = R * g(theta_range) + (
+    L * gdot(theta_range) + M * gdot(theta_range + 2.0 / 3.0 * numpy.pi) +
+    M * gdot(theta_range - 2.0 / 3.0 * numpy.pi)) * Kv * vcc / 2.0
 
 max_one_amp_driving_voltage = max(one_amp_driving_voltage)
 
@@ -111,7 +122,8 @@
 
 print('Max BEMF', max(f(theta_range)))
 print('Max current', max(g(theta_range)))
-print('Max drive voltage (one_amp_driving_voltage)', max(one_amp_driving_voltage))
+print('Max drive voltage (one_amp_driving_voltage)',
+      max(one_amp_driving_voltage))
 print('one_amp_scalar', one_amp_scalar)
 
 pylab.figure()
@@ -119,12 +131,14 @@
 pylab.plot(theta_range, f(theta_range), label='bemf')
 pylab.plot(theta_range, g(theta_range), label='phase_current')
 pylab.plot(theta_range, torque(theta_range), label='phase_torque')
-pylab.plot(theta_range, all_phases(torque, theta_range), label='sum_torque/current')
+pylab.plot(theta_range,
+           all_phases(torque, theta_range),
+           label='sum_torque/current')
 pylab.legend()
 
 
 def full_sample_times(Ton, Toff, dt, n, start_time):
-  """Returns n + 4 samples for the provided switching times.
+    """Returns n + 4 samples for the provided switching times.
 
   We need the timesteps and Us to integrate.
 
@@ -139,235 +153,262 @@
     array of [t, U matrix]
   """
 
-  assert((Toff <= 1.0).all())
-  assert((Ton <= 1.0).all())
-  assert((Toff >= 0.0).all())
-  assert((Ton >= 0.0).all())
+    assert ((Toff <= 1.0).all())
+    assert ((Ton <= 1.0).all())
+    assert ((Toff >= 0.0).all())
+    assert ((Ton >= 0.0).all())
 
-  if (Ton <= Toff).all():
-    on_before_off = True
-  else:
-    # Verify that they are all ordered correctly.
-    assert(not (Ton <= Toff).any())
-    on_before_off = False
-
-  Toff = Toff.copy() * dt
-  Toff[Toff < 100e-9] = -1.0
-  Toff[Toff > dt] = dt
-
-  Ton = Ton.copy() * dt
-  Ton[Ton < 100e-9] = -1.0
-  Ton[Ton > dt - 100e-9] = dt + 1.0
-
-  result = []
-  t = 0
-
-  result_times = numpy.concatenate(
-      (numpy.linspace(0, dt, num=n),
-       numpy.reshape(numpy.asarray(Ton[numpy.logical_and(Ton < dt, Ton > 0.0)]), (-1,)),
-       numpy.reshape(numpy.asarray(Toff[numpy.logical_and(Toff < dt, Toff > 0.0)]), (-1,))
-       ))
-  result_times.sort()
-  assert((result_times >= 0).all())
-  assert((result_times <= dt).all())
-
-  for t in result_times:
-    if on_before_off:
-      U = numpy.matrix([[vcc], [vcc], [vcc]])
-      U[t <= Ton] = 0.0
-      U[Toff < t] = 0.0
+    if (Ton <= Toff).all():
+        on_before_off = True
     else:
-      U = numpy.matrix([[0.0], [0.0], [0.0]])
-      U[t > Ton] = vcc
-      U[t <= Toff] = vcc
-    result.append((float(t + start_time), U.copy()))
+        # Verify that they are all ordered correctly.
+        assert (not (Ton <= Toff).any())
+        on_before_off = False
 
-  return result
+    Toff = Toff.copy() * dt
+    Toff[Toff < 100e-9] = -1.0
+    Toff[Toff > dt] = dt
+
+    Ton = Ton.copy() * dt
+    Ton[Ton < 100e-9] = -1.0
+    Ton[Ton > dt - 100e-9] = dt + 1.0
+
+    result = []
+    t = 0
+
+    result_times = numpy.concatenate(
+        (numpy.linspace(0, dt, num=n),
+         numpy.reshape(
+             numpy.asarray(Ton[numpy.logical_and(Ton < dt, Ton > 0.0)]),
+             (-1, )),
+         numpy.reshape(
+             numpy.asarray(Toff[numpy.logical_and(Toff < dt, Toff > 0.0)]),
+             (-1, ))))
+    result_times.sort()
+    assert ((result_times >= 0).all())
+    assert ((result_times <= dt).all())
+
+    for t in result_times:
+        if on_before_off:
+            U = numpy.matrix([[vcc], [vcc], [vcc]])
+            U[t <= Ton] = 0.0
+            U[Toff < t] = 0.0
+        else:
+            U = numpy.matrix([[0.0], [0.0], [0.0]])
+            U[t > Ton] = vcc
+            U[t <= Toff] = vcc
+        result.append((float(t + start_time), U.copy()))
+
+    return result
+
 
 def sample_times(T, dt, n, start_time):
-  if switching_pattern == 'rear':
-    T = 1.0 - T
-    ans = full_sample_times(T, numpy.matrix(numpy.ones((3, 1))) * 1.0, dt, n, start_time)
-  elif switching_pattern == 'centered front shifted':
-    # Centered, but shifted to the beginning of the cycle.
-    Ton = 0.5 - T / 2.0
-    Toff = 0.5 + T / 2.0
+    if switching_pattern == 'rear':
+        T = 1.0 - T
+        ans = full_sample_times(T,
+                                numpy.matrix(numpy.ones((3, 1))) * 1.0, dt, n,
+                                start_time)
+    elif switching_pattern == 'centered front shifted':
+        # Centered, but shifted to the beginning of the cycle.
+        Ton = 0.5 - T / 2.0
+        Toff = 0.5 + T / 2.0
 
-    tn = min(Ton)[0, 0]
-    Ton -= tn
-    Toff -= tn
+        tn = min(Ton)[0, 0]
+        Ton -= tn
+        Toff -= tn
 
-    ans = full_sample_times(Ton, Toff, dt, n, start_time)
-  elif switching_pattern == 'centered':
-    # Centered, looks waaay better.
-    Ton = 0.5 - T / 2.0
-    Toff = 0.5 + T / 2.0
+        ans = full_sample_times(Ton, Toff, dt, n, start_time)
+    elif switching_pattern == 'centered':
+        # Centered, looks waaay better.
+        Ton = 0.5 - T / 2.0
+        Toff = 0.5 + T / 2.0
 
-    ans = full_sample_times(Ton, Toff, dt, n, start_time)
-  elif switching_pattern == 'anticentered':
-    # Centered, looks waaay better.
-    Toff = T / 2.0
-    Ton = 1.0 - T / 2.0
+        ans = full_sample_times(Ton, Toff, dt, n, start_time)
+    elif switching_pattern == 'anticentered':
+        # Centered, looks waaay better.
+        Toff = T / 2.0
+        Ton = 1.0 - T / 2.0
 
-    ans = full_sample_times(Ton, Toff, dt, n, start_time)
-  elif switching_pattern == 'front':
-    ans = full_sample_times(numpy.matrix(numpy.zeros((3, 1))), T, dt, n, start_time)
-  else:
-    assert(False)
+        ans = full_sample_times(Ton, Toff, dt, n, start_time)
+    elif switching_pattern == 'front':
+        ans = full_sample_times(numpy.matrix(numpy.zeros((3, 1))), T, dt, n,
+                                start_time)
+    else:
+        assert (False)
 
-  return ans
+    return ans
+
 
 class DataLogger(object):
-  def __init__(self, title=None):
-    self.title = title
-    self.ia = []
-    self.ib = []
-    self.ic = []
-    self.ia_goal = []
-    self.ib_goal = []
-    self.ic_goal = []
-    self.ia_controls = []
-    self.ib_controls = []
-    self.ic_controls = []
-    self.isensea = []
-    self.isenseb = []
-    self.isensec = []
 
-    self.va = []
-    self.vb = []
-    self.vc = []
-    self.van = []
-    self.vbn = []
-    self.vcn = []
+    def __init__(self, title=None):
+        self.title = title
+        self.ia = []
+        self.ib = []
+        self.ic = []
+        self.ia_goal = []
+        self.ib_goal = []
+        self.ic_goal = []
+        self.ia_controls = []
+        self.ib_controls = []
+        self.ic_controls = []
+        self.isensea = []
+        self.isenseb = []
+        self.isensec = []
 
-    self.ea = []
-    self.eb = []
-    self.ec = []
+        self.va = []
+        self.vb = []
+        self.vc = []
+        self.van = []
+        self.vbn = []
+        self.vcn = []
 
-    self.theta = []
-    self.omega = []
+        self.ea = []
+        self.eb = []
+        self.ec = []
 
-    self.i_goal = []
+        self.theta = []
+        self.omega = []
 
-    self.time = []
-    self.controls_time = []
-    self.predicted_time = []
+        self.i_goal = []
 
-    self.ia_pred = []
-    self.ib_pred = []
-    self.ic_pred = []
+        self.time = []
+        self.controls_time = []
+        self.predicted_time = []
 
-    self.voltage_time = []
-    self.estimated_velocity = []
-    self.U_last = numpy.matrix(numpy.zeros((3, 1)))
+        self.ia_pred = []
+        self.ib_pred = []
+        self.ic_pred = []
 
-  def log_predicted(self, current_time, p):
-    self.predicted_time.append(current_time)
-    self.ia_pred.append(p[0, 0])
-    self.ib_pred.append(p[1, 0])
-    self.ic_pred.append(p[2, 0])
+        self.voltage_time = []
+        self.estimated_velocity = []
+        self.U_last = numpy.matrix(numpy.zeros((3, 1)))
 
-  def log_controls(self, current_time, measured_current, In, E, estimated_velocity):
-    self.controls_time.append(current_time)
-    self.ia_controls.append(measured_current[0, 0])
-    self.ib_controls.append(measured_current[1, 0])
-    self.ic_controls.append(measured_current[2, 0])
+    def log_predicted(self, current_time, p):
+        self.predicted_time.append(current_time)
+        self.ia_pred.append(p[0, 0])
+        self.ib_pred.append(p[1, 0])
+        self.ic_pred.append(p[2, 0])
 
-    self.ea.append(E[0, 0])
-    self.eb.append(E[1, 0])
-    self.ec.append(E[2, 0])
+    def log_controls(self, current_time, measured_current, In, E,
+                     estimated_velocity):
+        self.controls_time.append(current_time)
+        self.ia_controls.append(measured_current[0, 0])
+        self.ib_controls.append(measured_current[1, 0])
+        self.ic_controls.append(measured_current[2, 0])
 
-    self.ia_goal.append(In[0, 0])
-    self.ib_goal.append(In[1, 0])
-    self.ic_goal.append(In[2, 0])
-    self.estimated_velocity.append(estimated_velocity)
+        self.ea.append(E[0, 0])
+        self.eb.append(E[1, 0])
+        self.ec.append(E[2, 0])
 
-  def log_data(self, X, U, current_time, Vn, i_goal):
-    self.ia.append(X[0, 0])
-    self.ib.append(X[1, 0])
-    self.ic.append(X[2, 0])
+        self.ia_goal.append(In[0, 0])
+        self.ib_goal.append(In[1, 0])
+        self.ic_goal.append(In[2, 0])
+        self.estimated_velocity.append(estimated_velocity)
 
-    self.i_goal.append(i_goal)
+    def log_data(self, X, U, current_time, Vn, i_goal):
+        self.ia.append(X[0, 0])
+        self.ib.append(X[1, 0])
+        self.ic.append(X[2, 0])
 
-    self.isensea.append(X[5, 0])
-    self.isenseb.append(X[6, 0])
-    self.isensec.append(X[7, 0])
+        self.i_goal.append(i_goal)
 
-    self.theta.append(X[3, 0])
-    self.omega.append(X[4, 0])
+        self.isensea.append(X[5, 0])
+        self.isenseb.append(X[6, 0])
+        self.isensec.append(X[7, 0])
 
-    self.time.append(current_time)
+        self.theta.append(X[3, 0])
+        self.omega.append(X[4, 0])
 
-    self.van.append(Vn[0, 0])
-    self.vbn.append(Vn[1, 0])
-    self.vcn.append(Vn[2, 0])
+        self.time.append(current_time)
 
-    if (self.U_last != U).any():
-      self.va.append(self.U_last[0, 0])
-      self.vb.append(self.U_last[1, 0])
-      self.vc.append(self.U_last[2, 0])
-      self.voltage_time.append(current_time)
+        self.van.append(Vn[0, 0])
+        self.vbn.append(Vn[1, 0])
+        self.vcn.append(Vn[2, 0])
 
-      self.va.append(U[0, 0])
-      self.vb.append(U[1, 0])
-      self.vc.append(U[2, 0])
-      self.voltage_time.append(current_time)
-      self.U_last = U.copy()
+        if (self.U_last != U).any():
+            self.va.append(self.U_last[0, 0])
+            self.vb.append(self.U_last[1, 0])
+            self.vc.append(self.U_last[2, 0])
+            self.voltage_time.append(current_time)
 
-  def plot(self):
-    fig = pylab.figure()
-    pylab.subplot(3, 1, 1)
-    pylab.plot(self.controls_time, self.ia_controls, 'ro', label='ia_controls')
-    pylab.plot(self.controls_time, self.ib_controls, 'go', label='ib_controls')
-    pylab.plot(self.controls_time, self.ic_controls, 'bo', label='ic_controls')
-    pylab.plot(self.controls_time, self.ia_goal, 'r--', label='ia_goal')
-    pylab.plot(self.controls_time, self.ib_goal, 'g--', label='ib_goal')
-    pylab.plot(self.controls_time, self.ic_goal, 'b--', label='ic_goal')
+            self.va.append(U[0, 0])
+            self.vb.append(U[1, 0])
+            self.vc.append(U[2, 0])
+            self.voltage_time.append(current_time)
+            self.U_last = U.copy()
 
-    #pylab.plot(self.controls_time, self.ia_pred, 'r*', label='ia_pred')
-    #pylab.plot(self.controls_time, self.ib_pred, 'g*', label='ib_pred')
-    #pylab.plot(self.controls_time, self.ic_pred, 'b*', label='ic_pred')
-    pylab.plot(self.time, self.isensea, 'r:', label='ia_sense')
-    pylab.plot(self.time, self.isenseb, 'g:', label='ib_sense')
-    pylab.plot(self.time, self.isensec, 'b:', label='ic_sense')
-    pylab.plot(self.time, self.ia, 'r', label='ia')
-    pylab.plot(self.time, self.ib, 'g', label='ib')
-    pylab.plot(self.time, self.ic, 'b', label='ic')
-    pylab.plot(self.time, self.i_goal, label='i_goal')
-    if self.title is not None:
-      fig.canvas.set_window_title(self.title)
-    pylab.legend()
+    def plot(self):
+        fig = pylab.figure()
+        pylab.subplot(3, 1, 1)
+        pylab.plot(self.controls_time,
+                   self.ia_controls,
+                   'ro',
+                   label='ia_controls')
+        pylab.plot(self.controls_time,
+                   self.ib_controls,
+                   'go',
+                   label='ib_controls')
+        pylab.plot(self.controls_time,
+                   self.ic_controls,
+                   'bo',
+                   label='ic_controls')
+        pylab.plot(self.controls_time, self.ia_goal, 'r--', label='ia_goal')
+        pylab.plot(self.controls_time, self.ib_goal, 'g--', label='ib_goal')
+        pylab.plot(self.controls_time, self.ic_goal, 'b--', label='ic_goal')
 
-    pylab.subplot(3, 1, 2)
-    pylab.plot(self.voltage_time, self.va, label='va')
-    pylab.plot(self.voltage_time, self.vb, label='vb')
-    pylab.plot(self.voltage_time, self.vc, label='vc')
-    pylab.plot(self.time, self.van, label='van')
-    pylab.plot(self.time, self.vbn, label='vbn')
-    pylab.plot(self.time, self.vcn, label='vcn')
-    pylab.plot(self.controls_time, self.ea, label='ea')
-    pylab.plot(self.controls_time, self.eb, label='eb')
-    pylab.plot(self.controls_time, self.ec, label='ec')
-    pylab.legend()
+        #pylab.plot(self.controls_time, self.ia_pred, 'r*', label='ia_pred')
+        #pylab.plot(self.controls_time, self.ib_pred, 'g*', label='ib_pred')
+        #pylab.plot(self.controls_time, self.ic_pred, 'b*', label='ic_pred')
+        pylab.plot(self.time, self.isensea, 'r:', label='ia_sense')
+        pylab.plot(self.time, self.isenseb, 'g:', label='ib_sense')
+        pylab.plot(self.time, self.isensec, 'b:', label='ic_sense')
+        pylab.plot(self.time, self.ia, 'r', label='ia')
+        pylab.plot(self.time, self.ib, 'g', label='ib')
+        pylab.plot(self.time, self.ic, 'b', label='ic')
+        pylab.plot(self.time, self.i_goal, label='i_goal')
+        if self.title is not None:
+            fig.canvas.set_window_title(self.title)
+        pylab.legend()
 
-    pylab.subplot(3, 1, 3)
-    pylab.plot(self.time, self.theta, label='theta')
-    pylab.plot(self.time, self.omega, label='omega')
-    pylab.plot(self.controls_time, self.estimated_velocity, label='estimated omega')
+        pylab.subplot(3, 1, 2)
+        pylab.plot(self.voltage_time, self.va, label='va')
+        pylab.plot(self.voltage_time, self.vb, label='vb')
+        pylab.plot(self.voltage_time, self.vc, label='vc')
+        pylab.plot(self.time, self.van, label='van')
+        pylab.plot(self.time, self.vbn, label='vbn')
+        pylab.plot(self.time, self.vcn, label='vcn')
+        pylab.plot(self.controls_time, self.ea, label='ea')
+        pylab.plot(self.controls_time, self.eb, label='eb')
+        pylab.plot(self.controls_time, self.ec, label='ec')
+        pylab.legend()
 
-    pylab.legend()
+        pylab.subplot(3, 1, 3)
+        pylab.plot(self.time, self.theta, label='theta')
+        pylab.plot(self.time, self.omega, label='omega')
+        pylab.plot(self.controls_time,
+                   self.estimated_velocity,
+                   label='estimated omega')
 
-    fig = pylab.figure()
-    pylab.plot(self.controls_time,
-               map(operator.sub, self.ia_goal, self.ia_controls), 'r', label='ia_error')
-    pylab.plot(self.controls_time,
-               map(operator.sub, self.ib_goal, self.ib_controls), 'g', label='ib_error')
-    pylab.plot(self.controls_time,
-               map(operator.sub, self.ic_goal, self.ic_controls), 'b', label='ic_error')
-    if self.title is not None:
-      fig.canvas.set_window_title(self.title)
-    pylab.legend()
-    pylab.show()
+        pylab.legend()
+
+        fig = pylab.figure()
+        pylab.plot(self.controls_time,
+                   map(operator.sub, self.ia_goal, self.ia_controls),
+                   'r',
+                   label='ia_error')
+        pylab.plot(self.controls_time,
+                   map(operator.sub, self.ib_goal, self.ib_controls),
+                   'g',
+                   label='ib_error')
+        pylab.plot(self.controls_time,
+                   map(operator.sub, self.ic_goal, self.ic_controls),
+                   'b',
+                   label='ic_error')
+        if self.title is not None:
+            fig.canvas.set_window_title(self.title)
+        pylab.legend()
+        pylab.show()
 
 
 # So, from running a bunch of math, we know the following:
@@ -400,180 +441,204 @@
 # inv(L_matrix) * (Vconv * V - E - R_matrix * I) = I_dot
 # B * V - inv(L_matrix) * E - A * I = I_dot
 class Simulation(object):
-  def __init__(self):
-    self.R_matrix = numpy.matrix(numpy.eye(3)) * R
-    self.L_matrix = numpy.matrix([[L, M, M], [M, L, M], [M, M, L]])
-    self.L_matrix_inv = numpy.linalg.inv(self.L_matrix)
-    self.A = self.L_matrix_inv * self.R_matrix
-    self.B = self.L_matrix_inv * Vconv
-    self.A_discrete, self.B_discrete = controls.c2d(-self.A, self.B, 1.0 / hz)
-    self.B_discrete_inverse = numpy.matrix(numpy.eye(3)) / (self.B_discrete[0, 0] - self.B_discrete[1, 0])
 
-    self.R_model = R * 1.0
-    self.L_model = L * 1.0
-    self.M_model = M * 1.0
-    self.R_matrix_model = numpy.matrix(numpy.eye(3)) * self.R_model
-    self.L_matrix_model = numpy.matrix([[self.L_model, self.M_model, self.M_model],
-                                        [self.M_model, self.L_model, self.M_model],
-                                        [self.M_model, self.M_model, self.L_model]])
-    self.L_matrix_inv_model = numpy.linalg.inv(self.L_matrix_model)
-    self.A_model = self.L_matrix_inv_model * self.R_matrix_model
-    self.B_model = self.L_matrix_inv_model * Vconv
-    self.A_discrete_model, self.B_discrete_model = \
-        controls.c2d(-self.A_model, self.B_model, 1.0 / hz)
-    self.B_discrete_inverse_model = numpy.matrix(numpy.eye(3)) / (self.B_discrete_model[0, 0] - self.B_discrete_model[1, 0])
+    def __init__(self):
+        self.R_matrix = numpy.matrix(numpy.eye(3)) * R
+        self.L_matrix = numpy.matrix([[L, M, M], [M, L, M], [M, M, L]])
+        self.L_matrix_inv = numpy.linalg.inv(self.L_matrix)
+        self.A = self.L_matrix_inv * self.R_matrix
+        self.B = self.L_matrix_inv * Vconv
+        self.A_discrete, self.B_discrete = controls.c2d(
+            -self.A, self.B, 1.0 / hz)
+        self.B_discrete_inverse = numpy.matrix(
+            numpy.eye(3)) / (self.B_discrete[0, 0] - self.B_discrete[1, 0])
 
-    print('constexpr double kL = %g;' % self.L_model)
-    print('constexpr double kM = %g;' % self.M_model)
-    print('constexpr double kR = %g;' % self.R_model)
-    print('constexpr float kAdiscrete_diagonal = %gf;' % self.A_discrete_model[0, 0])
-    print('constexpr float kAdiscrete_offdiagonal = %gf;' % self.A_discrete_model[1, 0])
-    print('constexpr float kBdiscrete_inv_diagonal = %gf;' % self.B_discrete_inverse_model[0, 0])
-    print('constexpr float kBdiscrete_inv_offdiagonal = %gf;' % self.B_discrete_inverse_model[1, 0])
-    print('constexpr double kOneAmpScalar = %g;' % one_amp_scalar)
-    print('constexpr double kMaxOneAmpDrivingVoltage = %g;' % max_one_amp_driving_voltage)
-    print('A_discrete', self.A_discrete)
-    print('B_discrete', self.B_discrete)
-    print('B_discrete_sub', numpy.linalg.inv(self.B_discrete[0:2, 0:2]))
-    print('B_discrete_inv', self.B_discrete_inverse)
+        self.R_model = R * 1.0
+        self.L_model = L * 1.0
+        self.M_model = M * 1.0
+        self.R_matrix_model = numpy.matrix(numpy.eye(3)) * self.R_model
+        self.L_matrix_model = numpy.matrix(
+            [[self.L_model, self.M_model, self.M_model],
+             [self.M_model, self.L_model, self.M_model],
+             [self.M_model, self.M_model, self.L_model]])
+        self.L_matrix_inv_model = numpy.linalg.inv(self.L_matrix_model)
+        self.A_model = self.L_matrix_inv_model * self.R_matrix_model
+        self.B_model = self.L_matrix_inv_model * Vconv
+        self.A_discrete_model, self.B_discrete_model = \
+            controls.c2d(-self.A_model, self.B_model, 1.0 / hz)
+        self.B_discrete_inverse_model = numpy.matrix(numpy.eye(3)) / (
+            self.B_discrete_model[0, 0] - self.B_discrete_model[1, 0])
 
-    # Xdot[5:, :] = (R_sense2 + R_sense1) / R_sense2 * (
-    #      (1.0 / (R_sense1 * C_sense)) * (-Isense * R_sense2 / (R_sense1 + R_sense2) * (R_sense1 / R_sense2 + 1.0) + I))
-    self.mk1 = (R_sense2 + R_sense1) / R_sense2 * (1.0 / (R_sense1 * C_sense))
-    self.mk2 = -self.mk1 * R_sense2 / (R_sense1 + R_sense2) * (R_sense1 / R_sense2 + 1.0)
+        print('constexpr double kL = %g;' % self.L_model)
+        print('constexpr double kM = %g;' % self.M_model)
+        print('constexpr double kR = %g;' % self.R_model)
+        print('constexpr float kAdiscrete_diagonal = %gf;' %
+              self.A_discrete_model[0, 0])
+        print('constexpr float kAdiscrete_offdiagonal = %gf;' %
+              self.A_discrete_model[1, 0])
+        print('constexpr float kBdiscrete_inv_diagonal = %gf;' %
+              self.B_discrete_inverse_model[0, 0])
+        print('constexpr float kBdiscrete_inv_offdiagonal = %gf;' %
+              self.B_discrete_inverse_model[1, 0])
+        print('constexpr double kOneAmpScalar = %g;' % one_amp_scalar)
+        print('constexpr double kMaxOneAmpDrivingVoltage = %g;' %
+              max_one_amp_driving_voltage)
+        print('A_discrete', self.A_discrete)
+        print('B_discrete', self.B_discrete)
+        print('B_discrete_sub', numpy.linalg.inv(self.B_discrete[0:2, 0:2]))
+        print('B_discrete_inv', self.B_discrete_inverse)
 
-    # ia, ib, ic, theta, omega, isensea, isenseb, isensec
-    self.X = numpy.matrix([[0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0]])
+        # Xdot[5:, :] = (R_sense2 + R_sense1) / R_sense2 * (
+        #      (1.0 / (R_sense1 * C_sense)) * (-Isense * R_sense2 / (R_sense1 + R_sense2) * (R_sense1 / R_sense2 + 1.0) + I))
+        self.mk1 = (R_sense2 + R_sense1) / R_sense2 * (1.0 /
+                                                       (R_sense1 * C_sense))
+        self.mk2 = -self.mk1 * R_sense2 / (R_sense1 + R_sense2) * (
+            R_sense1 / R_sense2 + 1.0)
 
-    self.K = 0.05 * Vconv
-    print('A %s' % repr(self.A))
-    print('B %s' % repr(self.B))
-    print('K %s' % repr(self.K))
+        # ia, ib, ic, theta, omega, isensea, isenseb, isensec
+        self.X = numpy.matrix([[0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0],
+                               [0.0]])
 
-    print('System poles are %s' % repr(numpy.linalg.eig(self.A)[0]))
-    print('Poles are %s' % repr(numpy.linalg.eig(self.A - self.B * self.K)[0]))
+        self.K = 0.05 * Vconv
+        print('A %s' % repr(self.A))
+        print('B %s' % repr(self.B))
+        print('K %s' % repr(self.K))
 
-    controllability = controls.ctrb(self.A, self.B)
-    print('Rank of augmented controlability matrix. %d' % numpy.linalg.matrix_rank(
-          controllability))
+        print('System poles are %s' % repr(numpy.linalg.eig(self.A)[0]))
+        print('Poles are %s' %
+              repr(numpy.linalg.eig(self.A - self.B * self.K)[0]))
 
-    self.data_logger = DataLogger(switching_pattern)
-    self.current_time = 0.0
+        controllability = controls.ctrb(self.A, self.B)
+        print('Rank of augmented controlability matrix. %d' %
+              numpy.linalg.matrix_rank(controllability))
 
-    self.estimated_velocity = self.X[4, 0]
+        self.data_logger = DataLogger(switching_pattern)
+        self.current_time = 0.0
 
-  def motor_diffeq(self, x, t, U):
-    I = numpy.matrix(x[0:3]).T
-    theta = x[3]
-    omega = x[4]
-    Isense = numpy.matrix(x[5:]).T
+        self.estimated_velocity = self.X[4, 0]
 
-    dflux = phases(f_single, theta) / Kv
+    def motor_diffeq(self, x, t, U):
+        I = numpy.matrix(x[0:3]).T
+        theta = x[3]
+        omega = x[4]
+        Isense = numpy.matrix(x[5:]).T
 
-    Xdot = numpy.matrix(numpy.zeros((8, 1)))
-    di_dt = -self.A_model * I + self.B_model * U - self.L_matrix_inv_model * dflux * omega
-    torque = I.T * dflux
-    Xdot[0:3, :] = di_dt
-    Xdot[3, :] = omega
-    Xdot[4, :] = torque / J
+        dflux = phases(f_single, theta) / Kv
 
-    Xdot[5:, :] = self.mk1 * I + self.mk2 * Isense
-    return numpy.squeeze(numpy.asarray(Xdot))
+        Xdot = numpy.matrix(numpy.zeros((8, 1)))
+        di_dt = -self.A_model * I + self.B_model * U - self.L_matrix_inv_model * dflux * omega
+        torque = I.T * dflux
+        Xdot[0:3, :] = di_dt
+        Xdot[3, :] = omega
+        Xdot[4, :] = torque / J
 
-  def DoControls(self, goal_current):
-    theta = self.X[3, 0]
-    # Use the actual angular velocity.
-    omega = self.X[4, 0]
+        Xdot[5:, :] = self.mk1 * I + self.mk2 * Isense
+        return numpy.squeeze(numpy.asarray(Xdot))
 
-    measured_current = self.X[5:, :].copy()
+    def DoControls(self, goal_current):
+        theta = self.X[3, 0]
+        # Use the actual angular velocity.
+        omega = self.X[4, 0]
 
-    # Ok, lets now fake it.
-    E_imag1 = numpy.exp(1j * theta) * K1 * numpy.matrix(
-            [[-1j],
-             [-1j * numpy.exp(1j * numpy.pi * 2.0 / 3.0)],
+        measured_current = self.X[5:, :].copy()
+
+        # Ok, lets now fake it.
+        E_imag1 = numpy.exp(1j * theta) * K1 * numpy.matrix(
+            [[-1j], [-1j * numpy.exp(1j * numpy.pi * 2.0 / 3.0)],
              [-1j * numpy.exp(-1j * numpy.pi * 2.0 / 3.0)]])
-    E_imag2 =  numpy.exp(1j * 5.0 * theta) * K2 * numpy.matrix(
-            [[-1j],
-             [-1j * numpy.exp(-1j * numpy.pi * 2.0 / 3.0)],
+        E_imag2 = numpy.exp(1j * 5.0 * theta) * K2 * numpy.matrix(
+            [[-1j], [-1j * numpy.exp(-1j * numpy.pi * 2.0 / 3.0)],
              [-1j * numpy.exp(1j * numpy.pi * 2.0 / 3.0)]])
 
-    overall_measured_current = ((E_imag1 + E_imag2).real.T * measured_current / one_amp_scalar)[0, 0]
+        overall_measured_current = ((E_imag1 + E_imag2).real.T *
+                                    measured_current / one_amp_scalar)[0, 0]
 
-    current_error = goal_current - overall_measured_current
-    #print(current_error)
-    self.estimated_velocity += current_error * 1.0
-    omega = self.estimated_velocity
+        current_error = goal_current - overall_measured_current
+        #print(current_error)
+        self.estimated_velocity += current_error * 1.0
+        omega = self.estimated_velocity
 
-    # Now, apply the transfer function of the inductor.
-    # Use that to difference the current across the cycle.
-    Icurrent = self.Ilast
-    # No history:
-    #Icurrent = phases(g_single, theta) * goal_current
-    Inext = phases(g_single, theta + omega * 1.0 / hz) * goal_current
+        # Now, apply the transfer function of the inductor.
+        # Use that to difference the current across the cycle.
+        Icurrent = self.Ilast
+        # No history:
+        #Icurrent = phases(g_single, theta) * goal_current
+        Inext = phases(g_single, theta + omega * 1.0 / hz) * goal_current
 
-    deltaI = Inext - Icurrent
+        deltaI = Inext - Icurrent
 
-    H1 = -numpy.linalg.inv(1j * omega * self.L_matrix + self.R_matrix) * omega / Kv
-    H2 = -numpy.linalg.inv(1j * omega * 5.0 * self.L_matrix + self.R_matrix) * omega / Kv
-    p_imag = H1 * E_imag1 + H2 * E_imag2
-    p_next_imag = numpy.exp(1j * omega * 1.0 / hz) * H1 * E_imag1 + \
-        numpy.exp(1j * omega * 5.0 * 1.0 / hz) * H2 * E_imag2
-    p = p_imag.real
+        H1 = -numpy.linalg.inv(1j * omega * self.L_matrix +
+                               self.R_matrix) * omega / Kv
+        H2 = -numpy.linalg.inv(1j * omega * 5.0 * self.L_matrix +
+                               self.R_matrix) * omega / Kv
+        p_imag = H1 * E_imag1 + H2 * E_imag2
+        p_next_imag = numpy.exp(1j * omega * 1.0 / hz) * H1 * E_imag1 + \
+            numpy.exp(1j * omega * 5.0 * 1.0 / hz) * H2 * E_imag2
+        p = p_imag.real
 
-    # So, we now know how much the change in current is due to changes in BEMF.
-    # Subtract that, and then run the stock statespace equation.
-    Vn_ff = self.B_discrete_inverse * (Inext - self.A_discrete * (Icurrent - p) - p_next_imag.real)
-    print('Vn_ff', Vn_ff)
-    print('Inext', Inext)
-    Vn = Vn_ff + self.K * (Icurrent - measured_current)
+        # So, we now know how much the change in current is due to changes in BEMF.
+        # Subtract that, and then run the stock statespace equation.
+        Vn_ff = self.B_discrete_inverse * (Inext - self.A_discrete *
+                                           (Icurrent - p) - p_next_imag.real)
+        print('Vn_ff', Vn_ff)
+        print('Inext', Inext)
+        Vn = Vn_ff + self.K * (Icurrent - measured_current)
 
-    E = phases(f_single, self.X[3, 0]) / Kv * self.X[4, 0]
-    self.data_logger.log_controls(self.current_time, measured_current, Icurrent, E, self.estimated_velocity)
+        E = phases(f_single, self.X[3, 0]) / Kv * self.X[4, 0]
+        self.data_logger.log_controls(self.current_time, measured_current,
+                                      Icurrent, E, self.estimated_velocity)
 
-    self.Ilast = Inext
+        self.Ilast = Inext
 
-    return Vn
+        return Vn
 
-  def Simulate(self):
-    start_wall_time = time.time()
-    self.Ilast = numpy.matrix(numpy.zeros((3, 1)))
-    for n in range(200):
-      goal_current = 10.0
-      max_current = (vcc - (self.X[4, 0] / Kv * 2.0)) / max_one_amp_driving_voltage
-      min_current = (-vcc - (self.X[4, 0] / Kv * 2.0)) / max_one_amp_driving_voltage
-      goal_current = max(min_current, min(max_current, goal_current))
+    def Simulate(self):
+        start_wall_time = time.time()
+        self.Ilast = numpy.matrix(numpy.zeros((3, 1)))
+        for n in range(200):
+            goal_current = 10.0
+            max_current = (
+                vcc - (self.X[4, 0] / Kv * 2.0)) / max_one_amp_driving_voltage
+            min_current = (
+                -vcc - (self.X[4, 0] / Kv * 2.0)) / max_one_amp_driving_voltage
+            goal_current = max(min_current, min(max_current, goal_current))
 
-      Vn = self.DoControls(goal_current)
+            Vn = self.DoControls(goal_current)
 
-      #Vn = numpy.matrix([[0.20], [0.0], [0.0]])
-      #Vn = numpy.matrix([[0.00], [0.20], [0.0]])
-      #Vn = numpy.matrix([[0.00], [0.0], [0.20]])
+            #Vn = numpy.matrix([[0.20], [0.0], [0.0]])
+            #Vn = numpy.matrix([[0.00], [0.20], [0.0]])
+            #Vn = numpy.matrix([[0.00], [0.0], [0.20]])
 
-      # T is the fractional rate.
-      T = Vn / vcc
-      tn = -numpy.min(T)
-      T += tn
-      if (T > 1.0).any():
-        T = T / numpy.max(T)
+            # T is the fractional rate.
+            T = Vn / vcc
+            tn = -numpy.min(T)
+            T += tn
+            if (T > 1.0).any():
+                T = T / numpy.max(T)
 
-      for t, U in sample_times(T = T,
-                               dt = 1.0 / hz, n = 10,
-                               start_time = self.current_time):
-        # Analog amplifier mode!
-        #U = Vn
+            for t, U in sample_times(T=T,
+                                     dt=1.0 / hz,
+                                     n=10,
+                                     start_time=self.current_time):
+                # Analog amplifier mode!
+                #U = Vn
 
-        self.data_logger.log_data(self.X, (U - min(U)), self.current_time, Vn, goal_current)
-        t_array = numpy.array([self.current_time, t])
-        self.X = numpy.matrix(scipy.integrate.odeint(
-            self.motor_diffeq,
-            numpy.squeeze(numpy.asarray(self.X)),
-            t_array, args=(U,)))[1, :].T
+                self.data_logger.log_data(self.X, (U - min(U)),
+                                          self.current_time, Vn, goal_current)
+                t_array = numpy.array([self.current_time, t])
+                self.X = numpy.matrix(
+                    scipy.integrate.odeint(self.motor_diffeq,
+                                           numpy.squeeze(numpy.asarray(
+                                               self.X)),
+                                           t_array,
+                                           args=(U, )))[1, :].T
 
-        self.current_time = t
+                self.current_time = t
 
-    print('Took %f to simulate' % (time.time() - start_wall_time))
+        print('Took %f to simulate' % (time.time() - start_wall_time))
 
-    self.data_logger.plot()
+        self.data_logger.plot()
+
 
 simulation = Simulation()
 simulation.Simulate()
diff --git a/motors/python/haptic_phase_current.py b/motors/python/haptic_phase_current.py
index b17c514..fec909d 100755
--- a/motors/python/haptic_phase_current.py
+++ b/motors/python/haptic_phase_current.py
@@ -54,51 +54,62 @@
 #switching_pattern = 'centered front shifted'
 #switching_pattern = 'anticentered'
 
-Vconv = numpy.matrix([[2.0, -1.0, -1.0],
-                      [-1.0, 2.0, -1.0],
-                      [-1.0, -1.0, 2.0]]) / 3.0
+Vconv = numpy.matrix([[2.0, -1.0, -1.0], [-1.0, 2.0, -1.0], [-1.0, -1.0, 2.0]
+                      ]) / 3.0
+
 
 def f_single(theta):
-  return K1 * numpy.sin(theta) + K2 * numpy.sin(theta * 5)
+    return K1 * numpy.sin(theta) + K2 * numpy.sin(theta * 5)
+
 
 def g_single(theta):
-  return K1 * numpy.sin(theta) - K2 * numpy.sin(theta * 5)
+    return K1 * numpy.sin(theta) - K2 * numpy.sin(theta * 5)
+
 
 def gdot_single(theta):
-  """Derivitive of the current.
+    """Derivitive of the current.
 
   Must be multiplied by omega externally.
   """
-  return K1 * numpy.cos(theta) - 5.0 * K2 * numpy.cos(theta * 5.0)
+    return K1 * numpy.cos(theta) - 5.0 * K2 * numpy.cos(theta * 5.0)
 
-f = numpy.vectorize(f_single, otypes=(numpy.float,))
-g = numpy.vectorize(g_single, otypes=(numpy.float,))
-gdot = numpy.vectorize(gdot_single, otypes=(numpy.float,))
+
+f = numpy.vectorize(f_single, otypes=(numpy.float, ))
+g = numpy.vectorize(g_single, otypes=(numpy.float, ))
+gdot = numpy.vectorize(gdot_single, otypes=(numpy.float, ))
+
 
 def torque(theta):
-  return f(theta) * g(theta)
+    return f(theta) * g(theta)
+
 
 def phase_a(function, theta):
-  return function(theta)
+    return function(theta)
+
 
 def phase_b(function, theta):
-  return function(theta + 2 * numpy.pi / 3)
+    return function(theta + 2 * numpy.pi / 3)
+
 
 def phase_c(function, theta):
-  return function(theta + 4 * numpy.pi / 3)
+    return function(theta + 4 * numpy.pi / 3)
+
 
 def phases(function, theta):
-  return numpy.matrix([[phase_a(function, theta)],
-                       [phase_b(function, theta)],
-                       [phase_c(function, theta)]])
+    return numpy.matrix([[phase_a(function,
+                                  theta)], [phase_b(function, theta)],
+                         [phase_c(function, theta)]])
+
 
 def all_phases(function, theta_range):
-  return (phase_a(function, theta_range) +
-          phase_b(function, theta_range) +
-          phase_c(function, theta_range))
+    return (phase_a(function, theta_range) + phase_b(function, theta_range) +
+            phase_c(function, theta_range))
+
 
 theta_range = numpy.linspace(start=0, stop=4 * numpy.pi, num=10000)
-one_amp_driving_voltage = R * g(theta_range) + (L * gdot(theta_range) + M * gdot(theta_range + 2.0 / 3.0 * numpy.pi) + M * gdot(theta_range - 2.0 / 3.0 * numpy.pi)) * Kv * vcc / 2.0
+one_amp_driving_voltage = R * g(theta_range) + (
+    L * gdot(theta_range) + M * gdot(theta_range + 2.0 / 3.0 * numpy.pi) +
+    M * gdot(theta_range - 2.0 / 3.0 * numpy.pi)) * Kv * vcc / 2.0
 
 max_one_amp_driving_voltage = max(one_amp_driving_voltage)
 
@@ -108,7 +119,8 @@
 
 print 'Max BEMF', max(f(theta_range))
 print 'Max current', max(g(theta_range))
-print 'Max drive voltage (one_amp_driving_voltage)', max(one_amp_driving_voltage)
+print 'Max drive voltage (one_amp_driving_voltage)', max(
+    one_amp_driving_voltage)
 print 'one_amp_scalar', one_amp_scalar
 
 pylab.figure()
@@ -116,12 +128,14 @@
 pylab.plot(theta_range, f(theta_range), label='bemf')
 pylab.plot(theta_range, g(theta_range), label='phase_current')
 pylab.plot(theta_range, torque(theta_range), label='phase_torque')
-pylab.plot(theta_range, all_phases(torque, theta_range), label='sum_torque/current')
+pylab.plot(theta_range,
+           all_phases(torque, theta_range),
+           label='sum_torque/current')
 pylab.legend()
 
 
 def full_sample_times(Ton, Toff, dt, n, start_time):
-  """Returns n + 4 samples for the provided switching times.
+    """Returns n + 4 samples for the provided switching times.
 
   We need the timesteps and Us to integrate.
 
@@ -136,235 +150,260 @@
     array of [t, U matrix]
   """
 
-  assert((Toff <= 1.0).all())
-  assert((Ton <= 1.0).all())
-  assert((Toff >= 0.0).all())
-  assert((Ton >= 0.0).all())
+    assert ((Toff <= 1.0).all())
+    assert ((Ton <= 1.0).all())
+    assert ((Toff >= 0.0).all())
+    assert ((Ton >= 0.0).all())
 
-  if (Ton <= Toff).all():
-    on_before_off = True
-  else:
-    # Verify that they are all ordered correctly.
-    assert(not (Ton <= Toff).any())
-    on_before_off = False
-
-  Toff = Toff.copy() * dt
-  Toff[Toff < 100e-9] = -1.0
-  Toff[Toff > dt] = dt
-
-  Ton = Ton.copy() * dt
-  Ton[Ton < 100e-9] = -1.0
-  Ton[Ton > dt - 100e-9] = dt + 1.0
-
-  result = []
-  t = 0
-
-  result_times = numpy.concatenate(
-      (numpy.linspace(0, dt, num=n),
-       numpy.reshape(numpy.asarray(Ton[numpy.logical_and(Ton < dt, Ton > 0.0)]), (-1,)),
-       numpy.reshape(numpy.asarray(Toff[numpy.logical_and(Toff < dt, Toff > 0.0)]), (-1,))
-       ))
-  result_times.sort()
-  assert((result_times >= 0).all())
-  assert((result_times <= dt).all())
-
-  for t in result_times:
-    if on_before_off:
-      U = numpy.matrix([[vcc], [vcc], [vcc]])
-      U[t <= Ton] = 0.0
-      U[Toff < t] = 0.0
+    if (Ton <= Toff).all():
+        on_before_off = True
     else:
-      U = numpy.matrix([[0.0], [0.0], [0.0]])
-      U[t > Ton] = vcc
-      U[t <= Toff] = vcc
-    result.append((float(t + start_time), U.copy()))
+        # Verify that they are all ordered correctly.
+        assert (not (Ton <= Toff).any())
+        on_before_off = False
 
-  return result
+    Toff = Toff.copy() * dt
+    Toff[Toff < 100e-9] = -1.0
+    Toff[Toff > dt] = dt
+
+    Ton = Ton.copy() * dt
+    Ton[Ton < 100e-9] = -1.0
+    Ton[Ton > dt - 100e-9] = dt + 1.0
+
+    result = []
+    t = 0
+
+    result_times = numpy.concatenate(
+        (numpy.linspace(0, dt, num=n),
+         numpy.reshape(
+             numpy.asarray(Ton[numpy.logical_and(Ton < dt, Ton > 0.0)]),
+             (-1, )),
+         numpy.reshape(
+             numpy.asarray(Toff[numpy.logical_and(Toff < dt, Toff > 0.0)]),
+             (-1, ))))
+    result_times.sort()
+    assert ((result_times >= 0).all())
+    assert ((result_times <= dt).all())
+
+    for t in result_times:
+        if on_before_off:
+            U = numpy.matrix([[vcc], [vcc], [vcc]])
+            U[t <= Ton] = 0.0
+            U[Toff < t] = 0.0
+        else:
+            U = numpy.matrix([[0.0], [0.0], [0.0]])
+            U[t > Ton] = vcc
+            U[t <= Toff] = vcc
+        result.append((float(t + start_time), U.copy()))
+
+    return result
+
 
 def sample_times(T, dt, n, start_time):
-  if switching_pattern == 'rear':
-    T = 1.0 - T
-    ans = full_sample_times(T, numpy.matrix(numpy.ones((3, 1))) * 1.0, dt, n, start_time)
-  elif switching_pattern == 'centered front shifted':
-    # Centered, but shifted to the beginning of the cycle.
-    Ton = 0.5 - T / 2.0
-    Toff = 0.5 + T / 2.0
+    if switching_pattern == 'rear':
+        T = 1.0 - T
+        ans = full_sample_times(T,
+                                numpy.matrix(numpy.ones((3, 1))) * 1.0, dt, n,
+                                start_time)
+    elif switching_pattern == 'centered front shifted':
+        # Centered, but shifted to the beginning of the cycle.
+        Ton = 0.5 - T / 2.0
+        Toff = 0.5 + T / 2.0
 
-    tn = min(Ton)[0, 0]
-    Ton -= tn
-    Toff -= tn
+        tn = min(Ton)[0, 0]
+        Ton -= tn
+        Toff -= tn
 
-    ans = full_sample_times(Ton, Toff, dt, n, start_time)
-  elif switching_pattern == 'centered':
-    # Centered, looks waaay better.
-    Ton = 0.5 - T / 2.0
-    Toff = 0.5 + T / 2.0
+        ans = full_sample_times(Ton, Toff, dt, n, start_time)
+    elif switching_pattern == 'centered':
+        # Centered, looks waaay better.
+        Ton = 0.5 - T / 2.0
+        Toff = 0.5 + T / 2.0
 
-    ans = full_sample_times(Ton, Toff, dt, n, start_time)
-  elif switching_pattern == 'anticentered':
-    # Centered, looks waaay better.
-    Toff = T / 2.0
-    Ton = 1.0 - T / 2.0
+        ans = full_sample_times(Ton, Toff, dt, n, start_time)
+    elif switching_pattern == 'anticentered':
+        # Centered, looks waaay better.
+        Toff = T / 2.0
+        Ton = 1.0 - T / 2.0
 
-    ans = full_sample_times(Ton, Toff, dt, n, start_time)
-  elif switching_pattern == 'front':
-    ans = full_sample_times(numpy.matrix(numpy.zeros((3, 1))), T, dt, n, start_time)
-  else:
-    assert(False)
+        ans = full_sample_times(Ton, Toff, dt, n, start_time)
+    elif switching_pattern == 'front':
+        ans = full_sample_times(numpy.matrix(numpy.zeros((3, 1))), T, dt, n,
+                                start_time)
+    else:
+        assert (False)
 
-  return ans
+    return ans
+
 
 class DataLogger(object):
-  def __init__(self, title=None):
-    self.title = title
-    self.ia = []
-    self.ib = []
-    self.ic = []
-    self.ia_goal = []
-    self.ib_goal = []
-    self.ic_goal = []
-    self.ia_controls = []
-    self.ib_controls = []
-    self.ic_controls = []
-    self.isensea = []
-    self.isenseb = []
-    self.isensec = []
 
-    self.va = []
-    self.vb = []
-    self.vc = []
-    self.van = []
-    self.vbn = []
-    self.vcn = []
+    def __init__(self, title=None):
+        self.title = title
+        self.ia = []
+        self.ib = []
+        self.ic = []
+        self.ia_goal = []
+        self.ib_goal = []
+        self.ic_goal = []
+        self.ia_controls = []
+        self.ib_controls = []
+        self.ic_controls = []
+        self.isensea = []
+        self.isenseb = []
+        self.isensec = []
 
-    self.ea = []
-    self.eb = []
-    self.ec = []
+        self.va = []
+        self.vb = []
+        self.vc = []
+        self.van = []
+        self.vbn = []
+        self.vcn = []
 
-    self.theta = []
-    self.omega = []
+        self.ea = []
+        self.eb = []
+        self.ec = []
 
-    self.i_goal = []
+        self.theta = []
+        self.omega = []
 
-    self.time = []
-    self.controls_time = []
-    self.predicted_time = []
+        self.i_goal = []
 
-    self.ia_pred = []
-    self.ib_pred = []
-    self.ic_pred = []
+        self.time = []
+        self.controls_time = []
+        self.predicted_time = []
 
-    self.voltage_time = []
-    self.estimated_velocity = []
-    self.U_last = numpy.matrix(numpy.zeros((3, 1)))
+        self.ia_pred = []
+        self.ib_pred = []
+        self.ic_pred = []
 
-  def log_predicted(self, current_time, p):
-    self.predicted_time.append(current_time)
-    self.ia_pred.append(p[0, 0])
-    self.ib_pred.append(p[1, 0])
-    self.ic_pred.append(p[2, 0])
+        self.voltage_time = []
+        self.estimated_velocity = []
+        self.U_last = numpy.matrix(numpy.zeros((3, 1)))
 
-  def log_controls(self, current_time, measured_current, In, E, estimated_velocity):
-    self.controls_time.append(current_time)
-    self.ia_controls.append(measured_current[0, 0])
-    self.ib_controls.append(measured_current[1, 0])
-    self.ic_controls.append(measured_current[2, 0])
+    def log_predicted(self, current_time, p):
+        self.predicted_time.append(current_time)
+        self.ia_pred.append(p[0, 0])
+        self.ib_pred.append(p[1, 0])
+        self.ic_pred.append(p[2, 0])
 
-    self.ea.append(E[0, 0])
-    self.eb.append(E[1, 0])
-    self.ec.append(E[2, 0])
+    def log_controls(self, current_time, measured_current, In, E,
+                     estimated_velocity):
+        self.controls_time.append(current_time)
+        self.ia_controls.append(measured_current[0, 0])
+        self.ib_controls.append(measured_current[1, 0])
+        self.ic_controls.append(measured_current[2, 0])
 
-    self.ia_goal.append(In[0, 0])
-    self.ib_goal.append(In[1, 0])
-    self.ic_goal.append(In[2, 0])
-    self.estimated_velocity.append(estimated_velocity)
+        self.ea.append(E[0, 0])
+        self.eb.append(E[1, 0])
+        self.ec.append(E[2, 0])
 
-  def log_data(self, X, U, current_time, Vn, i_goal):
-    self.ia.append(X[0, 0])
-    self.ib.append(X[1, 0])
-    self.ic.append(X[2, 0])
+        self.ia_goal.append(In[0, 0])
+        self.ib_goal.append(In[1, 0])
+        self.ic_goal.append(In[2, 0])
+        self.estimated_velocity.append(estimated_velocity)
 
-    self.i_goal.append(i_goal)
+    def log_data(self, X, U, current_time, Vn, i_goal):
+        self.ia.append(X[0, 0])
+        self.ib.append(X[1, 0])
+        self.ic.append(X[2, 0])
 
-    self.isensea.append(X[5, 0])
-    self.isenseb.append(X[6, 0])
-    self.isensec.append(X[7, 0])
+        self.i_goal.append(i_goal)
 
-    self.theta.append(X[3, 0])
-    self.omega.append(X[4, 0])
+        self.isensea.append(X[5, 0])
+        self.isenseb.append(X[6, 0])
+        self.isensec.append(X[7, 0])
 
-    self.time.append(current_time)
+        self.theta.append(X[3, 0])
+        self.omega.append(X[4, 0])
 
-    self.van.append(Vn[0, 0])
-    self.vbn.append(Vn[1, 0])
-    self.vcn.append(Vn[2, 0])
+        self.time.append(current_time)
 
-    if (self.U_last != U).any():
-      self.va.append(self.U_last[0, 0])
-      self.vb.append(self.U_last[1, 0])
-      self.vc.append(self.U_last[2, 0])
-      self.voltage_time.append(current_time)
+        self.van.append(Vn[0, 0])
+        self.vbn.append(Vn[1, 0])
+        self.vcn.append(Vn[2, 0])
 
-      self.va.append(U[0, 0])
-      self.vb.append(U[1, 0])
-      self.vc.append(U[2, 0])
-      self.voltage_time.append(current_time)
-      self.U_last = U.copy()
+        if (self.U_last != U).any():
+            self.va.append(self.U_last[0, 0])
+            self.vb.append(self.U_last[1, 0])
+            self.vc.append(self.U_last[2, 0])
+            self.voltage_time.append(current_time)
 
-  def plot(self):
-    fig = pylab.figure()
-    pylab.subplot(3, 1, 1)
-    pylab.plot(self.controls_time, self.ia_controls, 'ro', label='ia_controls')
-    pylab.plot(self.controls_time, self.ib_controls, 'go', label='ib_controls')
-    pylab.plot(self.controls_time, self.ic_controls, 'bo', label='ic_controls')
-    pylab.plot(self.controls_time, self.ia_goal, 'r--', label='ia_goal')
-    pylab.plot(self.controls_time, self.ib_goal, 'g--', label='ib_goal')
-    pylab.plot(self.controls_time, self.ic_goal, 'b--', label='ic_goal')
+            self.va.append(U[0, 0])
+            self.vb.append(U[1, 0])
+            self.vc.append(U[2, 0])
+            self.voltage_time.append(current_time)
+            self.U_last = U.copy()
 
-    #pylab.plot(self.controls_time, self.ia_pred, 'r*', label='ia_pred')
-    #pylab.plot(self.controls_time, self.ib_pred, 'g*', label='ib_pred')
-    #pylab.plot(self.controls_time, self.ic_pred, 'b*', label='ic_pred')
-    pylab.plot(self.time, self.isensea, 'r:', label='ia_sense')
-    pylab.plot(self.time, self.isenseb, 'g:', label='ib_sense')
-    pylab.plot(self.time, self.isensec, 'b:', label='ic_sense')
-    pylab.plot(self.time, self.ia, 'r', label='ia')
-    pylab.plot(self.time, self.ib, 'g', label='ib')
-    pylab.plot(self.time, self.ic, 'b', label='ic')
-    pylab.plot(self.time, self.i_goal, label='i_goal')
-    if self.title is not None:
-      fig.canvas.set_window_title(self.title)
-    pylab.legend()
+    def plot(self):
+        fig = pylab.figure()
+        pylab.subplot(3, 1, 1)
+        pylab.plot(self.controls_time,
+                   self.ia_controls,
+                   'ro',
+                   label='ia_controls')
+        pylab.plot(self.controls_time,
+                   self.ib_controls,
+                   'go',
+                   label='ib_controls')
+        pylab.plot(self.controls_time,
+                   self.ic_controls,
+                   'bo',
+                   label='ic_controls')
+        pylab.plot(self.controls_time, self.ia_goal, 'r--', label='ia_goal')
+        pylab.plot(self.controls_time, self.ib_goal, 'g--', label='ib_goal')
+        pylab.plot(self.controls_time, self.ic_goal, 'b--', label='ic_goal')
 
-    pylab.subplot(3, 1, 2)
-    pylab.plot(self.voltage_time, self.va, label='va')
-    pylab.plot(self.voltage_time, self.vb, label='vb')
-    pylab.plot(self.voltage_time, self.vc, label='vc')
-    pylab.plot(self.time, self.van, label='van')
-    pylab.plot(self.time, self.vbn, label='vbn')
-    pylab.plot(self.time, self.vcn, label='vcn')
-    pylab.plot(self.controls_time, self.ea, label='ea')
-    pylab.plot(self.controls_time, self.eb, label='eb')
-    pylab.plot(self.controls_time, self.ec, label='ec')
-    pylab.legend()
+        #pylab.plot(self.controls_time, self.ia_pred, 'r*', label='ia_pred')
+        #pylab.plot(self.controls_time, self.ib_pred, 'g*', label='ib_pred')
+        #pylab.plot(self.controls_time, self.ic_pred, 'b*', label='ic_pred')
+        pylab.plot(self.time, self.isensea, 'r:', label='ia_sense')
+        pylab.plot(self.time, self.isenseb, 'g:', label='ib_sense')
+        pylab.plot(self.time, self.isensec, 'b:', label='ic_sense')
+        pylab.plot(self.time, self.ia, 'r', label='ia')
+        pylab.plot(self.time, self.ib, 'g', label='ib')
+        pylab.plot(self.time, self.ic, 'b', label='ic')
+        pylab.plot(self.time, self.i_goal, label='i_goal')
+        if self.title is not None:
+            fig.canvas.set_window_title(self.title)
+        pylab.legend()
 
-    pylab.subplot(3, 1, 3)
-    pylab.plot(self.time, self.theta, label='theta')
-    pylab.plot(self.time, self.omega, label='omega')
-    #pylab.plot(self.controls_time, self.estimated_velocity, label='estimated omega')
+        pylab.subplot(3, 1, 2)
+        pylab.plot(self.voltage_time, self.va, label='va')
+        pylab.plot(self.voltage_time, self.vb, label='vb')
+        pylab.plot(self.voltage_time, self.vc, label='vc')
+        pylab.plot(self.time, self.van, label='van')
+        pylab.plot(self.time, self.vbn, label='vbn')
+        pylab.plot(self.time, self.vcn, label='vcn')
+        pylab.plot(self.controls_time, self.ea, label='ea')
+        pylab.plot(self.controls_time, self.eb, label='eb')
+        pylab.plot(self.controls_time, self.ec, label='ec')
+        pylab.legend()
 
-    pylab.legend()
+        pylab.subplot(3, 1, 3)
+        pylab.plot(self.time, self.theta, label='theta')
+        pylab.plot(self.time, self.omega, label='omega')
+        #pylab.plot(self.controls_time, self.estimated_velocity, label='estimated omega')
 
-    fig = pylab.figure()
-    pylab.plot(self.controls_time,
-               map(operator.sub, self.ia_goal, self.ia_controls), 'r', label='ia_error')
-    pylab.plot(self.controls_time,
-               map(operator.sub, self.ib_goal, self.ib_controls), 'g', label='ib_error')
-    pylab.plot(self.controls_time,
-               map(operator.sub, self.ic_goal, self.ic_controls), 'b', label='ic_error')
-    if self.title is not None:
-      fig.canvas.set_window_title(self.title)
-    pylab.legend()
-    pylab.show()
+        pylab.legend()
+
+        fig = pylab.figure()
+        pylab.plot(self.controls_time,
+                   map(operator.sub, self.ia_goal, self.ia_controls),
+                   'r',
+                   label='ia_error')
+        pylab.plot(self.controls_time,
+                   map(operator.sub, self.ib_goal, self.ib_controls),
+                   'g',
+                   label='ib_error')
+        pylab.plot(self.controls_time,
+                   map(operator.sub, self.ic_goal, self.ic_controls),
+                   'b',
+                   label='ic_error')
+        if self.title is not None:
+            fig.canvas.set_window_title(self.title)
+        pylab.legend()
+        pylab.show()
 
 
 # So, from running a bunch of math, we know the following:
@@ -397,180 +436,203 @@
 # inv(L_matrix) * (Vconv * V - E - R_matrix * I) = I_dot
 # B * V - inv(L_matrix) * E - A * I = I_dot
 class Simulation(object):
-  def __init__(self):
-    self.R_matrix = numpy.matrix(numpy.eye(3)) * R
-    self.L_matrix = numpy.matrix([[L, M, M], [M, L, M], [M, M, L]])
-    self.L_matrix_inv = numpy.linalg.inv(self.L_matrix)
-    self.A = self.L_matrix_inv * self.R_matrix
-    self.B = self.L_matrix_inv * Vconv
-    self.A_discrete, self.B_discrete = controls.c2d(-self.A, self.B, 1.0 / hz)
-    self.B_discrete_inverse = numpy.matrix(numpy.eye(3)) / (self.B_discrete[0, 0] - self.B_discrete[1, 0])
 
-    self.R_model = R * 1.0
-    self.L_model = L * 1.0
-    self.M_model = M * 1.0
-    self.R_matrix_model = numpy.matrix(numpy.eye(3)) * self.R_model
-    self.L_matrix_model = numpy.matrix([[self.L_model, self.M_model, self.M_model],
-                                        [self.M_model, self.L_model, self.M_model],
-                                        [self.M_model, self.M_model, self.L_model]])
-    self.L_matrix_inv_model = numpy.linalg.inv(self.L_matrix_model)
-    self.A_model = self.L_matrix_inv_model * self.R_matrix_model
-    self.B_model = self.L_matrix_inv_model * Vconv
-    self.A_discrete_model, self.B_discrete_model = \
-        controls.c2d(-self.A_model, self.B_model, 1.0 / hz)
-    self.B_discrete_inverse_model = numpy.matrix(numpy.eye(3)) / (self.B_discrete_model[0, 0] - self.B_discrete_model[1, 0])
+    def __init__(self):
+        self.R_matrix = numpy.matrix(numpy.eye(3)) * R
+        self.L_matrix = numpy.matrix([[L, M, M], [M, L, M], [M, M, L]])
+        self.L_matrix_inv = numpy.linalg.inv(self.L_matrix)
+        self.A = self.L_matrix_inv * self.R_matrix
+        self.B = self.L_matrix_inv * Vconv
+        self.A_discrete, self.B_discrete = controls.c2d(
+            -self.A, self.B, 1.0 / hz)
+        self.B_discrete_inverse = numpy.matrix(
+            numpy.eye(3)) / (self.B_discrete[0, 0] - self.B_discrete[1, 0])
 
-    print 'constexpr double kL = %g;' % self.L_model
-    print 'constexpr double kM = %g;' % self.M_model
-    print 'constexpr double kR = %g;' % self.R_model
-    print 'constexpr float kAdiscrete_diagonal = %gf;' % self.A_discrete_model[0, 0]
-    print 'constexpr float kAdiscrete_offdiagonal = %gf;' % self.A_discrete_model[1, 0]
-    print 'constexpr float kBdiscrete_inv_diagonal = %gf;' % self.B_discrete_inverse_model[0, 0]
-    print 'constexpr float kBdiscrete_inv_offdiagonal = %gf;' % self.B_discrete_inverse_model[1, 0]
-    print 'constexpr double kOneAmpScalar = %g;' % one_amp_scalar
-    print 'constexpr double kMaxOneAmpDrivingVoltage = %g;' % max_one_amp_driving_voltage
-    print('A_discrete', self.A_discrete)
-    print('B_discrete', self.B_discrete)
-    print('B_discrete_sub', numpy.linalg.inv(self.B_discrete[0:2, 0:2]))
-    print('B_discrete_inv', self.B_discrete_inverse)
+        self.R_model = R * 1.0
+        self.L_model = L * 1.0
+        self.M_model = M * 1.0
+        self.R_matrix_model = numpy.matrix(numpy.eye(3)) * self.R_model
+        self.L_matrix_model = numpy.matrix(
+            [[self.L_model, self.M_model, self.M_model],
+             [self.M_model, self.L_model, self.M_model],
+             [self.M_model, self.M_model, self.L_model]])
+        self.L_matrix_inv_model = numpy.linalg.inv(self.L_matrix_model)
+        self.A_model = self.L_matrix_inv_model * self.R_matrix_model
+        self.B_model = self.L_matrix_inv_model * Vconv
+        self.A_discrete_model, self.B_discrete_model = \
+            controls.c2d(-self.A_model, self.B_model, 1.0 / hz)
+        self.B_discrete_inverse_model = numpy.matrix(numpy.eye(3)) / (
+            self.B_discrete_model[0, 0] - self.B_discrete_model[1, 0])
 
-    # Xdot[5:, :] = (R_sense2 + R_sense1) / R_sense2 * (
-    #      (1.0 / (R_sense1 * C_sense)) * (-Isense * R_sense2 / (R_sense1 + R_sense2) * (R_sense1 / R_sense2 + 1.0) + I))
-    self.mk1 = (R_sense2 + R_sense1) / R_sense2 * (1.0 / (R_sense1 * C_sense))
-    self.mk2 = -self.mk1 * R_sense2 / (R_sense1 + R_sense2) * (R_sense1 / R_sense2 + 1.0)
+        print 'constexpr double kL = %g;' % self.L_model
+        print 'constexpr double kM = %g;' % self.M_model
+        print 'constexpr double kR = %g;' % self.R_model
+        print 'constexpr float kAdiscrete_diagonal = %gf;' % self.A_discrete_model[
+            0, 0]
+        print 'constexpr float kAdiscrete_offdiagonal = %gf;' % self.A_discrete_model[
+            1, 0]
+        print 'constexpr float kBdiscrete_inv_diagonal = %gf;' % self.B_discrete_inverse_model[
+            0, 0]
+        print 'constexpr float kBdiscrete_inv_offdiagonal = %gf;' % self.B_discrete_inverse_model[
+            1, 0]
+        print 'constexpr double kOneAmpScalar = %g;' % one_amp_scalar
+        print 'constexpr double kMaxOneAmpDrivingVoltage = %g;' % max_one_amp_driving_voltage
+        print('A_discrete', self.A_discrete)
+        print('B_discrete', self.B_discrete)
+        print('B_discrete_sub', numpy.linalg.inv(self.B_discrete[0:2, 0:2]))
+        print('B_discrete_inv', self.B_discrete_inverse)
 
-    # ia, ib, ic, theta, omega, isensea, isenseb, isensec
-    self.X = numpy.matrix([[0.0], [0.0], [0.0], [-2.0 * numpy.pi / 3.0], [0.0], [0.0], [0.0], [0.0]])
+        # Xdot[5:, :] = (R_sense2 + R_sense1) / R_sense2 * (
+        #      (1.0 / (R_sense1 * C_sense)) * (-Isense * R_sense2 / (R_sense1 + R_sense2) * (R_sense1 / R_sense2 + 1.0) + I))
+        self.mk1 = (R_sense2 + R_sense1) / R_sense2 * (1.0 /
+                                                       (R_sense1 * C_sense))
+        self.mk2 = -self.mk1 * R_sense2 / (R_sense1 + R_sense2) * (
+            R_sense1 / R_sense2 + 1.0)
 
-    self.K = 0.05 * Vconv
-    print('A %s' % repr(self.A))
-    print('B %s' % repr(self.B))
-    print('K %s' % repr(self.K))
+        # ia, ib, ic, theta, omega, isensea, isenseb, isensec
+        self.X = numpy.matrix([[0.0], [0.0], [0.0], [-2.0 * numpy.pi / 3.0],
+                               [0.0], [0.0], [0.0], [0.0]])
 
-    print('System poles are %s' % repr(numpy.linalg.eig(self.A)[0]))
-    print('Poles are %s' % repr(numpy.linalg.eig(self.A - self.B * self.K)[0]))
+        self.K = 0.05 * Vconv
+        print('A %s' % repr(self.A))
+        print('B %s' % repr(self.B))
+        print('K %s' % repr(self.K))
 
-    controllability = controls.ctrb(self.A, self.B)
-    print('Rank of augmented controlability matrix. %d' % numpy.linalg.matrix_rank(
-          controllability))
+        print('System poles are %s' % repr(numpy.linalg.eig(self.A)[0]))
+        print('Poles are %s' %
+              repr(numpy.linalg.eig(self.A - self.B * self.K)[0]))
 
-    self.data_logger = DataLogger(switching_pattern)
-    self.current_time = 0.0
+        controllability = controls.ctrb(self.A, self.B)
+        print('Rank of augmented controlability matrix. %d' %
+              numpy.linalg.matrix_rank(controllability))
 
-    self.estimated_velocity = self.X[4, 0]
+        self.data_logger = DataLogger(switching_pattern)
+        self.current_time = 0.0
 
-  def motor_diffeq(self, x, t, U):
-    I = numpy.matrix(x[0:3]).T
-    theta = x[3]
-    omega = x[4]
-    Isense = numpy.matrix(x[5:]).T
+        self.estimated_velocity = self.X[4, 0]
 
-    dflux = phases(f_single, theta) / Kv
+    def motor_diffeq(self, x, t, U):
+        I = numpy.matrix(x[0:3]).T
+        theta = x[3]
+        omega = x[4]
+        Isense = numpy.matrix(x[5:]).T
 
-    Xdot = numpy.matrix(numpy.zeros((8, 1)))
-    di_dt = -self.A_model * I + self.B_model * U - self.L_matrix_inv_model * dflux * omega
-    torque = I.T * dflux
-    Xdot[0:3, :] = di_dt
-    Xdot[3, :] = omega
-    Xdot[4, :] = torque / J
+        dflux = phases(f_single, theta) / Kv
 
-    Xdot[5:, :] = self.mk1 * I + self.mk2 * Isense
-    return numpy.squeeze(numpy.asarray(Xdot))
+        Xdot = numpy.matrix(numpy.zeros((8, 1)))
+        di_dt = -self.A_model * I + self.B_model * U - self.L_matrix_inv_model * dflux * omega
+        torque = I.T * dflux
+        Xdot[0:3, :] = di_dt
+        Xdot[3, :] = omega
+        Xdot[4, :] = torque / J
 
-  def DoControls(self, goal_current):
-    theta = self.X[3, 0]
-    # Use the actual angular velocity.
-    omega = self.X[4, 0]
+        Xdot[5:, :] = self.mk1 * I + self.mk2 * Isense
+        return numpy.squeeze(numpy.asarray(Xdot))
 
-    measured_current = self.X[5:, :].copy()
+    def DoControls(self, goal_current):
+        theta = self.X[3, 0]
+        # Use the actual angular velocity.
+        omega = self.X[4, 0]
 
-    # Ok, lets now fake it.
-    E_imag1 = numpy.exp(1j * theta) * K1 * numpy.matrix(
-            [[-1j],
-             [-1j * numpy.exp(1j * numpy.pi * 2.0 / 3.0)],
+        measured_current = self.X[5:, :].copy()
+
+        # Ok, lets now fake it.
+        E_imag1 = numpy.exp(1j * theta) * K1 * numpy.matrix(
+            [[-1j], [-1j * numpy.exp(1j * numpy.pi * 2.0 / 3.0)],
              [-1j * numpy.exp(-1j * numpy.pi * 2.0 / 3.0)]])
-    E_imag2 =  numpy.exp(1j * 5.0 * theta) * K2 * numpy.matrix(
-            [[-1j],
-             [-1j * numpy.exp(-1j * numpy.pi * 2.0 / 3.0)],
+        E_imag2 = numpy.exp(1j * 5.0 * theta) * K2 * numpy.matrix(
+            [[-1j], [-1j * numpy.exp(-1j * numpy.pi * 2.0 / 3.0)],
              [-1j * numpy.exp(1j * numpy.pi * 2.0 / 3.0)]])
 
-    overall_measured_current = ((E_imag1 + E_imag2).real.T * measured_current / one_amp_scalar)[0, 0]
+        overall_measured_current = ((E_imag1 + E_imag2).real.T *
+                                    measured_current / one_amp_scalar)[0, 0]
 
-    current_error = goal_current - overall_measured_current
-    #print(current_error)
-    self.estimated_velocity += current_error * 1.0
-    omega = self.estimated_velocity
+        current_error = goal_current - overall_measured_current
+        #print(current_error)
+        self.estimated_velocity += current_error * 1.0
+        omega = self.estimated_velocity
 
-    # Now, apply the transfer function of the inductor.
-    # Use that to difference the current across the cycle.
-    Icurrent = self.Ilast
-    # No history:
-    #Icurrent = phases(g_single, theta) * goal_current
-    Inext = phases(g_single, theta + omega * 1.0 / hz) * goal_current
+        # Now, apply the transfer function of the inductor.
+        # Use that to difference the current across the cycle.
+        Icurrent = self.Ilast
+        # No history:
+        #Icurrent = phases(g_single, theta) * goal_current
+        Inext = phases(g_single, theta + omega * 1.0 / hz) * goal_current
 
-    deltaI = Inext - Icurrent
+        deltaI = Inext - Icurrent
 
-    H1 = -numpy.linalg.inv(1j * omega * self.L_matrix + self.R_matrix) * omega / Kv
-    H2 = -numpy.linalg.inv(1j * omega * 5.0 * self.L_matrix + self.R_matrix) * omega / Kv
-    p_imag = H1 * E_imag1 + H2 * E_imag2
-    p_next_imag = numpy.exp(1j * omega * 1.0 / hz) * H1 * E_imag1 + \
-        numpy.exp(1j * omega * 5.0 * 1.0 / hz) * H2 * E_imag2
-    p = p_imag.real
+        H1 = -numpy.linalg.inv(1j * omega * self.L_matrix +
+                               self.R_matrix) * omega / Kv
+        H2 = -numpy.linalg.inv(1j * omega * 5.0 * self.L_matrix +
+                               self.R_matrix) * omega / Kv
+        p_imag = H1 * E_imag1 + H2 * E_imag2
+        p_next_imag = numpy.exp(1j * omega * 1.0 / hz) * H1 * E_imag1 + \
+            numpy.exp(1j * omega * 5.0 * 1.0 / hz) * H2 * E_imag2
+        p = p_imag.real
 
-    # So, we now know how much the change in current is due to changes in BEMF.
-    # Subtract that, and then run the stock statespace equation.
-    Vn_ff = self.B_discrete_inverse * (Inext - self.A_discrete * (Icurrent - p) - p_next_imag.real)
-    print 'Vn_ff', Vn_ff
-    print 'Inext', Inext
-    Vn = Vn_ff + self.K * (Icurrent - measured_current)
+        # So, we now know how much the change in current is due to changes in BEMF.
+        # Subtract that, and then run the stock statespace equation.
+        Vn_ff = self.B_discrete_inverse * (Inext - self.A_discrete *
+                                           (Icurrent - p) - p_next_imag.real)
+        print 'Vn_ff', Vn_ff
+        print 'Inext', Inext
+        Vn = Vn_ff + self.K * (Icurrent - measured_current)
 
-    E = phases(f_single, self.X[3, 0]) / Kv * self.X[4, 0]
-    self.data_logger.log_controls(self.current_time, measured_current, Icurrent, E, self.estimated_velocity)
+        E = phases(f_single, self.X[3, 0]) / Kv * self.X[4, 0]
+        self.data_logger.log_controls(self.current_time, measured_current,
+                                      Icurrent, E, self.estimated_velocity)
 
-    self.Ilast = Inext
+        self.Ilast = Inext
 
-    return Vn
+        return Vn
 
-  def Simulate(self):
-    start_wall_time = time.time()
-    self.Ilast = numpy.matrix(numpy.zeros((3, 1)))
-    for n in range(200):
-      goal_current = 1.0
-      max_current = (vcc - (self.X[4, 0] / Kv * 2.0)) / max_one_amp_driving_voltage
-      min_current = (-vcc - (self.X[4, 0] / Kv * 2.0)) / max_one_amp_driving_voltage
-      goal_current = max(min_current, min(max_current, goal_current))
+    def Simulate(self):
+        start_wall_time = time.time()
+        self.Ilast = numpy.matrix(numpy.zeros((3, 1)))
+        for n in range(200):
+            goal_current = 1.0
+            max_current = (
+                vcc - (self.X[4, 0] / Kv * 2.0)) / max_one_amp_driving_voltage
+            min_current = (
+                -vcc - (self.X[4, 0] / Kv * 2.0)) / max_one_amp_driving_voltage
+            goal_current = max(min_current, min(max_current, goal_current))
 
-      Vn = self.DoControls(goal_current)
+            Vn = self.DoControls(goal_current)
 
-      #Vn = numpy.matrix([[1.00], [0.0], [0.0]])
-      Vn = numpy.matrix([[0.00], [1.00], [0.0]])
-      #Vn = numpy.matrix([[0.00], [0.0], [1.00]])
+            #Vn = numpy.matrix([[1.00], [0.0], [0.0]])
+            Vn = numpy.matrix([[0.00], [1.00], [0.0]])
+            #Vn = numpy.matrix([[0.00], [0.0], [1.00]])
 
-      # T is the fractional rate.
-      T = Vn / vcc
-      tn = -numpy.min(T)
-      T += tn
-      if (T > 1.0).any():
-        T = T / numpy.max(T)
+            # T is the fractional rate.
+            T = Vn / vcc
+            tn = -numpy.min(T)
+            T += tn
+            if (T > 1.0).any():
+                T = T / numpy.max(T)
 
-      for t, U in sample_times(T = T,
-                               dt = 1.0 / hz, n = 10,
-                               start_time = self.current_time):
-        # Analog amplifier mode!
-        #U = Vn
+            for t, U in sample_times(T=T,
+                                     dt=1.0 / hz,
+                                     n=10,
+                                     start_time=self.current_time):
+                # Analog amplifier mode!
+                #U = Vn
 
-        self.data_logger.log_data(self.X, (U - min(U)), self.current_time, Vn, goal_current)
-        t_array = numpy.array([self.current_time, t])
-        self.X = numpy.matrix(scipy.integrate.odeint(
-            self.motor_diffeq,
-            numpy.squeeze(numpy.asarray(self.X)),
-            t_array, args=(U,)))[1, :].T
+                self.data_logger.log_data(self.X, (U - min(U)),
+                                          self.current_time, Vn, goal_current)
+                t_array = numpy.array([self.current_time, t])
+                self.X = numpy.matrix(
+                    scipy.integrate.odeint(self.motor_diffeq,
+                                           numpy.squeeze(numpy.asarray(
+                                               self.X)),
+                                           t_array,
+                                           args=(U, )))[1, :].T
 
-        self.current_time = t
+                self.current_time = t
 
-    print 'Took %f to simulate' % (time.time() - start_wall_time)
+        print 'Took %f to simulate' % (time.time() - start_wall_time)
 
-    self.data_logger.plot()
+        self.data_logger.plot()
+
 
 simulation = Simulation()
 simulation.Simulate()
diff --git a/motors/seems_reasonable/drivetrain.py b/motors/seems_reasonable/drivetrain.py
index 52b3920..ad3d92a 100644
--- a/motors/seems_reasonable/drivetrain.py
+++ b/motors/seems_reasonable/drivetrain.py
@@ -30,11 +30,10 @@
         glog.error("Expected .h file name and .cc file name")
     else:
         # Write the generated constants out to a file.
-        drivetrain.WriteDrivetrain(
-            argv[1:3],
-            argv[3:5], ['motors', 'seems_reasonable'],
-            kDrivetrain,
-            scalar_type='float')
+        drivetrain.WriteDrivetrain(argv[1:3],
+                                   argv[3:5], ['motors', 'seems_reasonable'],
+                                   kDrivetrain,
+                                   scalar_type='float')
 
 
 if __name__ == '__main__':
diff --git a/motors/seems_reasonable/polydrivetrain.py b/motors/seems_reasonable/polydrivetrain.py
index 452b3fb..665739f 100644
--- a/motors/seems_reasonable/polydrivetrain.py
+++ b/motors/seems_reasonable/polydrivetrain.py
@@ -16,19 +16,19 @@
 except gflags.DuplicateFlagError:
     pass
 
+
 def main(argv):
     if FLAGS.plot:
         polydrivetrain.PlotPolyDrivetrainMotions(drivetrain.kDrivetrain)
     elif len(argv) != 7:
         glog.fatal('Expected .h file name and .cc file name')
     else:
-        polydrivetrain.WritePolyDrivetrain(
-            argv[1:3],
-            argv[3:5],
-            argv[5:7],
-            ['motors', 'seems_reasonable'],
-            drivetrain.kDrivetrain,
-            scalar_type='float')
+        polydrivetrain.WritePolyDrivetrain(argv[1:3],
+                                           argv[3:5],
+                                           argv[5:7],
+                                           ['motors', 'seems_reasonable'],
+                                           drivetrain.kDrivetrain,
+                                           scalar_type='float')
 
 
 if __name__ == '__main__':
diff --git a/scouting/deploy/deploy.py b/scouting/deploy/deploy.py
index c9886fb..5967956 100644
--- a/scouting/deploy/deploy.py
+++ b/scouting/deploy/deploy.py
@@ -3,6 +3,7 @@
 import subprocess
 import sys
 
+
 def main(argv):
     """Installs the scouting application on the scouting server."""
     parser = argparse.ArgumentParser()
@@ -23,11 +24,16 @@
 
     # Copy the .deb to the scouting server, install it, and delete it again.
     subprocess.run(["rsync", "-L", args.deb, f"{args.host}:/tmp/{deb.name}"],
-                   check=True, stdin=sys.stdin)
+                   check=True,
+                   stdin=sys.stdin)
     subprocess.run(f"ssh -tt {args.host} sudo dpkg -i /tmp/{deb.name}",
-                   shell=True, check=True, stdin=sys.stdin)
+                   shell=True,
+                   check=True,
+                   stdin=sys.stdin)
     subprocess.run(f"ssh {args.host} rm -f /tmp/{deb.name}",
-                   shell=True, check=True, stdin=sys.stdin)
+                   shell=True,
+                   check=True,
+                   stdin=sys.stdin)
 
 
 if __name__ == "__main__":
diff --git a/scouting/testing/scouting_test_servers.py b/scouting/testing/scouting_test_servers.py
index b6e5c7a..9df23c1 100644
--- a/scouting/testing/scouting_test_servers.py
+++ b/scouting/testing/scouting_test_servers.py
@@ -18,6 +18,7 @@
 import time
 from typing import List
 
+
 def wait_for_server(port: int):
     """Waits for the server at the specified port to respond to TCP connections."""
     while True:
@@ -30,31 +31,37 @@
             connection.close()
             time.sleep(0.01)
 
+
 def create_db_config(tmpdir: Path) -> Path:
     config = tmpdir / "db_config.json"
-    config.write_text(json.dumps({
-        "username": "test",
-        "password": "password",
-        "port": 5432,
-    }))
+    config.write_text(
+        json.dumps({
+            "username": "test",
+            "password": "password",
+            "port": 5432,
+        }))
     return config
 
+
 def create_tba_config(tmpdir: Path) -> Path:
     # Configure the scouting webserver to scrape data from our fake TBA
     # server.
     config = tmpdir / "scouting_config.json"
-    config.write_text(json.dumps({
-        "api_key": "dummy_key_that_is_not_actually_used_in_this_test",
-        "base_url": "http://localhost:7000",
-    }))
+    config.write_text(
+        json.dumps({
+            "api_key": "dummy_key_that_is_not_actually_used_in_this_test",
+            "base_url": "http://localhost:7000",
+        }))
     return config
 
+
 def set_up_tba_api_dir(tmpdir: Path, year: int, event_code: str):
     tba_api_dir = tmpdir / "api" / "v3" / "event" / f"{year}{event_code}"
     tba_api_dir.mkdir(parents=True, exist_ok=True)
     (tba_api_dir / "matches").write_text(
-        Path(f"scouting/scraping/test_data/{year}_{event_code}.json").read_text()
-    )
+        Path(f"scouting/scraping/test_data/{year}_{event_code}.json").
+        read_text())
+
 
 class Runner:
     """Helps manage the services we need for testing the scouting app."""
@@ -105,6 +112,7 @@
         except FileNotFoundError:
             pass
 
+
 def discard_signal(signum, frame):
     """A NOP handler to ignore certain signals.
 
@@ -113,9 +121,12 @@
     """
     pass
 
+
 def main(argv: List[str]):
     parser = argparse.ArgumentParser()
-    parser.add_argument("--port", type=int, help="The port for the actual web server.")
+    parser.add_argument("--port",
+                        type=int,
+                        help="The port for the actual web server.")
     args = parser.parse_args(argv[1:])
 
     runner = Runner()
diff --git a/scouting/webserver/requests/BUILD b/scouting/webserver/requests/BUILD
index 744772c..87575a9 100644
--- a/scouting/webserver/requests/BUILD
+++ b/scouting/webserver/requests/BUILD
@@ -20,10 +20,14 @@
         "//scouting/webserver/requests/messages:request_matches_for_team_response_go_fbs",
         "//scouting/webserver/requests/messages:request_notes_for_team_go_fbs",
         "//scouting/webserver/requests/messages:request_notes_for_team_response_go_fbs",
+        "//scouting/webserver/requests/messages:request_shift_schedule_go_fbs",
+        "//scouting/webserver/requests/messages:request_shift_schedule_response_go_fbs",
         "//scouting/webserver/requests/messages:submit_data_scouting_go_fbs",
         "//scouting/webserver/requests/messages:submit_data_scouting_response_go_fbs",
         "//scouting/webserver/requests/messages:submit_notes_go_fbs",
         "//scouting/webserver/requests/messages:submit_notes_response_go_fbs",
+        "//scouting/webserver/requests/messages:submit_shift_schedule_go_fbs",
+        "//scouting/webserver/requests/messages:submit_shift_schedule_response_go_fbs",
         "//scouting/webserver/server",
         "@com_github_google_flatbuffers//go:go_default_library",
     ],
@@ -48,9 +52,12 @@
         "//scouting/webserver/requests/messages:request_matches_for_team_go_fbs",
         "//scouting/webserver/requests/messages:request_matches_for_team_response_go_fbs",
         "//scouting/webserver/requests/messages:request_notes_for_team_go_fbs",
+        "//scouting/webserver/requests/messages:request_shift_schedule_go_fbs",
+        "//scouting/webserver/requests/messages:request_shift_schedule_response_go_fbs",
         "//scouting/webserver/requests/messages:submit_data_scouting_go_fbs",
         "//scouting/webserver/requests/messages:submit_data_scouting_response_go_fbs",
         "//scouting/webserver/requests/messages:submit_notes_go_fbs",
+        "//scouting/webserver/requests/messages:submit_shift_schedule_go_fbs",
         "//scouting/webserver/server",
         "@com_github_google_flatbuffers//go:go_default_library",
     ],
diff --git a/scouting/webserver/requests/debug/BUILD b/scouting/webserver/requests/debug/BUILD
index 6adaee5..04c4ffa 100644
--- a/scouting/webserver/requests/debug/BUILD
+++ b/scouting/webserver/requests/debug/BUILD
@@ -13,8 +13,10 @@
         "//scouting/webserver/requests/messages:request_data_scouting_response_go_fbs",
         "//scouting/webserver/requests/messages:request_matches_for_team_response_go_fbs",
         "//scouting/webserver/requests/messages:request_notes_for_team_response_go_fbs",
+        "//scouting/webserver/requests/messages:request_shift_schedule_response_go_fbs",
         "//scouting/webserver/requests/messages:submit_data_scouting_response_go_fbs",
         "//scouting/webserver/requests/messages:submit_notes_response_go_fbs",
+        "//scouting/webserver/requests/messages:submit_shift_schedule_response_go_fbs",
         "@com_github_google_flatbuffers//go:go_default_library",
     ],
 )
diff --git a/scouting/webserver/requests/debug/cli/cli_test.py b/scouting/webserver/requests/debug/cli/cli_test.py
index 2bcd75b..d4310e9 100644
--- a/scouting/webserver/requests/debug/cli/cli_test.py
+++ b/scouting/webserver/requests/debug/cli/cli_test.py
@@ -22,6 +22,7 @@
     json_path.write_text(json.dumps(content))
     return json_path
 
+
 def run_debug_cli(args: List[str]):
     run_result = subprocess.run(
         ["scouting/webserver/requests/debug/cli/cli_/cli"] + args,
@@ -51,46 +52,71 @@
             "year": year,
             "event_code": event_code,
         })
-        exit_code, stdout, stderr = run_debug_cli(["-refreshMatchList", json_path])
+        exit_code, stdout, stderr = run_debug_cli(
+            ["-refreshMatchList", json_path])
         self.assertEqual(exit_code, 0, f"{year}{event_code}: {stderr}")
-        self.assertIn("(refresh_match_list_response.RefreshMatchListResponseT)", stdout)
+        self.assertIn(
+            "(refresh_match_list_response.RefreshMatchListResponseT)", stdout)
 
     def test_submit_and_request_data_scouting(self):
         self.refresh_match_list(year=2020, event_code="fake")
 
         # First submit some data to be added to the database.
         json_path = write_json_request({
-            "team": 100,
-            "match": 1,
-            "set_number": 2,
-            "comp_level": "quals",
-            "starting_quadrant": 3,
-            "auto_ball_1": True,
-            "auto_ball_2": False,
-            "auto_ball_3": False,
-            "auto_ball_4": False,
-            "auto_ball_5": True,
-            "missed_shots_auto": 10,
-            "upper_goal_auto": 11,
-            "lower_goal_auto": 12,
-            "missed_shots_tele": 13,
-            "upper_goal_tele": 14,
-            "lower_goal_tele": 15,
-            "defense_rating": 3,
-            "defense_received_rating": 4,
-            "climb_level": "Medium",
-            "comment": "A very inspiring and useful comment",
+            "team":
+            100,
+            "match":
+            1,
+            "set_number":
+            2,
+            "comp_level":
+            "quals",
+            "starting_quadrant":
+            3,
+            "auto_ball_1":
+            True,
+            "auto_ball_2":
+            False,
+            "auto_ball_3":
+            False,
+            "auto_ball_4":
+            False,
+            "auto_ball_5":
+            True,
+            "missed_shots_auto":
+            10,
+            "upper_goal_auto":
+            11,
+            "lower_goal_auto":
+            12,
+            "missed_shots_tele":
+            13,
+            "upper_goal_tele":
+            14,
+            "lower_goal_tele":
+            15,
+            "defense_rating":
+            3,
+            "defense_received_rating":
+            4,
+            "climb_level":
+            "Medium",
+            "comment":
+            "A very inspiring and useful comment",
         })
-        exit_code, _, stderr = run_debug_cli(["-submitDataScouting", json_path])
+        exit_code, _, stderr = run_debug_cli(
+            ["-submitDataScouting", json_path])
         self.assertEqual(exit_code, 0, stderr)
 
         # Now request the data back with zero indentation. That let's us
         # validate the data easily.
         json_path = write_json_request({})
-        exit_code, stdout, stderr = run_debug_cli(["-requestDataScouting", json_path, "-indent="])
+        exit_code, stdout, stderr = run_debug_cli(
+            ["-requestDataScouting", json_path, "-indent="])
 
         self.assertEqual(exit_code, 0, stderr)
-        self.assertIn(textwrap.dedent("""\
+        self.assertIn(
+            textwrap.dedent("""\
             {
             Team: (int32) 100,
             Match: (int32) 1,
@@ -120,10 +146,13 @@
 
         # RequestAllMatches has no fields.
         json_path = write_json_request({})
-        exit_code, stdout, stderr = run_debug_cli(["-requestAllMatches", json_path])
+        exit_code, stdout, stderr = run_debug_cli(
+            ["-requestAllMatches", json_path])
 
         self.assertEqual(exit_code, 0, stderr)
-        self.assertIn("MatchList: ([]*request_all_matches_response.MatchT) (len=90 cap=90) {", stdout)
+        self.assertIn(
+            "MatchList: ([]*request_all_matches_response.MatchT) (len=90 cap=90) {",
+            stdout)
         self.assertEqual(stdout.count("MatchNumber:"), 90)
 
     def test_request_matches_for_team(self):
@@ -132,11 +161,14 @@
         json_path = write_json_request({
             "team": 4856,
         })
-        exit_code, stdout, stderr = run_debug_cli(["-requestMatchesForTeam", json_path])
+        exit_code, stdout, stderr = run_debug_cli(
+            ["-requestMatchesForTeam", json_path])
 
         # Team 4856 has 12 matches.
         self.assertEqual(exit_code, 0, stderr)
-        self.assertIn("MatchList: ([]*request_matches_for_team_response.MatchT) (len=12 cap=12) {", stdout)
+        self.assertIn(
+            "MatchList: ([]*request_matches_for_team_response.MatchT) (len=12 cap=12) {",
+            stdout)
         self.assertEqual(stdout.count("MatchNumber:"), 12)
         self.assertEqual(len(re.findall(r": \(int32\) 4856[,\n]", stdout)), 12)
 
@@ -148,13 +180,16 @@
 
             # RequestAllMatches has no fields.
             json_path = write_json_request({})
-            exit_code, stdout, stderr = run_debug_cli(["-requestAllMatches", json_path])
+            exit_code, stdout, stderr = run_debug_cli(
+                ["-requestAllMatches", json_path])
 
             self.assertEqual(exit_code, 0, stderr)
             request_all_matches_outputs.append(stdout)
 
         self.maxDiff = None
-        self.assertEqual(request_all_matches_outputs[0], request_all_matches_outputs[1])
+        self.assertEqual(request_all_matches_outputs[0],
+                         request_all_matches_outputs[1])
+
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/scouting/webserver/requests/debug/debug.go b/scouting/webserver/requests/debug/debug.go
index 21f0f05..b3df518 100644
--- a/scouting/webserver/requests/debug/debug.go
+++ b/scouting/webserver/requests/debug/debug.go
@@ -15,8 +15,10 @@
 	"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/request_data_scouting_response"
 	"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/request_matches_for_team_response"
 	"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/request_notes_for_team_response"
+	"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/request_shift_schedule_response"
 	"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/submit_data_scouting_response"
 	"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/submit_notes_response"
+	"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/submit_shift_schedule_response"
 	flatbuffers "github.com/google/flatbuffers/go"
 )
 
@@ -143,3 +145,15 @@
 		server+"/requests/request/notes_for_team", requestBytes,
 		request_notes_for_team_response.GetRootAsRequestNotesForTeamResponse)
 }
+
+func RequestShiftSchedule(server string, requestBytes []byte) (*request_shift_schedule_response.RequestShiftScheduleResponseT, error) {
+	return sendMessage[request_shift_schedule_response.RequestShiftScheduleResponseT](
+		server+"/requests/request/shift_schedule", requestBytes,
+		request_shift_schedule_response.GetRootAsRequestShiftScheduleResponse)
+}
+
+func SubmitShiftSchedule(server string, requestBytes []byte) (*submit_shift_schedule_response.SubmitShiftScheduleResponseT, error) {
+	return sendMessage[submit_shift_schedule_response.SubmitShiftScheduleResponseT](
+		server+"/requests/submit/shift_schedule", requestBytes,
+		submit_shift_schedule_response.GetRootAsSubmitShiftScheduleResponse)
+}
diff --git a/scouting/webserver/requests/messages/BUILD b/scouting/webserver/requests/messages/BUILD
index f7e194b..d804ea6 100644
--- a/scouting/webserver/requests/messages/BUILD
+++ b/scouting/webserver/requests/messages/BUILD
@@ -16,6 +16,10 @@
     "submit_notes_response",
     "request_notes_for_team",
     "request_notes_for_team_response",
+    "request_shift_schedule",
+    "request_shift_schedule_response",
+    "submit_shift_schedule",
+    "submit_shift_schedule_response",
 )
 
 filegroup(
diff --git a/scouting/webserver/requests/messages/request_shift_schedule.fbs b/scouting/webserver/requests/messages/request_shift_schedule.fbs
new file mode 100644
index 0000000..8f2a61c
--- /dev/null
+++ b/scouting/webserver/requests/messages/request_shift_schedule.fbs
@@ -0,0 +1,6 @@
+namespace scouting.webserver.requests;
+
+table RequestShiftSchedule {
+}
+
+root_type RequestShiftSchedule;
\ No newline at end of file
diff --git a/scouting/webserver/requests/messages/request_shift_schedule_response.fbs b/scouting/webserver/requests/messages/request_shift_schedule_response.fbs
new file mode 100644
index 0000000..611db49
--- /dev/null
+++ b/scouting/webserver/requests/messages/request_shift_schedule_response.fbs
@@ -0,0 +1,17 @@
+namespace scouting.webserver.requests;
+
+table MatchAssignment {
+    match_number:int (id:0);
+    R1scouter:string (id:1);
+    R2scouter:string (id:2);
+    R3scouter:string (id:3);
+    B1scouter:string (id:4);
+    B2scouter:string (id:5);
+    B3scouter:string (id:6);
+}
+
+table RequestShiftScheduleResponse {
+    shift_schedule:[MatchAssignment] (id:0);
+}
+
+root_type RequestShiftScheduleResponse;
\ No newline at end of file
diff --git a/scouting/webserver/requests/messages/submit_shift_schedule.fbs b/scouting/webserver/requests/messages/submit_shift_schedule.fbs
new file mode 100644
index 0000000..1f1833e
--- /dev/null
+++ b/scouting/webserver/requests/messages/submit_shift_schedule.fbs
@@ -0,0 +1,17 @@
+namespace scouting.webserver.requests;
+
+table MatchAssignment {
+    match_number:int (id:0);
+    R1scouter:string (id:1);
+    R2scouter:string (id:2);
+    R3scouter:string (id:3);
+    B1scouter:string (id:4);
+    B2scouter:string (id:5);
+    B3scouter:string (id:6);
+}
+
+table SubmitShiftSchedule {
+    shift_schedule:[MatchAssignment] (id:0);
+}
+
+root_type SubmitShiftSchedule;
\ No newline at end of file
diff --git a/scouting/webserver/requests/messages/submit_shift_schedule_response.fbs b/scouting/webserver/requests/messages/submit_shift_schedule_response.fbs
new file mode 100644
index 0000000..17eadbc
--- /dev/null
+++ b/scouting/webserver/requests/messages/submit_shift_schedule_response.fbs
@@ -0,0 +1,6 @@
+namespace scouting.webserver.requests;
+
+table SubmitShiftScheduleResponse {
+}
+
+root_type SubmitShiftScheduleResponse;
\ No newline at end of file
diff --git a/scouting/webserver/requests/requests.go b/scouting/webserver/requests/requests.go
index cef87aa..67a1722 100644
--- a/scouting/webserver/requests/requests.go
+++ b/scouting/webserver/requests/requests.go
@@ -23,10 +23,14 @@
 	"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/request_matches_for_team_response"
 	"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/request_notes_for_team"
 	"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/request_notes_for_team_response"
+	"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/request_shift_schedule"
+	"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/request_shift_schedule_response"
 	"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/submit_data_scouting"
 	"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/submit_data_scouting_response"
 	"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/submit_notes"
 	"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/submit_notes_response"
+	"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/submit_shift_schedule"
+	"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/submit_shift_schedule_response"
 	"github.com/frc971/971-Robot-Code/scouting/webserver/server"
 	flatbuffers "github.com/google/flatbuffers/go"
 )
@@ -45,15 +49,22 @@
 type SubmitNotesResponseT = submit_notes_response.SubmitNotesResponseT
 type RequestNotesForTeam = request_notes_for_team.RequestNotesForTeam
 type RequestNotesForTeamResponseT = request_notes_for_team_response.RequestNotesForTeamResponseT
+type RequestShiftSchedule = request_shift_schedule.RequestShiftSchedule
+type RequestShiftScheduleResponseT = request_shift_schedule_response.RequestShiftScheduleResponseT
+type SubmitShiftSchedule = submit_shift_schedule.SubmitShiftSchedule
+type SubmitShiftScheduleResponseT = submit_shift_schedule_response.SubmitShiftScheduleResponseT
 
 // The interface we expect the database abstraction to conform to.
 // We use an interface here because it makes unit testing easier.
 type Database interface {
 	AddToMatch(db.Match) error
+	AddToShift(db.Shift) error
 	AddToStats(db.Stats) error
 	ReturnMatches() ([]db.Match, error)
+	ReturnAllShifts() ([]db.Shift, error)
 	ReturnStats() ([]db.Stats, error)
 	QueryMatches(int32) ([]db.Match, error)
+	QueryAllShifts(int) ([]db.Shift, error)
 	QueryStats(int) ([]db.Stats, error)
 	QueryNotes(int32) (db.NotesData, error)
 	AddNotes(db.NotesData) error
@@ -480,6 +491,91 @@
 	w.Write(builder.FinishedBytes())
 }
 
+type requestShiftScheduleHandler struct {
+	db Database
+}
+
+func (handler requestShiftScheduleHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	requestBytes, err := io.ReadAll(req.Body)
+	if err != nil {
+		respondWithError(w, http.StatusBadRequest, fmt.Sprint("Failed to read request bytes:", err))
+		return
+	}
+
+	_, success := parseRequest(w, requestBytes, "RequestShiftSchedule", request_shift_schedule.GetRootAsRequestShiftSchedule)
+	if !success {
+		return
+	}
+
+	shiftData, err := handler.db.ReturnAllShifts()
+	if err != nil {
+		respondWithError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to query shift schedule: %v", err))
+		return
+	}
+
+	var response RequestShiftScheduleResponseT
+	for _, shifts := range shiftData {
+		response.ShiftSchedule = append(response.ShiftSchedule, &request_shift_schedule_response.MatchAssignmentT{
+			MatchNumber: shifts.MatchNumber,
+			R1scouter:   shifts.R1scouter,
+			R2scouter:   shifts.R2scouter,
+			R3scouter:   shifts.R3scouter,
+			B1scouter:   shifts.B1scouter,
+			B2scouter:   shifts.B2scouter,
+			B3scouter:   shifts.B3scouter,
+		})
+	}
+
+	builder := flatbuffers.NewBuilder(1024)
+	builder.Finish((&response).Pack(builder))
+	w.Write(builder.FinishedBytes())
+}
+
+type submitShiftScheduleHandler struct {
+	db Database
+}
+
+func (handler submitShiftScheduleHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	// Get the username of the person submitting the data.
+	username := parseUsername(req)
+
+	requestBytes, err := io.ReadAll(req.Body)
+	if err != nil {
+		respondWithError(w, http.StatusBadRequest, fmt.Sprint("Failed to read request bytes:", err))
+		return
+	}
+
+	request, success := parseRequest[SubmitShiftSchedule](w, requestBytes, "SubmitShiftSchedule", submit_shift_schedule.GetRootAsSubmitShiftSchedule)
+	if !success {
+		return
+	}
+
+	log.Println("Got shift schedule from", username)
+	shift_schedule_length := request.ShiftScheduleLength()
+	for i := 0; i < shift_schedule_length; i++ {
+		var match_assignment submit_shift_schedule.MatchAssignment
+		request.ShiftSchedule(&match_assignment, i)
+		current_shift := db.Shift{
+			MatchNumber: match_assignment.MatchNumber(),
+			R1scouter:   string(match_assignment.R1scouter()),
+			R2scouter:   string(match_assignment.R2scouter()),
+			R3scouter:   string(match_assignment.R3scouter()),
+			B1scouter:   string(match_assignment.B1scouter()),
+			B2scouter:   string(match_assignment.B2scouter()),
+			B3scouter:   string(match_assignment.B3scouter()),
+		}
+		err = handler.db.AddToShift(current_shift)
+		if err != nil {
+			respondWithError(w, http.StatusInternalServerError, fmt.Sprint("Failed to submit shift schedule: ", err))
+			return
+		}
+	}
+
+	builder := flatbuffers.NewBuilder(50 * 1024)
+	builder.Finish((&SubmitShiftScheduleResponseT{}).Pack(builder))
+	w.Write(builder.FinishedBytes())
+}
+
 func HandleRequests(db Database, scrape ScrapeMatchList, scoutingServer server.ScoutingServer) {
 	scoutingServer.HandleFunc("/requests", unknown)
 	scoutingServer.Handle("/requests/submit/data_scouting", submitDataScoutingHandler{db})
@@ -489,4 +585,6 @@
 	scoutingServer.Handle("/requests/refresh_match_list", refreshMatchListHandler{db, scrape})
 	scoutingServer.Handle("/requests/submit/submit_notes", submitNoteScoutingHandler{db})
 	scoutingServer.Handle("/requests/request/notes_for_team", requestNotesForTeamHandler{db})
+	scoutingServer.Handle("/requests/submit/shift_schedule", submitShiftScheduleHandler{db})
+	scoutingServer.Handle("/requests/request/shift_schedule", requestShiftScheduleHandler{db})
 }
diff --git a/scouting/webserver/requests/requests_test.go b/scouting/webserver/requests/requests_test.go
index 4dda143..44fd5db 100644
--- a/scouting/webserver/requests/requests_test.go
+++ b/scouting/webserver/requests/requests_test.go
@@ -20,9 +20,12 @@
 	"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/request_matches_for_team"
 	"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/request_matches_for_team_response"
 	"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/request_notes_for_team"
+	"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/request_shift_schedule"
+	"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/request_shift_schedule_response"
 	"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/submit_data_scouting"
 	"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/submit_data_scouting_response"
 	"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/submit_notes"
+	"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/submit_shift_schedule"
 	"github.com/frc971/971-Robot-Code/scouting/webserver/server"
 	flatbuffers "github.com/google/flatbuffers/go"
 )
@@ -355,6 +358,113 @@
 	}
 }
 
+func TestRequestShiftSchedule(t *testing.T) {
+	db := MockDatabase{
+		shiftSchedule: []db.Shift{
+			{
+				MatchNumber: 1,
+				R1scouter:   "Bob",
+				R2scouter:   "James",
+				R3scouter:   "Robert",
+				B1scouter:   "Alice",
+				B2scouter:   "Mary",
+				B3scouter:   "Patricia",
+			},
+			{
+				MatchNumber: 2,
+				R1scouter:   "Liam",
+				R2scouter:   "Noah",
+				R3scouter:   "Oliver",
+				B1scouter:   "Emma",
+				B2scouter:   "Charlotte",
+				B3scouter:   "Amelia",
+			},
+		},
+	}
+	scoutingServer := server.NewScoutingServer()
+	HandleRequests(&db, scrapeEmtpyMatchList, scoutingServer)
+	scoutingServer.Start(8080)
+	defer scoutingServer.Stop()
+
+	builder := flatbuffers.NewBuilder(1024)
+	builder.Finish((&request_shift_schedule.RequestShiftScheduleT{}).Pack(builder))
+
+	response, err := debug.RequestShiftSchedule("http://localhost:8080", builder.FinishedBytes())
+	if err != nil {
+		t.Fatal("Failed to request shift schedule: ", err)
+	}
+
+	expected := request_shift_schedule_response.RequestShiftScheduleResponseT{
+		ShiftSchedule: []*request_shift_schedule_response.MatchAssignmentT{
+			{
+				MatchNumber: 1,
+				R1scouter:   "Bob",
+				R2scouter:   "James",
+				R3scouter:   "Robert",
+				B1scouter:   "Alice",
+				B2scouter:   "Mary",
+				B3scouter:   "Patricia",
+			},
+			{
+				MatchNumber: 2,
+				R1scouter:   "Liam",
+				R2scouter:   "Noah",
+				R3scouter:   "Oliver",
+				B1scouter:   "Emma",
+				B2scouter:   "Charlotte",
+				B3scouter:   "Amelia",
+			},
+		},
+	}
+	if len(expected.ShiftSchedule) != len(response.ShiftSchedule) {
+		t.Fatal("Expected ", expected, ", but got ", *response)
+	}
+	for i, match := range expected.ShiftSchedule {
+		if !reflect.DeepEqual(*match, *response.ShiftSchedule[i]) {
+			t.Fatal("Expected for shift schedule", i, ":", *match, ", but got:", *response.ShiftSchedule[i])
+		}
+	}
+}
+
+func TestSubmitShiftSchedule(t *testing.T) {
+	database := MockDatabase{}
+	scoutingServer := server.NewScoutingServer()
+	HandleRequests(&database, scrapeEmtpyMatchList, scoutingServer)
+	scoutingServer.Start(8080)
+	defer scoutingServer.Stop()
+
+	builder := flatbuffers.NewBuilder(1024)
+	builder.Finish((&submit_shift_schedule.SubmitShiftScheduleT{
+		ShiftSchedule: []*submit_shift_schedule.MatchAssignmentT{
+			{MatchNumber: 1,
+				R1scouter: "Bob",
+				R2scouter: "James",
+				R3scouter: "Robert",
+				B1scouter: "Alice",
+				B2scouter: "Mary",
+				B3scouter: "Patricia"},
+		},
+	}).Pack(builder))
+
+	_, err := debug.SubmitShiftSchedule("http://localhost:8080", builder.FinishedBytes())
+	if err != nil {
+		t.Fatal("Failed to submit shift schedule: ", err)
+	}
+
+	expected := []db.Shift{
+		{MatchNumber: 1,
+			R1scouter: "Bob",
+			R2scouter: "James",
+			R3scouter: "Robert",
+			B1scouter: "Alice",
+			B2scouter: "Mary",
+			B3scouter: "Patricia"},
+	}
+	if !reflect.DeepEqual(expected, database.shiftSchedule) {
+		t.Fatal("Expected ", expected, ", but got:", database.shiftSchedule)
+	}
+}
+
 // Validates that we can download the schedule from The Blue Alliance.
 func TestRefreshMatchList(t *testing.T) {
 	scrapeMockSchedule := func(int32, string) ([]scraping.Match, error) {
@@ -433,9 +543,10 @@
 // needed for your tests.
 
 type MockDatabase struct {
-	matches []db.Match
-	stats   []db.Stats
-	notes   []db.NotesData
+	matches       []db.Match
+	stats         []db.Stats
+	notes         []db.NotesData
+	shiftSchedule []db.Shift
 }
 
 func (database *MockDatabase) AddToMatch(match db.Match) error {
@@ -488,6 +599,19 @@
 	return nil
 }
 
+func (database *MockDatabase) AddToShift(data db.Shift) error {
+	database.shiftSchedule = append(database.shiftSchedule, data)
+	return nil
+}
+
+func (database *MockDatabase) ReturnAllShifts() ([]db.Shift, error) {
+	return database.shiftSchedule, nil
+}
+
+func (database *MockDatabase) QueryAllShifts(int) ([]db.Shift, error) {
+	return []db.Shift{}, nil
+}
+
 // Returns an empty match list from the fake The Blue Alliance scraping.
 func scrapeEmtpyMatchList(int32, string) ([]scraping.Match, error) {
 	return nil, nil
diff --git a/scouting/www/index_html_generator.py b/scouting/www/index_html_generator.py
index 3b057fd..bc0e63d 100644
--- a/scouting/www/index_html_generator.py
+++ b/scouting/www/index_html_generator.py
@@ -5,9 +5,11 @@
 import sys
 from pathlib import Path
 
+
 def compute_sha256(filepath):
     return hashlib.sha256(filepath.read_bytes()).hexdigest()
 
+
 def main(argv):
     parser = argparse.ArgumentParser()
     parser.add_argument("--template", type=str)
@@ -20,9 +22,9 @@
     bundle_sha256 = compute_sha256(bundle_path)
 
     output = template.format(
-        MAIN_BUNDLE_FILE = f"/sha256/{bundle_sha256}/{bundle_path.name}",
-    )
+        MAIN_BUNDLE_FILE=f"/sha256/{bundle_sha256}/{bundle_path.name}", )
     Path(args.output).write_text(output)
 
+
 if __name__ == "__main__":
     sys.exit(main(sys.argv))
diff --git a/third_party/akaze/AKAZEConfig.h b/third_party/akaze/AKAZEConfig.h
new file mode 100644
index 0000000..5e754ed
--- /dev/null
+++ b/third_party/akaze/AKAZEConfig.h
@@ -0,0 +1,66 @@
+/**
+ * @file AKAZEConfig.h
+ * @brief AKAZE configuration file
+ * @date Feb 23, 2014
+ * @author Pablo F. Alcantarilla, Jesus Nuevo
+ */
+
+#ifndef __OPENCV_FEATURES_2D_AKAZE_CONFIG_H__
+#define __OPENCV_FEATURES_2D_AKAZE_CONFIG_H__
+
+#include <opencv2/features2d.hpp>
+
+namespace cv {
+/* ************************************************************************* */
+/// AKAZE configuration options structure
+struct AKAZEOptionsV2 {
+  AKAZEOptionsV2()
+      : omax(4),
+        nsublevels(4),
+        img_width(0),
+        img_height(0),
+        soffset(1.6f),
+        derivative_factor(1.5f),
+        sderivatives(1.0),
+        diffusivity(KAZE::DIFF_PM_G2)
+
+        ,
+        dthreshold(0.001f),
+        min_dthreshold(0.00001f)
+
+        ,
+        descriptor(AKAZE::DESCRIPTOR_MLDB),
+        descriptor_size(0),
+        descriptor_channels(3),
+        descriptor_pattern_size(10)
+
+        ,
+        kcontrast_percentile(0.7f),
+        kcontrast_nbins(300) {}
+
+  int omax;  ///< Maximum octave evolution of the image 2^sigma (coarsest scale
+             ///< sigma units)
+  int nsublevels;           ///< Default number of sublevels per scale level
+  int img_width;            ///< Width of the input image
+  int img_height;           ///< Height of the input image
+  float soffset;            ///< Base scale offset (sigma units)
+  float derivative_factor;  ///< Factor for the multiscale derivatives
+  float sderivatives;       ///< Smoothing factor for the derivatives
+  int diffusivity;          ///< Diffusivity type
+
+  float dthreshold;      ///< Detector response threshold to accept point
+  float min_dthreshold;  ///< Minimum detector threshold to accept a point
+
+  int descriptor;           ///< Type of descriptor
+  int descriptor_size;      ///< Size of the descriptor in bits. 0->Full size
+  int descriptor_channels;  ///< Number of channels in the descriptor (1, 2, 3)
+  int descriptor_pattern_size;  ///< Actual patch size is
+                                ///< 2*pattern_size*point.scale
+
+  float kcontrast_percentile;  ///< Percentile level for the contrast factor
+  int kcontrast_nbins;  ///< Number of bins for the contrast factor histogram
+};
+
+}  // namespace cv
+
+#endif
\ No newline at end of file
diff --git a/third_party/akaze/AKAZEFeatures.cpp b/third_party/akaze/AKAZEFeatures.cpp
new file mode 100644
index 0000000..2827dd5
--- /dev/null
+++ b/third_party/akaze/AKAZEFeatures.cpp
@@ -0,0 +1,2186 @@
+/**
+ * @file AKAZEFeatures.cpp
+ * @brief Main class for detecting and describing binary features in an
+ * accelerated nonlinear scale space
+ * @date Sep 15, 2013
+ * @author Pablo F. Alcantarilla, Jesus Nuevo
+ */
+
+#include "AKAZEFeatures.h"
+
+#include <cstdint>
+#include <cstring>
+#include <iostream>
+#include <opencv2/core.hpp>
+#include <opencv2/core/hal/hal.hpp>
+#include <opencv2/imgproc.hpp>
+
+#include "fed.h"
+#include "nldiffusion_functions.h"
+#include "utils.h"
+
+#ifdef AKAZE_USE_CPP11_THREADING
+#include <atomic>
+#include <functional>  // std::ref
+#include <future>
+#include <thread>
+#endif
+
+// Taken from opencv2/internal.hpp: IEEE754 constants and macros
+#define CV_TOGGLE_FLT(x) ((x) ^ ((int)(x) < 0 ? 0x7fffffff : 0))
+
+// Namespaces
+namespace cv {
+using namespace std;
+
+/// Internal Functions
+inline void Compute_Main_Orientation(cv::KeyPoint& kpt,
+                                     const TEvolutionV2& evolution_);
+static void generateDescriptorSubsampleV2(cv::Mat& sampleList,
+                                          cv::Mat& comparisons, int nbits,
+                                          int pattern_size, int nchannels);
+
+/* ************************************************************************* */
+/**
+ * @brief AKAZEFeatures constructor with input options
+ * @param options AKAZEFeatures configuration options
+ * @note This constructor allocates memory for the nonlinear scale space
+ */
+AKAZEFeaturesV2::AKAZEFeaturesV2(const AKAZEOptionsV2& options)
+    : options_(options) {
+  cout << "AKAZEFeaturesV2 constructor called" << endl;
+
+#ifdef AKAZE_USE_CPP11_THREADING
+  cout << "hardware_concurrency: " << thread::hardware_concurrency() << endl;
+#endif
+
+  reordering_ = true;
+
+  if (options_.descriptor_size > 0 &&
+      options_.descriptor >= AKAZE::DESCRIPTOR_MLDB_UPRIGHT) {
+    generateDescriptorSubsampleV2(
+        descriptorSamples_, descriptorBits_, options_.descriptor_size,
+        options_.descriptor_pattern_size, options_.descriptor_channels);
+  }
+
+  Allocate_Memory_Evolution();
+}
+
+/* ************************************************************************* */
+/**
+ * @brief This method allocates the memory for the nonlinear diffusion evolution
+ */
+void AKAZEFeaturesV2::Allocate_Memory_Evolution(void) {
+  CV_Assert(options_.img_height > 2 &&
+            options_.img_width > 2);  // The size of modgs_ must be positive
+
+  // Set maximum size of the area for the descriptor computation
+  float smax = 0.0;
+  if (options_.descriptor == AKAZE::DESCRIPTOR_MLDB_UPRIGHT ||
+      options_.descriptor == AKAZE::DESCRIPTOR_MLDB) {
+    smax = 10.0f * sqrtf(2.0f);
+  } else if (options_.descriptor == AKAZE::DESCRIPTOR_KAZE_UPRIGHT ||
+             options_.descriptor == AKAZE::DESCRIPTOR_KAZE) {
+    smax = 12.0f * sqrtf(2.0f);
+  }
+
+  // Allocate the dimension of the matrices for the evolution
+  int level_height = options_.img_height;
+  int level_width = options_.img_width;
+  int power = 1;
+
+  for (int i = 0; i < options_.omax; i++) {
+    for (int j = 0; j < options_.nsublevels; j++) {
+      TEvolutionV2 step;
+      step.Lt.create(level_height, level_width, CV_32FC1);
+      step.Ldet.create(level_height, level_width, CV_32FC1);
+      step.Lsmooth.create(level_height, level_width, CV_32FC1);
+      step.Lx.create(level_height, level_width, CV_32FC1);
+      step.Ly.create(level_height, level_width, CV_32FC1);
+      step.Lxx.create(level_height, level_width, CV_32FC1);
+      step.Lxy.create(level_height, level_width, CV_32FC1);
+      step.Lyy.create(level_height, level_width, CV_32FC1);
+      step.esigma =
+          options_.soffset * pow(2.f, (float)j / options_.nsublevels + i);
+      step.sigma_size =
+          fRoundV2(step.esigma * options_.derivative_factor /
+                   power);  // In fact sigma_size only depends on j
+      step.border = fRoundV2(smax * step.sigma_size) + 1;
+      step.etime = 0.5f * (step.esigma * step.esigma);
+      step.octave = i;
+      step.sublevel = j;
+      step.octave_ratio = (float)power;
+
+      // Descriptors cannot be computed for the points on the border
+      if (step.border * 2 + 1 >= level_width ||
+          step.border * 2 + 1 >= level_height)
+        goto out;  // The image becomes too small
+
+      // Pre-calculate the derivative kernels
+      compute_scharr_derivative_kernelsV2(step.DxKx, step.DxKy, 1, 0,
+                                          step.sigma_size);
+      compute_scharr_derivative_kernelsV2(step.DyKx, step.DyKy, 0, 1,
+                                          step.sigma_size);
+
+      evolution_.push_back(step);
+    }
+
+    power <<= 1;
+    level_height >>= 1;
+    level_width >>= 1;
+
+    // The next octave becomes too small
+    if (level_width < 80 || level_height < 40) {
+      options_.omax = i + 1;
+      break;
+    }
+  }
+out:
+
+  // Allocate memory for workspaces
+  lx_.create(options_.img_height, options_.img_width, CV_32FC1);
+  ly_.create(options_.img_height, options_.img_width, CV_32FC1);
+  lflow_.create(options_.img_height, options_.img_width, CV_32FC1);
+  lstep_.create(options_.img_height, options_.img_width, CV_32FC1);
+  histgram_.create(1, options_.kcontrast_nbins, CV_32SC1);
+  modgs_.create(1, (options_.img_height - 2) * (options_.img_width - 2),
+                CV_32FC1);  // excluding the border
+
+  kpts_aux_.resize(evolution_.size());
+  for (size_t i = 0; i < evolution_.size(); i++)
+    kpts_aux_[i].reserve(
+        1024);  // reserve 1K points' space for each evolution step
+
+  // Allocate memory for the number of cycles and time steps
+  tsteps_.resize(evolution_.size() - 1);
+  for (size_t i = 1; i < evolution_.size(); i++) {
+    fed_tau_by_process_timeV2(evolution_[i].etime - evolution_[i - 1].etime, 1,
+                              0.25f, reordering_, tsteps_[i - 1]);
+  }
+
+#ifdef AKAZE_USE_CPP11_THREADING
+  tasklist_.resize(2);
+  for (auto& list : tasklist_) list.resize(evolution_.size());
+
+  vector<atomic_int> atomic_vec(evolution_.size());
+  taskdeps_.swap(atomic_vec);
+#endif
+}
+
+/* ************************************************************************* */
+/**
+ * @brief This function wraps the parallel computation of Scharr derivatives.
+ * @param Lsmooth Input image to compute Scharr derivatives.
+ * @param Lx Output derivative image (horizontal)
+ * @param Ly Output derivative image (vertical)
+ * @note Runs the two derivative passes concurrently when threading is enabled.
+ */
+static inline void image_derivatives(const cv::Mat& Lsmooth, cv::Mat& Lx,
+                                     cv::Mat& Ly) {
+#ifdef AKAZE_USE_CPP11_THREADING
+
+  if (getNumThreads() > 1 && (Lsmooth.rows * Lsmooth.cols) > (1 << 15)) {
+    auto task = async(launch::async, image_derivatives_scharrV2, ref(Lsmooth),
+                      ref(Lx), 1, 0);
+
+    image_derivatives_scharrV2(Lsmooth, Ly, 0, 1);
+    task.get();
+    return;
+  }
+
+  // Fall back to the serial path if Lsmooth is small or OpenCV parallelization
+  // is disabled
+#endif
+
+  image_derivatives_scharrV2(Lsmooth, Lx, 1, 0);
+  image_derivatives_scharrV2(Lsmooth, Ly, 0, 1);
+}
+
+/* ************************************************************************* */
+/**
+ * @brief This method compute the first evolution step of the nonlinear scale
+ * space
+ * @param img Input image for which the nonlinear scale space needs to be
+ * created
+ * @return kcontrast factor
+ */
+float AKAZEFeaturesV2::Compute_Base_Evolution_Level(const cv::Mat& img) {
+  Mat Lsmooth(evolution_[0].Lt.rows, evolution_[0].Lt.cols, CV_32FC1,
+              lflow_.data /* like-a-union */);
+  Mat Lx(evolution_[0].Lt.rows, evolution_[0].Lt.cols, CV_32FC1, lx_.data);
+  Mat Ly(evolution_[0].Lt.rows, evolution_[0].Lt.cols, CV_32FC1, ly_.data);
+
+#ifdef AKAZE_USE_CPP11_THREADING
+
+  if (getNumThreads() > 2 && (img.rows * img.cols) > (1 << 16)) {
+    auto e0_Lsmooth = async(launch::async, gaussian_2D_convolutionV2, ref(img),
+                            ref(evolution_[0].Lsmooth), 0, 0, options_.soffset);
+
+    gaussian_2D_convolutionV2(img, Lsmooth, 0, 0, 1.0f);
+    image_derivatives(Lsmooth, Lx, Ly);
+    kcontrast_ =
+        async(launch::async, compute_k_percentileV2, Lx, Ly,
+              options_.kcontrast_percentile, ref(modgs_), ref(histgram_));
+
+    e0_Lsmooth.get();
+    Compute_Determinant_Hessian_Response(0);
+
+    evolution_[0].Lsmooth.copyTo(evolution_[0].Lt);
+    return 1.0f;
+  }
+
+#endif
+
+  // Compute the determinant Hessian
+  gaussian_2D_convolutionV2(img, evolution_[0].Lsmooth, 0, 0, options_.soffset);
+  Compute_Determinant_Hessian_Response(0);
+
+  // Compute the kcontrast factor using local variables
+  gaussian_2D_convolutionV2(img, Lsmooth, 0, 0, 1.0f);
+  image_derivatives(Lsmooth, Lx, Ly);
+  float kcontrast = compute_k_percentileV2(
+      Lx, Ly, options_.kcontrast_percentile, modgs_, histgram_);
+
+  // Copy the smoothed original image to the first level of the evolution Lt
+  evolution_[0].Lsmooth.copyTo(evolution_[0].Lt);
+
+  return kcontrast;
+}
+
+/* ************************************************************************* */
+/**
+ * @brief This method creates the nonlinear scale space for a given image
+ * @param img Input image for which the nonlinear scale space needs to be
+ * created
+ * @return 0 if the nonlinear scale space was created successfully, -1 otherwise
+ */
+int AKAZEFeaturesV2::Create_Nonlinear_Scale_Space(const Mat& img) {
+  CV_Assert(evolution_.size() > 0);
+
+  // Setup the gray-scale image
+  const Mat* gray = &img;
+  if (img.channels() != 1) {
+    cvtColor(img, gray_, COLOR_BGR2GRAY);
+    gray = &gray_;
+  }
+
+  if (gray->type() == CV_8UC1) {
+    gray->convertTo(evolution_[0].Lt, CV_32F, 1 / 255.0);
+    gray = &evolution_[0].Lt;
+  } else if (gray->type() == CV_16UC1) {
+    gray->convertTo(evolution_[0].Lt, CV_32F, 1 / 65535.0);
+    gray = &evolution_[0].Lt;
+  }
+  CV_Assert(gray->type() == CV_32FC1);
+
+  // Handle the trivial case
+  if (evolution_.size() == 1) {
+    gaussian_2D_convolutionV2(*gray, evolution_[0].Lsmooth, 0, 0,
+                              options_.soffset);
+    evolution_[0].Lsmooth.copyTo(evolution_[0].Lt);
+    Compute_Determinant_Hessian_Response_Single(0);
+    return 0;
+  }
+
+  // First compute Lsmooth, Hessian, and the kcontrast factor for the base
+  // evolution level
+  float kcontrast = Compute_Base_Evolution_Level(*gray);
+
+  // Prepare Mats to be used as local workspace
+  Mat Lx(evolution_[0].Lt.rows, evolution_[0].Lt.cols, CV_32FC1, lx_.data);
+  Mat Ly(evolution_[0].Lt.rows, evolution_[0].Lt.cols, CV_32FC1, ly_.data);
+  Mat Lflow(evolution_[0].Lt.rows, evolution_[0].Lt.cols, CV_32FC1,
+            lflow_.data);
+  Mat Lstep(evolution_[0].Lt.rows, evolution_[0].Lt.cols, CV_32FC1,
+            lstep_.data);
+
+  // Now generate the rest of evolution levels
+  for (size_t i = 1; i < evolution_.size(); i++) {
+    if (evolution_[i].octave > evolution_[i - 1].octave) {
+      halfsample_imageV2(evolution_[i - 1].Lt, evolution_[i].Lt);
+      kcontrast = kcontrast * 0.75f;
+
+      // Resize the workspace images to fit Lt
+      Lx = cv::Mat(evolution_[i].Lt.rows, evolution_[i].Lt.cols, CV_32FC1,
+                   lx_.data);
+      Ly = cv::Mat(evolution_[i].Lt.rows, evolution_[i].Lt.cols, CV_32FC1,
+                   ly_.data);
+      Lflow = cv::Mat(evolution_[i].Lt.rows, evolution_[i].Lt.cols, CV_32FC1,
+                      lflow_.data);
+      Lstep = cv::Mat(evolution_[i].Lt.rows, evolution_[i].Lt.cols, CV_32FC1,
+                      lstep_.data);
+    } else {
+      evolution_[i - 1].Lt.copyTo(evolution_[i].Lt);
+    }
+
+    gaussian_2D_convolutionV2(evolution_[i].Lt, evolution_[i].Lsmooth, 0, 0,
+                              1.0f);
+
+#ifdef AKAZE_USE_CPP11_THREADING
+    if (kcontrast_.valid())
+      kcontrast *=
+          kcontrast_
+              .get(); /* Join the kcontrast task so Lx and Ly can be reused */
+#endif
+
+    // Compute the Gaussian derivatives Lx and Ly
+    image_derivatives(evolution_[i].Lsmooth, Lx, Ly);
+
+    // Compute the Hessian for feature detection
+    Compute_Determinant_Hessian_Response((int)i);
+
+    // Compute the conductivity equation Lflow
+    switch (options_.diffusivity) {
+      case KAZE::DIFF_PM_G1:
+        pm_g1V2(Lx, Ly, Lflow, kcontrast);
+        break;
+      case KAZE::DIFF_PM_G2:
+        pm_g2V2(Lx, Ly, Lflow, kcontrast);
+        break;
+      case KAZE::DIFF_WEICKERT:
+        weickert_diffusivityV2(Lx, Ly, Lflow, kcontrast);
+        break;
+      case KAZE::DIFF_CHARBONNIER:
+        charbonnier_diffusivityV2(Lx, Ly, Lflow, kcontrast);
+        break;
+      default:
+        CV_Error(options_.diffusivity, "Diffusivity is not supported");
+        break;
+    }
+
+    // Perform Fast Explicit Diffusion on Lt
+    const int total = Lstep.rows * Lstep.cols;
+    float* lt = evolution_[i].Lt.ptr<float>(0);
+    float* lstep = Lstep.ptr<float>(0);
+    std::vector<float>& tsteps = tsteps_[i - 1];
+
+    for (size_t j = 0; j < tsteps.size(); j++) {
+      nld_step_scalarV2(evolution_[i].Lt, Lflow, Lstep);
+
+      const float step_size = tsteps[j];
+      for (int k = 0; k < total; k++) lt[k] += lstep[k] * 0.5f * step_size;
+    }
+  }
+
+#ifdef AKAZE_USE_CPP11_THREADING
+
+  if (getNumThreads() > 1) {
+    // Wait all background tasks to finish
+    for (size_t i = 0; i < evolution_.size(); i++) {
+      tasklist_[0][i].get();
+      tasklist_[1][i].get();
+    }
+  }
+
+#endif
+
+  return 0;
+}
+
+/* ************************************************************************* */
+/**
+ * @brief This method selects interesting keypoints through the nonlinear scale
+ * space
+ * @param kpts Vector of detected keypoints
+ */
+void AKAZEFeaturesV2::Feature_Detection(std::vector<KeyPoint>& kpts) {
+  Find_Scale_Space_Extrema(kpts_aux_);
+  Do_Subpixel_Refinement(kpts_aux_, kpts);
+}
+
+/* ************************************************************************* */
+/**
+ * @brief This method computes the feature detector response for the nonlinear
+ * scale space
+ * @param level The evolution level to compute Hessian determinant
+ * @note We use the Hessian determinant as the feature detector response
+ */
+inline void AKAZEFeaturesV2::Compute_Determinant_Hessian_Response_Single(
+    const int level) {
+  TEvolutionV2& e = evolution_[level];
+
+  const int total = e.Lsmooth.cols * e.Lsmooth.rows;
+  float* lxx = e.Lxx.ptr<float>(0);
+  float* lxy = e.Lxy.ptr<float>(0);
+  float* lyy = e.Lyy.ptr<float>(0);
+  float* ldet = e.Ldet.ptr<float>(0);
+
+  // Firstly compute the multiscale derivatives
+  sepFilter2D(e.Lsmooth, e.Lx, CV_32F, e.DxKx, e.DxKy);
+  sepFilter2D(e.Lx, e.Lxx, CV_32F, e.DxKx, e.DxKy);
+  sepFilter2D(e.Lx, e.Lxy, CV_32F, e.DyKx, e.DyKy);
+  sepFilter2D(e.Lsmooth, e.Ly, CV_32F, e.DyKx, e.DyKy);
+  sepFilter2D(e.Ly, e.Lyy, CV_32F, e.DyKx, e.DyKy);
+
+  // Compute Ldet by Lxx.mul(Lyy) - Lxy.mul(Lxy)
+  for (int j = 0; j < total; j++) ldet[j] = lxx[j] * lyy[j] - lxy[j] * lxy[j];
+}
+
+#ifdef AKAZE_USE_CPP11_THREADING
+
+/* ************************************************************************* */
+/**
+ * @brief This method computes the feature detector response for the nonlinear
+ * scale space
+ * @param level The evolution level to compute Hessian determinant
+ * @note This is parallelized version of
+ * Compute_Determinant_Hessian_Response_Single()
+ */
+void AKAZEFeaturesV2::Compute_Determinant_Hessian_Response(const int level) {
+  if (getNumThreads() == 1) {
+    Compute_Determinant_Hessian_Response_Single(level);
+    return;
+  }
+
+  TEvolutionV2& e = evolution_[level];
+  atomic_int& dep = taskdeps_[level];
+
+  const int total = e.Lsmooth.cols * e.Lsmooth.rows;
+  float* lxx = e.Lxx.ptr<float>(0);
+  float* lxy = e.Lxy.ptr<float>(0);
+  float* lyy = e.Lyy.ptr<float>(0);
+  float* ldet = e.Ldet.ptr<float>(0);
+
+  dep = 0;
+
+  tasklist_[0][level] = async(launch::async, [=, &e, &dep] {
+    sepFilter2D(e.Lsmooth, e.Ly, CV_32F, e.DyKx, e.DyKy);
+    sepFilter2D(e.Ly, e.Lyy, CV_32F, e.DyKx, e.DyKy);
+
+    if (dep.fetch_add(1, memory_order_relaxed) != 1)
+      return;  // The other dependency is not ready
+
+    sepFilter2D(e.Lx, e.Lxy, CV_32F, e.DyKx, e.DyKy);
+    for (int j = 0; j < total; j++) ldet[j] = lxx[j] * lyy[j] - lxy[j] * lxy[j];
+  });
+
+  tasklist_[1][level] = async(launch::async, [=, &e, &dep] {
+    sepFilter2D(e.Lsmooth, e.Lx, CV_32F, e.DxKx, e.DxKy);
+    sepFilter2D(e.Lx, e.Lxx, CV_32F, e.DxKx, e.DxKy);
+
+    if (dep.fetch_add(1, memory_order_relaxed) != 1)
+      return;  // The other dependency is not ready
+
+    sepFilter2D(e.Lx, e.Lxy, CV_32F, e.DyKx, e.DyKy);
+    for (int j = 0; j < total; j++) ldet[j] = lxx[j] * lyy[j] - lxy[j] * lxy[j];
+  });
+
+  // tasklist_[0][level] and tasklist_[1][level] have to be waited later on
+}
+
+#else
+
+void AKAZEFeaturesV2::Compute_Determinant_Hessian_Response(const int level) {
+  Compute_Determinant_Hessian_Response_Single(level);
+}
+
+#endif
+
+/* ************************************************************************* */
+/**
+ * @brief This method searches v for a neighbor point of the point candidate p
+ * @param p The keypoint candidate to search a neighbor
+ * @param v The vector to store the points to be searched
+ * @param offset The starting location in the vector v to be searched at
+ * @param idx The index of the vector v if a neighbor is found
+ * @return true if a neighbor point is found; false otherwise
+ */
+inline bool find_neighbor_point(const KeyPoint& p, const vector<KeyPoint>& v,
+                                const int offset, int& idx) {
+  const int sz = (int)v.size();
+
+  for (int i = offset; i < sz; i++) {
+    if (v[i].class_id == -1)  // Skip a deleted point
+      continue;
+
+    float dx = p.pt.x - v[i].pt.x;
+    float dy = p.pt.y - v[i].pt.y;
+    if (dx * dx + dy * dy <= p.size * p.size) {
+      idx = i;
+      return true;
+    }
+  }
+
+  return false;
+}
+
+inline bool find_neighbor_point_inv(const KeyPoint& p,
+                                    const vector<KeyPoint>& v, const int offset,
+                                    int& idx) {
+  const int sz = (int)v.size();
+
+  for (int i = offset; i < sz; i++) {
+    if (v[i].class_id == -1)  // Skip a deleted point
+      continue;
+
+    float dx = p.pt.x - v[i].pt.x;
+    float dy = p.pt.y - v[i].pt.y;
+    if (dx * dx + dy * dy <= v[i].size * v[i].size) {
+      idx = i;
+      return true;
+    }
+  }
+
+  return false;
+}
+
+/* ************************************************************************* */
+/**
+ * @brief This method finds extrema in the nonlinear scale space
+ * @param kpts_aux Output vectors of detected keypoints; one vector for each
+ * evolution level
+ */
+inline void AKAZEFeaturesV2::Find_Scale_Space_Extrema_Single(
+    std::vector<vector<KeyPoint>>& kpts_aux) {
+  // Clear the workspace to hold the keypoint candidates
+  for (size_t i = 0; i < kpts_aux_.size(); i++) kpts_aux_[i].clear();
+
+  for (int i = 0; i < (int)evolution_.size(); i++) {
+    const TEvolutionV2& step = evolution_[i];
+
+    const float* prev = step.Ldet.ptr<float>(step.border - 1);
+    const float* curr = step.Ldet.ptr<float>(step.border);
+    const float* next = step.Ldet.ptr<float>(step.border + 1);
+
+    for (int y = step.border; y < step.Ldet.rows - step.border; y++) {
+      for (int x = step.border; x < step.Ldet.cols - step.border; x++) {
+        const float value = curr[x];
+
+        // Filter the points with the detector threshold
+        if (value <= options_.dthreshold) continue;
+        if (value <= curr[x - 1] || value <= curr[x + 1]) continue;
+        if (value <= prev[x - 1] || value <= prev[x] || value <= prev[x + 1])
+          continue;
+        if (value <= next[x - 1] || value <= next[x] || value <= next[x + 1])
+          continue;
+
+        KeyPoint point(/* x */ static_cast<float>(x * step.octave_ratio),
+                       /* y */ static_cast<float>(y * step.octave_ratio),
+                       /* size */ step.esigma * options_.derivative_factor,
+                       /* angle */ -1,
+                       /* response */ value,
+                       /* octave */ step.octave,
+                       /* class_id */ i);
+
+        int idx = 0;
+
+        // Compare response with the same scale
+        if (find_neighbor_point(point, kpts_aux[i], 0, idx)) {
+          if (point.response > kpts_aux[i][idx].response)
+            kpts_aux[i][idx] = point;  // Replace the old point
+          continue;
+        }
+
+        // Compare response with the lower scale
+        if (i > 0 && find_neighbor_point(point, kpts_aux[i - 1], 0, idx)) {
+          if (point.response > kpts_aux[i - 1][idx].response) {
+            kpts_aux[i - 1][idx].class_id = -1;  // Mark it as deleted
+            kpts_aux[i].push_back(
+                point);  // Insert the new point to the right layer
+          }
+          continue;
+        }
+
+        kpts_aux[i].push_back(point);  // A good keypoint candidate is found
+      }
+      prev = curr;
+      curr = next;
+      next += step.Ldet.cols;
+    }
+  }
+
+  // Now filter points with the upper scale level
+  for (int i = 0; i < (int)kpts_aux.size() - 1; i++) {
+    for (int j = 0; j < (int)kpts_aux[i].size(); j++) {
+      KeyPoint& pt = kpts_aux[i][j];
+
+      if (pt.class_id == -1)  // Skip a deleted point
+        continue;
+
+      int idx = 0;
+      while (find_neighbor_point_inv(pt, kpts_aux[i + 1], idx, idx)) {
+        if (pt.response > kpts_aux[i + 1][idx].response)
+          kpts_aux[i + 1][idx].class_id = -1;
+        ++idx;
+      }
+    }
+  }
+}
+
+#ifdef AKAZE_USE_CPP11_THREADING
+
+/* ************************************************************************* */
+/**
+ * @brief This method finds extrema in the nonlinear scale space
+ * @param kpts_aux Output vectors of detected keypoints; one vector for each
+ * evolution level
+ * @note This is parallelized version of Find_Scale_Space_Extrema()
+ */
+void AKAZEFeaturesV2::Find_Scale_Space_Extrema(
+    std::vector<vector<KeyPoint>>& kpts_aux) {
+  if (getNumThreads() == 1) {
+    Find_Scale_Space_Extrema_Single(kpts_aux);
+    return;
+  }
+
+  for (int i = 0; i < (int)evolution_.size(); i++) {
+    const TEvolutionV2& step = evolution_[i];
+    vector<cv::KeyPoint>& kpts = kpts_aux[i];
+
+    // Clear the workspace to hold the keypoint candidates
+    kpts_aux_[i].clear();
+
+    auto mode = (i > 0 ? launch::async : launch::deferred);
+    tasklist_[0][i] = async(
+        mode,
+        [&step, &kpts, i](const AKAZEOptionsV2& opt) {
+          const float* prev = step.Ldet.ptr<float>(step.border - 1);
+          const float* curr = step.Ldet.ptr<float>(step.border);
+          const float* next = step.Ldet.ptr<float>(step.border + 1);
+
+          for (int y = step.border; y < step.Ldet.rows - step.border; y++) {
+            for (int x = step.border; x < step.Ldet.cols - step.border; x++) {
+              const float value = curr[x];
+
+              // Filter the points with the detector threshold
+              if (value <= opt.dthreshold) continue;
+              if (value <= curr[x - 1] || value <= curr[x + 1]) continue;
+              if (value <= prev[x - 1] || value <= prev[x] ||
+                  value <= prev[x + 1])
+                continue;
+              if (value <= next[x - 1] || value <= next[x] ||
+                  value <= next[x + 1])
+                continue;
+
+              KeyPoint point(/* x */ static_cast<float>(x * step.octave_ratio),
+                             /* y */ static_cast<float>(y * step.octave_ratio),
+                             /* size */ step.esigma * opt.derivative_factor,
+                             /* angle */ -1,
+                             /* response */ value,
+                             /* octave */ step.octave,
+                             /* class_id */ i);
+
+              int idx = 0;
+
+              // Compare response with the same scale
+              if (find_neighbor_point(point, kpts, 0, idx)) {
+                if (point.response > kpts[idx].response)
+                  kpts[idx] = point;  // Replace the old point
+                continue;
+              }
+
+              kpts.push_back(point);
+            }
+
+            prev = curr;
+            curr = next;
+            next += step.Ldet.cols;
+          }
+        },
+        options_);
+  }
+
+  tasklist_[0][0].get();
+
+  // Filter points with the lower scale level
+  for (int i = 1; i < (int)kpts_aux.size(); i++) {
+    tasklist_[0][i].get();
+
+    for (int j = 0; j < (int)kpts_aux[i].size(); j++) {
+      KeyPoint& pt = kpts_aux[i][j];
+
+      int idx = 0;
+      while (find_neighbor_point(pt, kpts_aux[i - 1], idx, idx)) {
+        if (pt.response > kpts_aux[i - 1][idx].response)
+          kpts_aux[i - 1][idx].class_id = -1;
+        // else this pt may be pruned by the upper scale
+        ++idx;
+      }
+    }
+  }
+
+  // Now filter points with the upper scale level (the other direction)
+  for (int i = (int)kpts_aux.size() - 2; i >= 0; i--) {
+    for (int j = 0; j < (int)kpts_aux[i].size(); j++) {
+      KeyPoint& pt = kpts_aux[i][j];
+
+      if (pt.class_id == -1)  // Skip a deleted point
+        continue;
+
+      int idx = 0;
+      while (find_neighbor_point_inv(pt, kpts_aux[i + 1], idx, idx)) {
+        if (pt.response > kpts_aux[i + 1][idx].response)
+          kpts_aux[i + 1][idx].class_id = -1;
+        ++idx;
+      }
+    }
+  }
+}
+
+#else
+
+void AKAZEFeaturesV2::Find_Scale_Space_Extrema(
+    std::vector<vector<KeyPoint>>& kpts_aux) {
+  Find_Scale_Space_Extrema_Single(kpts_aux);
+}
+
+#endif
+
+/* ************************************************************************* */
+/**
+ * @brief This method performs subpixel refinement of the detected keypoints
+ * @param kpts_aux Input vectors of detected keypoints, sorted by evolution
+ * levels
+ * @param kpts Output vector of the final refined keypoints
+ */
+void AKAZEFeaturesV2::Do_Subpixel_Refinement(
+    std::vector<std::vector<KeyPoint>>& kpts_aux, std::vector<KeyPoint>& kpts) {
+  // Clear the keypoint vector
+  kpts.clear();
+
+  for (int i = 0; i < (int)kpts_aux.size(); i++) {
+    const float* const ldet = evolution_[i].Ldet.ptr<float>(0);
+    const float ratio = evolution_[i].octave_ratio;
+    const int cols = evolution_[i].Ldet.cols;
+
+    for (int j = 0; j < (int)kpts_aux[i].size(); j++) {
+      KeyPoint& kp = kpts_aux[i][j];
+
+      if (kp.class_id == -1) continue;  // Skip a deleted keypoint
+
+      int x = (int)(kp.pt.x / ratio);
+      int y = (int)(kp.pt.y / ratio);
+
+      // Compute the gradient
+      float Dx = 0.5f * (ldet[y * cols + x + 1] - ldet[y * cols + x - 1]);
+      float Dy = 0.5f * (ldet[(y + 1) * cols + x] - ldet[(y - 1) * cols + x]);
+
+      // Compute the Hessian
+      float Dxx = ldet[y * cols + x + 1] + ldet[y * cols + x - 1] -
+                  2.0f * ldet[y * cols + x];
+      float Dyy = ldet[(y + 1) * cols + x] + ldet[(y - 1) * cols + x] -
+                  2.0f * ldet[y * cols + x];
+      float Dxy =
+          0.25f * (ldet[(y + 1) * cols + x + 1] + ldet[(y - 1) * cols + x - 1] -
+                   ldet[(y - 1) * cols + x + 1] - ldet[(y + 1) * cols + x - 1]);
+
+      // Solve the linear system
+      Matx22f A{Dxx, Dxy, Dxy, Dyy};
+      Vec2f b{-Dx, -Dy};
+      Vec2f dst{0.0f, 0.0f};
+      solve(A, b, dst, DECOMP_LU);
+
+      float dx = dst(0);
+      float dy = dst(1);
+
+      if (fabs(dx) > 1.0f || fabs(dy) > 1.0f)
+        continue;  // Ignore the point that is not stable
+
+      // Refine the coordinates
+      kp.pt.x += dx * ratio;
+      kp.pt.y += dy * ratio;
+
+      kp.angle = 0.0;
+      kp.size *= 2.0f;  // In OpenCV the size of a keypoint is the diameter
+
+      // Push the refined keypoint to the final storage
+      kpts.push_back(kp);
+    }
+  }
+}
+
+/* ************************************************************************* */
+
+class SURF_Descriptor_Upright_64_InvokerV2 : public ParallelLoopBody {
+ public:
+  SURF_Descriptor_Upright_64_InvokerV2(
+      std::vector<KeyPoint>& kpts, Mat& desc,
+      const std::vector<TEvolutionV2>& evolution)
+      : keypoints_(kpts), descriptors_(desc), evolution_(evolution) {}
+
+  void operator()(const Range& range) const {
+    for (int i = range.start; i < range.end; i++) {
+      Get_SURF_Descriptor_Upright_64(keypoints_[i], descriptors_.ptr<float>(i));
+    }
+  }
+
+  void Get_SURF_Descriptor_Upright_64(const KeyPoint& kpt, float* desc) const;
+
+ private:
+  std::vector<KeyPoint>& keypoints_;
+  Mat& descriptors_;
+  const std::vector<TEvolutionV2>& evolution_;
+};
+
+class SURF_Descriptor_64_InvokerV2 : public ParallelLoopBody {
+ public:
+  SURF_Descriptor_64_InvokerV2(std::vector<KeyPoint>& kpts, Mat& desc,
+                               const std::vector<TEvolutionV2>& evolution)
+      : keypoints_(kpts), descriptors_(desc), evolution_(evolution) {}
+
+  void operator()(const Range& range) const {
+    for (int i = range.start; i < range.end; i++) {
+      KeyPoint& kp = keypoints_[i];
+      Compute_Main_Orientation(kp, evolution_[kp.class_id]);
+      Get_SURF_Descriptor_64(kp, descriptors_.ptr<float>(i));
+    }
+  }
+
+  void Get_SURF_Descriptor_64(const KeyPoint& kpt, float* desc) const;
+
+ private:
+  std::vector<KeyPoint>& keypoints_;
+  Mat& descriptors_;
+  const std::vector<TEvolutionV2>& evolution_;
+};
+
+class MSURF_Upright_Descriptor_64_InvokerV2 : public ParallelLoopBody {
+ public:
+  MSURF_Upright_Descriptor_64_InvokerV2(
+      std::vector<KeyPoint>& kpts, Mat& desc,
+      const std::vector<TEvolutionV2>& evolution)
+      : keypoints_(kpts), descriptors_(desc), evolution_(evolution) {}
+
+  void operator()(const Range& range) const {
+    for (int i = range.start; i < range.end; i++) {
+      Get_MSURF_Upright_Descriptor_64(keypoints_[i],
+                                      descriptors_.ptr<float>(i));
+    }
+  }
+
+  void Get_MSURF_Upright_Descriptor_64(const KeyPoint& kpt, float* desc) const;
+
+ private:
+  std::vector<KeyPoint>& keypoints_;
+  Mat& descriptors_;
+  const std::vector<TEvolutionV2>& evolution_;
+};
+
+class MSURF_Descriptor_64_InvokerV2 : public ParallelLoopBody {
+ public:
+  MSURF_Descriptor_64_InvokerV2(std::vector<KeyPoint>& kpts, Mat& desc,
+                                const std::vector<TEvolutionV2>& evolution)
+      : keypoints_(kpts), descriptors_(desc), evolution_(evolution) {}
+
+  void operator()(const Range& range) const {
+    for (int i = range.start; i < range.end; i++) {
+      Compute_Main_Orientation(keypoints_[i],
+                               evolution_[keypoints_[i].class_id]);
+      Get_MSURF_Descriptor_64(keypoints_[i], descriptors_.ptr<float>(i));
+    }
+  }
+
+  void Get_MSURF_Descriptor_64(const KeyPoint& kpt, float* desc) const;
+
+ private:
+  std::vector<KeyPoint>& keypoints_;
+  Mat& descriptors_;
+  const std::vector<TEvolutionV2>& evolution_;
+};
+
+class Upright_MLDB_Full_Descriptor_InvokerV2 : public ParallelLoopBody {
+ public:
+  Upright_MLDB_Full_Descriptor_InvokerV2(
+      std::vector<KeyPoint>& kpts, Mat& desc,
+      const std::vector<TEvolutionV2>& evolution, const AKAZEOptionsV2& options)
+      : keypoints_(kpts),
+        descriptors_(desc),
+        evolution_(evolution),
+        options_(options) {}
+
+  void operator()(const Range& range) const {
+    for (int i = range.start; i < range.end; i++) {
+      Get_Upright_MLDB_Full_Descriptor(keypoints_[i],
+                                       descriptors_.ptr<unsigned char>(i));
+    }
+  }
+
+  void Get_Upright_MLDB_Full_Descriptor(const KeyPoint& kpt,
+                                        unsigned char* desc) const;
+
+ private:
+  std::vector<KeyPoint>& keypoints_;
+  Mat& descriptors_;
+  const std::vector<TEvolutionV2>& evolution_;
+  const AKAZEOptionsV2& options_;
+};
+
+class Upright_MLDB_Descriptor_Subset_InvokerV2 : public ParallelLoopBody {
+ public:
+  Upright_MLDB_Descriptor_Subset_InvokerV2(
+      std::vector<KeyPoint>& kpts, Mat& desc,
+      const std::vector<TEvolutionV2>& evolution, const AKAZEOptionsV2& options,
+      const Mat& descriptorSamples, const Mat& descriptorBits)
+      : keypoints_(kpts),
+        descriptors_(desc),
+        evolution_(evolution),
+        options_(options),
+        descriptorSamples_(descriptorSamples),
+        descriptorBits_(descriptorBits) {}
+
+  void operator()(const Range& range) const {
+    for (int i = range.start; i < range.end; i++) {
+      Get_Upright_MLDB_Descriptor_Subset(keypoints_[i],
+                                         descriptors_.ptr<unsigned char>(i));
+    }
+  }
+
+  void Get_Upright_MLDB_Descriptor_Subset(const KeyPoint& kpt,
+                                          unsigned char* desc) const;
+
+ private:
+  std::vector<KeyPoint>& keypoints_;
+  Mat& descriptors_;
+  const std::vector<TEvolutionV2>& evolution_;
+  const AKAZEOptionsV2& options_;
+
+  const Mat& descriptorSamples_;  // List of positions in the grids to sample
+                                  // LDB bits from.
+  const Mat& descriptorBits_;
+};
+
+class MLDB_Full_Descriptor_InvokerV2 : public ParallelLoopBody {
+ public:
+  MLDB_Full_Descriptor_InvokerV2(std::vector<KeyPoint>& kpts, Mat& desc,
+                                 const std::vector<TEvolutionV2>& evolution,
+                                 const AKAZEOptionsV2& options)
+      : keypoints_(kpts),
+        descriptors_(desc),
+        evolution_(evolution),
+        options_(options) {}
+
+  void operator()(const Range& range) const {
+    for (int i = range.start; i < range.end; i++) {
+      Compute_Main_Orientation(keypoints_[i],
+                               evolution_[keypoints_[i].class_id]);
+      Get_MLDB_Full_Descriptor(keypoints_[i],
+                               descriptors_.ptr<unsigned char>(i));
+      keypoints_[i].angle *= (float)(180.0 / CV_PI);
+    }
+  }
+
+  void Get_MLDB_Full_Descriptor(const KeyPoint& kpt, unsigned char* desc) const;
+  void MLDB_Fill_Values(float* values, int sample_step, int level, float xf,
+                        float yf, float co, float si, float scale) const;
+  void MLDB_Binary_Comparisons(float* values, unsigned char* desc, int count,
+                               int& dpos) const;
+
+ private:
+  std::vector<KeyPoint>& keypoints_;
+  Mat& descriptors_;
+  const std::vector<TEvolutionV2>& evolution_;
+  const AKAZEOptionsV2& options_;
+};
+
+class MLDB_Descriptor_Subset_InvokerV2 : public ParallelLoopBody {
+ public:
+  MLDB_Descriptor_Subset_InvokerV2(std::vector<KeyPoint>& kpts, Mat& desc,
+                                   const std::vector<TEvolutionV2>& evolution,
+                                   const AKAZEOptionsV2& options,
+                                   const Mat& descriptorSamples,
+                                   const Mat& descriptorBits)
+      : keypoints_(kpts),
+        descriptors_(desc),
+        evolution_(evolution),
+        options_(options),
+        descriptorSamples_(descriptorSamples),
+        descriptorBits_(descriptorBits) {}
+
+  void operator()(const Range& range) const {
+    for (int i = range.start; i < range.end; i++) {
+      Compute_Main_Orientation(keypoints_[i],
+                               evolution_[keypoints_[i].class_id]);
+      Get_MLDB_Descriptor_Subset(keypoints_[i],
+                                 descriptors_.ptr<unsigned char>(i));
+      keypoints_[i].angle *= (float)(180.0 / CV_PI);
+    }
+  }
+
+  void Get_MLDB_Descriptor_Subset(const KeyPoint& kpt,
+                                  unsigned char* desc) const;
+
+ private:
+  std::vector<KeyPoint>& keypoints_;
+  Mat& descriptors_;
+  const std::vector<TEvolutionV2>& evolution_;
+  const AKAZEOptionsV2& options_;
+
+  const Mat& descriptorSamples_;  // List of positions in the grids to sample
+                                  // LDB bits from.
+  const Mat& descriptorBits_;
+};
+
+/**
+ * @brief This method  computes the set of descriptors through the nonlinear
+ * scale space
+ * @param kpts Vector of detected keypoints
+ * @param desc Matrix to store the descriptors
+ */
+void AKAZEFeaturesV2::Compute_Descriptors(std::vector<KeyPoint>& kpts,
+                                          Mat& desc) {
+  for (size_t i = 0; i < kpts.size(); i++) {
+    CV_Assert(0 <= kpts[i].class_id &&
+              kpts[i].class_id < static_cast<int>(evolution_.size()));
+  }
+
+  // Allocate memory for the descriptor matrix
+  if (options_.descriptor < AKAZE::DESCRIPTOR_MLDB_UPRIGHT) {
+    desc.create((int)kpts.size(), 64, CV_32FC1);
+  } else {
+    // We use the full length binary descriptor -> 486 bits
+    if (options_.descriptor_size == 0) {
+      int t = (6 + 36 + 120) * options_.descriptor_channels;
+      desc.create((int)kpts.size(), (int)ceil(t / 8.), CV_8UC1);
+    } else {
+      // We use the random bit selection length binary descriptor
+      desc.create((int)kpts.size(), (int)ceil(options_.descriptor_size / 8.),
+                  CV_8UC1);
+    }
+  }
+
+  // Compute descriptors by blocks of 16 keypoints
+  const double stride = kpts.size() / (double)(1 << 4);
+
+  switch (options_.descriptor) {
+    case AKAZE::DESCRIPTOR_KAZE_UPRIGHT:  // Upright descriptors, not invariant
+                                          // to rotation
+    {
+      parallel_for_(
+          Range(0, (int)kpts.size()),
+          MSURF_Upright_Descriptor_64_InvokerV2(kpts, desc, evolution_),
+          stride);
+    } break;
+    case AKAZE::DESCRIPTOR_KAZE: {
+      parallel_for_(Range(0, (int)kpts.size()),
+                    MSURF_Descriptor_64_InvokerV2(kpts, desc, evolution_),
+                    stride);
+    } break;
+    case AKAZE::DESCRIPTOR_MLDB_UPRIGHT:  // Upright descriptors, not invariant
+                                          // to rotation
+    {
+      if (options_.descriptor_size == 0)
+        parallel_for_(Range(0, (int)kpts.size()),
+                      Upright_MLDB_Full_Descriptor_InvokerV2(
+                          kpts, desc, evolution_, options_),
+                      stride);
+      else
+        parallel_for_(Range(0, (int)kpts.size()),
+                      Upright_MLDB_Descriptor_Subset_InvokerV2(
+                          kpts, desc, evolution_, options_, descriptorSamples_,
+                          descriptorBits_),
+                      stride);
+    } break;
+    case AKAZE::DESCRIPTOR_MLDB: {
+      if (options_.descriptor_size == 0)
+        parallel_for_(
+            Range(0, (int)kpts.size()),
+            MLDB_Full_Descriptor_InvokerV2(kpts, desc, evolution_, options_),
+            stride);
+      else
+        parallel_for_(Range(0, (int)kpts.size()),
+                      MLDB_Descriptor_Subset_InvokerV2(
+                          kpts, desc, evolution_, options_, descriptorSamples_,
+                          descriptorBits_),
+                      stride);
+    } break;
+  }
+}
+
+/* ************************************************************************* */
/**
 * @brief This function samples the derivative responses Lx and Ly for the
 * points within the radius of 6*scale from (x0, y0), then multiply 2D Gaussian
 * weight
 * @param Lx Horizontal derivative
 * @param Ly Vertical derivative
 * @param x0 X-coordinate of the center point
 * @param y0 Y-coordinate of the center point
 * @param scale The sampling step
 * @param resX Output array of the weighted horizontal derivative responses
 * @param resY Output array of the weighted vertical derivative responses
 * @note resX and resY must have room for 109 elements: the number of integer
 * grid points (i, j) with i*i + j*j < 36. No bounds checking is done here —
 * all sampled points are assumed to lie inside the image (callers must
 * guarantee this).
 */
static inline void Sample_Derivative_Response_Radius6(
    const Mat& Lx, const Mat& Ly, const int x0, const int y0, const int scale,
    float* resX, float* resY) {
  /* *************************************************************************
   */
  /// Lookup table for 2d gaussian (sigma = 2.5) where (0,0) is top left and
  /// (6,6) is bottom right
  static const float gauss25[7][7] = {
      {0.02546481f, 0.02350698f, 0.01849125f, 0.01239505f, 0.00708017f,
       0.00344629f, 0.00142946f},
      {0.02350698f, 0.02169968f, 0.01706957f, 0.01144208f, 0.00653582f,
       0.00318132f, 0.00131956f},
      {0.01849125f, 0.01706957f, 0.01342740f, 0.00900066f, 0.00514126f,
       0.00250252f, 0.00103800f},
      {0.01239505f, 0.01144208f, 0.00900066f, 0.00603332f, 0.00344629f,
       0.00167749f, 0.00069579f},
      {0.00708017f, 0.00653582f, 0.00514126f, 0.00344629f, 0.00196855f,
       0.00095820f, 0.00039744f},
      {0.00344629f, 0.00318132f, 0.00250252f, 0.00167749f, 0.00095820f,
       0.00046640f, 0.00019346f},
      {0.00142946f, 0.00131956f, 0.00103800f, 0.00069579f, 0.00039744f,
       0.00019346f, 0.00008024f}};
  // Mirror index: maps signed offsets -6..6 onto table indices 6..0..6, since
  // the Gaussian table stores only one quadrant.
  static const int id[] = {6, 5, 4, 3, 2, 1, 0, 1, 2, 3, 4, 5, 6};
  // Table of the 109 in-disk offsets and their weights, built exactly once by
  // the static object's constructor (thread-safe since C++11).
  static const struct gtable {
    float weight[109];
    int8_t xidx[109];
    int8_t yidx[109];

    explicit gtable(void) {
      // Generate the weight and indices by one-time initialization
      int k = 0;
      for (int i = -6; i <= 6; ++i) {
        for (int j = -6; j <= 6; ++j) {
          if (i * i + j * j < 36) {
            weight[k] = gauss25[id[i + 6]][id[j + 6]];
            yidx[k] = i;
            xidx[k] = j;
            ++k;
          }
        }
      }
      CV_DbgAssert(k == 109);
    }
  } g;

  const float* lx = Lx.ptr<float>(0);
  const float* ly = Ly.ptr<float>(0);
  int cols = Lx.cols;

  // Gather the weighted responses via flat row-major indexing; this assumes
  // Lx and Ly are continuous single-channel float matrices of equal size.
  for (int i = 0; i < 109; i++) {
    int j = (y0 + g.yidx[i] * scale) * cols + (x0 + g.xidx[i] * scale);

    resX[i] = g.weight[i] * lx[j];
    resY[i] = g.weight[i] * ly[j];
  }
}
+
+/* ************************************************************************* */
/**
 * @brief This function sorts a[] by quantized float values
 * @param a[] Input floating point array to sort
 * @param n The length of a[]
 * @param quantum The interval to convert a[i]'s float values to integers
 * @param max The upper bound of a[], meaning a[i] must be in [0, max]
 * @param idx[] Output array of the indices: a[idx[i]] forms a sorted array
 * @param cum[] Output array of the starting indices of quantized floats
 * @note The values of a[] in [k*quantum, (k + 1)*quantum) is labeled by
 * the integer k, which is calculated by floor(a[i]/quantum).  After sorting,
 * the values from a[idx[cum[k]]] to a[idx[cum[k+1]-1]] are all labeled by k.
 * This sorting is unstable to reduce the memory access.
 * @note A boundary value a[i] == max would naturally map to key nkeys, which
 * aliases cum[nkeys] — the slot callers read as the total element count. Such
 * values are therefore clamped into the last bucket (key nkeys - 1).
 */
static inline void quantized_counting_sort(const float a[], const int n,
                                           const float quantum, const float max,
                                           uint8_t idx[], uint8_t cum[]) {
  const int nkeys = (int)(max / quantum);

  // The size of cum[] must be nkeys + 1
  memset(cum, 0, nkeys + 1);

  // Count up the quantized values; clamp a[i] == max into the last bucket so
  // cum[nkeys] keeps meaning "total count" after the prefix sums below
  for (int i = 0; i < n; i++) {
    int key = (int)(a[i] / quantum);
    if (key >= nkeys) key = nkeys - 1;
    cum[key]++;
  }

  // Compute the inclusive prefix sum i.e. the end indices; cum[nkeys] is the
  // total
  for (int i = 1; i <= nkeys; i++) cum[i] += cum[i - 1];

  // Generate the sorted indices; cum[] becomes the exclusive prefix sum i.e.
  // the start indices of keys
  for (int i = 0; i < n; i++) {
    int key = (int)(a[i] / quantum);
    if (key >= nkeys) key = nkeys - 1;
    idx[--cum[key]] = (uint8_t)i;
  }
}
+
+/* ************************************************************************* */
/**
 * @brief This function computes the main orientation for a given keypoint
 * @param kpt Input keypoint; its angle field is overwritten with the result
 * @param e Evolution level (scale-space slice) the keypoint belongs to
 * @note The orientation is computed using a similar approach as described in
 * the original SURF method. See Bay et al., Speeded Up Robust Features, ECCV
 * 2006
 */
inline void Compute_Main_Orientation(KeyPoint& kpt, const TEvolutionV2& e) {
  // Get the information from the keypoint
  int scale = fRoundV2(0.5f * kpt.size / e.octave_ratio);
  int x0 = fRoundV2(kpt.pt.x / e.octave_ratio);
  int y0 = fRoundV2(kpt.pt.y / e.octave_ratio);

  // Sample derivatives responses for the points within radius of 6*scale
  const int ang_size = 109;
  float resX[ang_size], resY[ang_size];
  Sample_Derivative_Response_Radius6(e.Lx, e.Ly, x0, y0, scale, resX, resY);

  // Compute the angle of each gradient vector
  float Ang[ang_size];
  hal::fastAtan2(resY, resX, Ang, ang_size, false);  // false -> radians

  // Sort by the angles; angles are labeled by slices of 0.15 radian
  const int slices = 42;
  const float ang_step = (float)(2.0 * CV_PI / slices);
  uint8_t slice[slices + 1];
  uint8_t sorted_idx[ang_size];
  quantized_counting_sort(Ang, ang_size, ang_step, (float)(2.0 * CV_PI),
                          sorted_idx, slice);
  // After the sort, slice[k] is the start index (into sorted_idx) of the
  // samples falling in angular slice k, and slice[slices] is the total count.

  // Find the main angle by sliding a window of 7-slice size(=PI/3) around the
  // keypoint
  const int win = 7;

  // Seed with the window covering slices [0, win).
  float maxX = 0.0f, maxY = 0.0f;
  for (int i = slice[0]; i < slice[win]; i++) {
    maxX += resX[sorted_idx[i]];
    maxY += resY[sorted_idx[i]];
  }
  float maxNorm = maxX * maxX + maxY * maxY;

  // Slide the window over the positions where it does not wrap around 2*PI.
  for (int sn = 1; sn <= slices - win; sn++) {
    if (slice[sn] == slice[sn - 1] && slice[sn + win] == slice[sn + win - 1])
      continue;  // The contents of the window didn't change; don't repeat the
                 // computation

    float sumX = 0.0f, sumY = 0.0f;
    for (int i = slice[sn]; i < slice[sn + win]; i++) {
      sumX += resX[sorted_idx[i]];
      sumY += resY[sorted_idx[i]];
    }

    float norm = sumX * sumX + sumY * sumY;
    if (norm > maxNorm)
      maxNorm = norm, maxX = sumX, maxY = sumY;  // Found bigger one; update
  }

  // Handle the windows that wrap around 2*PI: slices [sn, slices) joined with
  // slices [0, remain).
  for (int sn = slices - win + 1; sn < slices; sn++) {
    int remain = sn + win - slices;

    if (slice[sn] == slice[sn - 1] && slice[remain] == slice[remain - 1])
      continue;

    float sumX = 0.0f, sumY = 0.0f;
    for (int i = slice[sn]; i < slice[slices]; i++) {
      sumX += resX[sorted_idx[i]];
      sumY += resY[sorted_idx[i]];
    }
    for (int i = slice[0]; i < slice[remain]; i++) {
      sumX += resX[sorted_idx[i]];
      sumY += resY[sorted_idx[i]];
    }

    float norm = sumX * sumX + sumY * sumY;
    if (norm > maxNorm) maxNorm = norm, maxX = sumX, maxY = sumY;
  }

  // Store the final result: the angle of the dominant summed gradient
  kpt.angle = getAngleV2(maxX, maxY);
}
+
+/* ************************************************************************* */
/**
 * @brief This method computes the upright descriptor (not rotation invariant)
 * of the provided keypoint
 * @param kpt Input keypoint
 * @param desc Descriptor vector of 64 floats, normalized to unit length
 * @note Rectangular grid of 24 s x 24 s. Descriptor Length 64. The descriptor
 * is inspired from Agrawal et al., CenSurE: Center Surround Extremas for
 * Realtime Feature Detection and Matching, ECCV 2008
 */
void MSURF_Upright_Descriptor_64_InvokerV2::Get_MSURF_Upright_Descriptor_64(
    const KeyPoint& kpt, float* desc) const {
  float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0,
        gauss_s2 = 0.0;
  float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0;
  float sample_x = 0.0, sample_y = 0.0;
  int x1 = 0, y1 = 0, sample_step = 0, pattern_size = 0;
  int x2 = 0, y2 = 0, kx = 0, ky = 0, i = 0, j = 0, dcount = 0;
  float fx = 0.0, fy = 0.0, ratio = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0,
        res4 = 0.0;
  int scale = 0, dsize = 0, level = 0;

  // Subregion centers for the 4x4 gaussian weighting
  float cx = -0.5f, cy = 0.5f;

  // Set the descriptor size and the sample and pattern sizes
  dsize = 64;
  sample_step = 5;
  pattern_size = 12;

  // Get the information from the keypoint
  level = kpt.class_id;
  ratio = evolution_[level].octave_ratio;
  scale = fRoundV2(0.5f * kpt.size / ratio);
  yf = kpt.pt.y / ratio;
  xf = kpt.pt.x / ratio;

  i = -8;

  // Calculate descriptor for this interest point
  // Area of size 24 s x 24 s
  // The 4x4 subregions advance by 5 samples per step (i = i - 4 then i += 9)
  // and each covers 9x9 samples, so neighbouring subregions overlap.
  while (i < pattern_size) {
    j = -8;
    i = i - 4;

    cx += 1.0f;
    cy = -0.5f;

    while (j < pattern_size) {
      // Per-subregion accumulators: signed and absolute weighted responses
      dx = dy = mdx = mdy = 0.0;
      cy += 1.0f;
      j = j - 4;

      ky = i + sample_step;
      kx = j + sample_step;

      // Subregion center, used for the inner Gaussian weight
      ys = yf + (ky * scale);
      xs = xf + (kx * scale);

      for (int k = i; k < i + 9; k++) {
        for (int l = j; l < j + 9; l++) {
          sample_y = k * scale + yf;
          sample_x = l * scale + xf;

          // Get the gaussian weighted x and y responses
          gauss_s1 = gaussianV2(xs - sample_x, ys - sample_y, 2.50f * scale);

          // Corner pixels for bilinear interpolation (truncating casts)
          y1 = (int)(sample_y - .5);
          x1 = (int)(sample_x - .5);

          y2 = (int)(sample_y + .5);
          x2 = (int)(sample_x + .5);

          // Fractional position inside the pixel cell
          fx = sample_x - x1;
          fy = sample_y - y1;

          // Bilinearly interpolate the horizontal derivative Lx
          res1 = *(evolution_[level].Lx.ptr<float>(y1) + x1);
          res2 = *(evolution_[level].Lx.ptr<float>(y1) + x2);
          res3 = *(evolution_[level].Lx.ptr<float>(y2) + x1);
          res4 = *(evolution_[level].Lx.ptr<float>(y2) + x2);
          rx = (1.0f - fx) * (1.0f - fy) * res1 + fx * (1.0f - fy) * res2 +
               (1.0f - fx) * fy * res3 + fx * fy * res4;

          // Bilinearly interpolate the vertical derivative Ly
          res1 = *(evolution_[level].Ly.ptr<float>(y1) + x1);
          res2 = *(evolution_[level].Ly.ptr<float>(y1) + x2);
          res3 = *(evolution_[level].Ly.ptr<float>(y2) + x1);
          res4 = *(evolution_[level].Ly.ptr<float>(y2) + x2);
          ry = (1.0f - fx) * (1.0f - fy) * res1 + fx * (1.0f - fy) * res2 +
               (1.0f - fx) * fy * res3 + fx * fy * res4;

          rx = gauss_s1 * rx;
          ry = gauss_s1 * ry;

          // Sum the derivatives to the cumulative descriptor
          dx += rx;
          dy += ry;
          mdx += fabs(rx);
          mdy += fabs(ry);
        }
      }

      // Add the values to the descriptor vector
      gauss_s2 = gaussianV2(cx - 2.0f, cy - 2.0f, 1.5f);

      desc[dcount++] = dx * gauss_s2;
      desc[dcount++] = dy * gauss_s2;
      desc[dcount++] = mdx * gauss_s2;
      desc[dcount++] = mdy * gauss_s2;

      len += (dx * dx + dy * dy + mdx * mdx + mdy * mdy) * gauss_s2 * gauss_s2;

      j += 9;
    }

    i += 9;
  }

  // convert to unit vector
  // NOTE(review): no guard against len == 0; an all-zero response region
  // would produce NaNs here.
  len = sqrt(len);

  for (i = 0; i < dsize; i++) {
    desc[i] /= len;
  }
}
+
+/* ************************************************************************* */
/**
 * @brief This method computes the descriptor of the provided keypoint given the
 * main orientation of the keypoint
 * @param kpt Input keypoint; kpt.angle (set by Compute_Main_Orientation, in
 * radians) defines the sampling axes
 * @param desc Descriptor vector of 64 floats, normalized to unit length
 * @note Rectangular grid of 24 s x 24 s. Descriptor Length 64. The descriptor
 * is inspired from Agrawal et al., CenSurE: Center Surround Extremas for
 * Realtime Feature Detection and Matching, ECCV 2008
 */
void MSURF_Descriptor_64_InvokerV2::Get_MSURF_Descriptor_64(const KeyPoint& kpt,
                                                            float* desc) const {
  float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0,
        gauss_s2 = 0.0;
  float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0,
        ys = 0.0, xs = 0.0;
  float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0;
  float fx = 0.0, fy = 0.0, ratio = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0,
        res4 = 0.0;
  int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0;
  int kx = 0, ky = 0, i = 0, j = 0, dcount = 0;
  int scale = 0, dsize = 0, level = 0;

  // Subregion centers for the 4x4 gaussian weighting
  float cx = -0.5f, cy = 0.5f;

  // Set the descriptor size and the sample and pattern sizes
  dsize = 64;
  sample_step = 5;
  pattern_size = 12;

  // Get the information from the keypoint
  level = kpt.class_id;
  ratio = evolution_[level].octave_ratio;
  scale = fRoundV2(0.5f * kpt.size / ratio);
  angle = kpt.angle;
  yf = kpt.pt.y / ratio;
  xf = kpt.pt.x / ratio;
  // Rotation that aligns the sampling grid with the keypoint orientation
  co = cos(angle);
  si = sin(angle);

  i = -8;

  // Calculate descriptor for this interest point
  // Area of size 24 s x 24 s
  // The 4x4 subregions advance by 5 samples per step (i = i - 4 then i += 9)
  // and each covers 9x9 samples, so neighbouring subregions overlap.
  while (i < pattern_size) {
    j = -8;
    i = i - 4;

    cx += 1.0f;
    cy = -0.5f;

    while (j < pattern_size) {
      // Per-subregion accumulators: signed and absolute weighted responses
      dx = dy = mdx = mdy = 0.0;
      cy += 1.0f;
      j = j - 4;

      ky = i + sample_step;
      kx = j + sample_step;

      // Subregion center on the rotated axes
      xs = xf + (-kx * scale * si + ky * scale * co);
      ys = yf + (kx * scale * co + ky * scale * si);

      for (int k = i; k < i + 9; ++k) {
        for (int l = j; l < j + 9; ++l) {
          // Get coords of sample point on the rotated axis
          sample_y = yf + (l * scale * co + k * scale * si);
          sample_x = xf + (-l * scale * si + k * scale * co);

          // Get the gaussian weighted x and y responses
          gauss_s1 = gaussianV2(xs - sample_x, ys - sample_y, 2.5f * scale);

          // Corner pixels for bilinear interpolation
          y1 = fRoundV2(sample_y - 0.5f);
          x1 = fRoundV2(sample_x - 0.5f);

          y2 = fRoundV2(sample_y + 0.5f);
          x2 = fRoundV2(sample_x + 0.5f);

          // Fractional position inside the pixel cell
          fx = sample_x - x1;
          fy = sample_y - y1;

          // Bilinearly interpolate the horizontal derivative Lx
          res1 = *(evolution_[level].Lx.ptr<float>(y1) + x1);
          res2 = *(evolution_[level].Lx.ptr<float>(y1) + x2);
          res3 = *(evolution_[level].Lx.ptr<float>(y2) + x1);
          res4 = *(evolution_[level].Lx.ptr<float>(y2) + x2);
          rx = (1.0f - fx) * (1.0f - fy) * res1 + fx * (1.0f - fy) * res2 +
               (1.0f - fx) * fy * res3 + fx * fy * res4;

          // Bilinearly interpolate the vertical derivative Ly
          res1 = *(evolution_[level].Ly.ptr<float>(y1) + x1);
          res2 = *(evolution_[level].Ly.ptr<float>(y1) + x2);
          res3 = *(evolution_[level].Ly.ptr<float>(y2) + x1);
          res4 = *(evolution_[level].Ly.ptr<float>(y2) + x2);
          ry = (1.0f - fx) * (1.0f - fy) * res1 + fx * (1.0f - fy) * res2 +
               (1.0f - fx) * fy * res3 + fx * fy * res4;

          // Get the x and y derivatives on the rotated axis
          rry = gauss_s1 * (rx * co + ry * si);
          rrx = gauss_s1 * (-rx * si + ry * co);

          // Sum the derivatives to the cumulative descriptor
          dx += rrx;
          dy += rry;
          mdx += fabs(rrx);
          mdy += fabs(rry);
        }
      }

      // Add the values to the descriptor vector
      gauss_s2 = gaussianV2(cx - 2.0f, cy - 2.0f, 1.5f);
      desc[dcount++] = dx * gauss_s2;
      desc[dcount++] = dy * gauss_s2;
      desc[dcount++] = mdx * gauss_s2;
      desc[dcount++] = mdy * gauss_s2;

      len += (dx * dx + dy * dy + mdx * mdx + mdy * mdy) * gauss_s2 * gauss_s2;

      j += 9;
    }

    i += 9;
  }

  // convert to unit vector
  // NOTE(review): no guard against len == 0; an all-zero response region
  // would produce NaNs here.
  len = sqrt(len);

  for (i = 0; i < dsize; i++) {
    desc[i] /= len;
  }
}
+
+/* ************************************************************************* */
+/**
+ * @brief This method computes the rupright descriptor (not rotation invariant)
+ * of the provided keypoint
+ * @param kpt Input keypoint
+ * @param desc Descriptor vector
+ */
+void Upright_MLDB_Full_Descriptor_InvokerV2::Get_Upright_MLDB_Full_Descriptor(
+    const KeyPoint& kpt, unsigned char* desc) const {
+  float di = 0.0, dx = 0.0, dy = 0.0;
+  float ri = 0.0, rx = 0.0, ry = 0.0, xf = 0.0, yf = 0.0;
+  float sample_x = 0.0, sample_y = 0.0, ratio = 0.0;
+  int x1 = 0, y1 = 0, sample_step = 0, pattern_size = 0;
+  int level = 0, nsamples = 0, scale = 0;
+  int dcount1 = 0, dcount2 = 0;
+
+  CV_DbgAssert(options_.descriptor_channels <= 3);
+
+  // Matrices for the M-LDB descriptor: the dimensions are [grid size] by
+  // [channel size]
+  float values_1[4][3];
+  float values_2[9][3];
+  float values_3[16][3];
+
+  // Get the information from the keypoint
+  level = kpt.class_id;
+  ratio = evolution_[level].octave_ratio;
+  scale = evolution_[level].sigma_size;
+  yf = kpt.pt.y / ratio;
+  xf = kpt.pt.x / ratio;
+
+  // First 2x2 grid
+  pattern_size = options_.descriptor_pattern_size;
+  sample_step = pattern_size;
+
+  for (int i = -pattern_size; i < pattern_size; i += sample_step) {
+    for (int j = -pattern_size; j < pattern_size; j += sample_step) {
+      di = dx = dy = 0.0;
+      nsamples = 0;
+
+      for (int k = i; k < i + sample_step; k++) {
+        for (int l = j; l < j + sample_step; l++) {
+          // Get the coordinates of the sample point
+          sample_y = yf + l * scale;
+          sample_x = xf + k * scale;
+
+          y1 = fRoundV2(sample_y);
+          x1 = fRoundV2(sample_x);
+
+          ri = *(evolution_[level].Lt.ptr<float>(y1) + x1);
+          rx = *(evolution_[level].Lx.ptr<float>(y1) + x1);
+          ry = *(evolution_[level].Ly.ptr<float>(y1) + x1);
+
+          di += ri;
+          dx += rx;
+          dy += ry;
+          nsamples++;
+        }
+      }
+
+      di /= nsamples;
+      dx /= nsamples;
+      dy /= nsamples;
+
+      values_1[dcount2][0] = di;
+      values_1[dcount2][1] = dx;
+      values_1[dcount2][2] = dy;
+      dcount2++;
+    }
+  }
+
+  // Do binary comparison first level
+  for (int i = 0; i < 4; i++) {
+    for (int j = i + 1; j < 4; j++) {
+      if (values_1[i][0] > values_1[j][0]) {
+        desc[dcount1 / 8] |= (1 << (dcount1 % 8));
+      } else {
+        desc[dcount1 / 8] &= ~(1 << (dcount1 % 8));
+      }
+      dcount1++;
+
+      if (values_1[i][1] > values_1[j][1]) {
+        desc[dcount1 / 8] |= (1 << (dcount1 % 8));
+      } else {
+        desc[dcount1 / 8] &= ~(1 << (dcount1 % 8));
+      }
+      dcount1++;
+
+      if (values_1[i][2] > values_1[j][2]) {
+        desc[dcount1 / 8] |= (1 << (dcount1 % 8));
+      } else {
+        desc[dcount1 / 8] &= ~(1 << (dcount1 % 8));
+      }
+      dcount1++;
+    }
+  }
+
+  // Second 3x3 grid
+  sample_step = static_cast<int>(ceil(pattern_size * 2. / 3.));
+  dcount2 = 0;
+
+  for (int i = -pattern_size; i < pattern_size; i += sample_step) {
+    for (int j = -pattern_size; j < pattern_size; j += sample_step) {
+      di = dx = dy = 0.0;
+      nsamples = 0;
+
+      for (int k = i; k < i + sample_step; k++) {
+        for (int l = j; l < j + sample_step; l++) {
+          // Get the coordinates of the sample point
+          sample_y = yf + l * scale;
+          sample_x = xf + k * scale;
+
+          y1 = fRoundV2(sample_y);
+          x1 = fRoundV2(sample_x);
+
+          ri = *(evolution_[level].Lt.ptr<float>(y1) + x1);
+          rx = *(evolution_[level].Lx.ptr<float>(y1) + x1);
+          ry = *(evolution_[level].Ly.ptr<float>(y1) + x1);
+
+          di += ri;
+          dx += rx;
+          dy += ry;
+          nsamples++;
+        }
+      }
+
+      di /= nsamples;
+      dx /= nsamples;
+      dy /= nsamples;
+
+      values_2[dcount2][0] = di;
+      values_2[dcount2][1] = dx;
+      values_2[dcount2][2] = dy;
+      dcount2++;
+    }
+  }
+
+  // Do binary comparison second level
+  dcount2 = 0;
+  for (int i = 0; i < 9; i++) {
+    for (int j = i + 1; j < 9; j++) {
+      if (values_2[i][0] > values_2[j][0]) {
+        desc[dcount1 / 8] |= (1 << (dcount1 % 8));
+      } else {
+        desc[dcount1 / 8] &= ~(1 << (dcount1 % 8));
+      }
+      dcount1++;
+
+      if (values_2[i][1] > values_2[j][1]) {
+        desc[dcount1 / 8] |= (1 << (dcount1 % 8));
+      } else {
+        desc[dcount1 / 8] &= ~(1 << (dcount1 % 8));
+      }
+      dcount1++;
+
+      if (values_2[i][2] > values_2[j][2]) {
+        desc[dcount1 / 8] |= (1 << (dcount1 % 8));
+      } else {
+        desc[dcount1 / 8] &= ~(1 << (dcount1 % 8));
+      }
+      dcount1++;
+    }
+  }
+
+  // Third 4x4 grid
+  sample_step = pattern_size / 2;
+  dcount2 = 0;
+
+  for (int i = -pattern_size; i < pattern_size; i += sample_step) {
+    for (int j = -pattern_size; j < pattern_size; j += sample_step) {
+      di = dx = dy = 0.0;
+      nsamples = 0;
+
+      for (int k = i; k < i + sample_step; k++) {
+        for (int l = j; l < j + sample_step; l++) {
+          // Get the coordinates of the sample point
+          sample_y = yf + l * scale;
+          sample_x = xf + k * scale;
+
+          y1 = fRoundV2(sample_y);
+          x1 = fRoundV2(sample_x);
+
+          ri = *(evolution_[level].Lt.ptr<float>(y1) + x1);
+          rx = *(evolution_[level].Lx.ptr<float>(y1) + x1);
+          ry = *(evolution_[level].Ly.ptr<float>(y1) + x1);
+
+          di += ri;
+          dx += rx;
+          dy += ry;
+          nsamples++;
+        }
+      }
+
+      di /= nsamples;
+      dx /= nsamples;
+      dy /= nsamples;
+
+      values_3[dcount2][0] = di;
+      values_3[dcount2][1] = dx;
+      values_3[dcount2][2] = dy;
+      dcount2++;
+    }
+  }
+
+  // Do binary comparison third level
+  dcount2 = 0;
+  for (int i = 0; i < 16; i++) {
+    for (int j = i + 1; j < 16; j++) {
+      if (values_3[i][0] > values_3[j][0]) {
+        desc[dcount1 / 8] |= (1 << (dcount1 % 8));
+      } else {
+        desc[dcount1 / 8] &= ~(1 << (dcount1 % 8));
+      }
+      dcount1++;
+
+      if (values_3[i][1] > values_3[j][1]) {
+        desc[dcount1 / 8] |= (1 << (dcount1 % 8));
+      } else {
+        desc[dcount1 / 8] &= ~(1 << (dcount1 % 8));
+      }
+      dcount1++;
+
+      if (values_3[i][2] > values_3[j][2]) {
+        desc[dcount1 / 8] |= (1 << (dcount1 % 8));
+      } else {
+        desc[dcount1 / 8] &= ~(1 << (dcount1 % 8));
+      }
+      dcount1++;
+    }
+  }
+}
+
/**
 * @brief Fills `values` with per-cell averages of the evolution responses,
 * sampled on a grid rotated by the keypoint orientation.
 * @param values Output array; `descriptor_channels` floats per grid cell
 * @param sample_step Size of one grid cell, in samples
 * @param level Evolution (scale-space) level to sample from
 * @param xf X-coordinate of the keypoint in level coordinates
 * @param yf Y-coordinate of the keypoint in level coordinates
 * @param co Cosine of the keypoint orientation
 * @param si Sine of the keypoint orientation
 * @param scale Sampling step in pixels
 * @note With 1 channel only the intensity Lt is stored; with 2 channels the
 * gradient magnitude is added; with 3 channels the derivatives on the rotated
 * axes are stored instead of the magnitude.
 */
inline void MLDB_Full_Descriptor_InvokerV2::MLDB_Fill_Values(
    float* values, int sample_step, int level, float xf, float yf, float co,
    float si, float scale) const {
  int pattern_size = options_.descriptor_pattern_size;
  int chan = options_.descriptor_channels;
  int valpos = 0;

  for (int i = -pattern_size; i < pattern_size; i += sample_step) {
    for (int j = -pattern_size; j < pattern_size; j += sample_step) {
      float di, dx, dy;
      di = dx = dy = 0.0;
      int nsamples = 0;

      for (int k = i; k < i + sample_step; k++) {
        for (int l = j; l < j + sample_step; l++) {
          // Sample point on the axes rotated by the keypoint orientation
          float sample_y = yf + (l * co * scale + k * si * scale);
          float sample_x = xf + (-l * si * scale + k * co * scale);

          int y1 = fRoundV2(sample_y);
          int x1 = fRoundV2(sample_x);

          // The intensity channel is always accumulated
          float ri = *(evolution_[level].Lt.ptr<float>(y1) + x1);
          di += ri;

          if (chan > 1) {
            float rx = *(evolution_[level].Lx.ptr<float>(y1) + x1);
            float ry = *(evolution_[level].Ly.ptr<float>(y1) + x1);
            if (chan == 2) {
              // 2 channels: intensity + gradient magnitude
              dx += sqrtf(rx * rx + ry * ry);
            } else {
              // 3 channels: intensity + derivatives on the rotated axes
              float rry = rx * co + ry * si;
              float rrx = -rx * si + ry * co;
              dx += rrx;
              dy += rry;
            }
          }
          nsamples++;
        }
      }
      // Average over the cell; dx/dy may be unused when chan < 3 — the extra
      // divisions are harmless
      di /= nsamples;
      dx /= nsamples;
      dy /= nsamples;

      values[valpos] = di;
      if (chan > 1) {
        values[valpos + 1] = dx;
      }
      if (chan > 2) {
        values[valpos + 2] = dy;
      }
      valpos += chan;
    }
  }
}
+
+/* ************************************************************************* */
+/**
+ * @brief Performs the pairwise binary comparisons between sampled cell values
+ * and appends the resulting bits to the descriptor
+ * @param values Input array of [count] * [channels] sampled values; modified
+ * in place by the integer remapping below
+ * @param desc Output descriptor byte array
+ * @param count Number of grid cells whose values are compared pairwise
+ * @param dpos In/out bit cursor into desc, advanced past the written bits
+ */
+void MLDB_Full_Descriptor_InvokerV2::MLDB_Binary_Comparisons(
+    float* values, unsigned char* desc, int count, int& dpos) const {
+  int chan = options_.descriptor_channels;
+  // Reinterpret the floats as int32 and remap the bit patterns with
+  // CV_TOGGLE_FLT so that integer '>' matches the float ordering
+  int32_t* ivalues = (int32_t*)values;
+  for (int i = 0; i < count * chan; i++) {
+    ivalues[i] = CV_TOGGLE_FLT(ivalues[i]);
+  }
+
+  // For each channel, compare every pair (i, j) with i < j and write one bit
+  for (int pos = 0; pos < chan; pos++) {
+    for (int i = 0; i < count; i++) {
+      int32_t ival = ivalues[chan * i + pos];
+      for (int j = i + 1; j < count; j++) {
+        if (ival > ivalues[chan * j + pos]) {
+          desc[dpos >> 3] |= (1 << (dpos & 7));
+        } else {
+          desc[dpos >> 3] &= ~(1 << (dpos & 7));
+        }
+        dpos++;
+      }
+    }
+  }
+}
+
+/* ************************************************************************* */
+/**
+ * @brief This method computes the descriptor of the provided keypoint given the
+ * main orientation of the keypoint
+ * @param kpt Input keypoint
+ * @param desc Descriptor vector
+ */
+void MLDB_Full_Descriptor_InvokerV2::Get_MLDB_Full_Descriptor(
+    const KeyPoint& kpt, unsigned char* desc) const {
+  const int max_channels = 3;
+  CV_Assert(options_.descriptor_channels <= max_channels);
+  // Scratch space for one grid level: at most 16 cells x 3 channels
+  float values[16 * max_channels];
+  // Relative cell size for the three grid levels (2x2, 3x3, 4x4)
+  const double size_mult[3] = {1, 2.0 / 3.0, 1.0 / 2.0};
+
+  float ratio = evolution_[kpt.class_id].octave_ratio;
+  float scale = (float)(evolution_[kpt.class_id].sigma_size);
+  float xf = kpt.pt.x / ratio;
+  float yf = kpt.pt.y / ratio;
+  float co = cos(kpt.angle);
+  float si = sin(kpt.angle);
+  int pattern_size = options_.descriptor_pattern_size;
+
+  int dpos = 0;
+  for (int lvl = 0; lvl < 3; lvl++) {
+    int val_count = (lvl + 2) * (lvl + 2);
+    int sample_step = static_cast<int>(ceil(pattern_size * size_mult[lvl]));
+    // Sample the cell means for this grid level, then append its bits
+    MLDB_Fill_Values(values, sample_step, kpt.class_id, xf, yf, co, si, scale);
+    MLDB_Binary_Comparisons(values, desc, val_count, dpos);
+  }
+
+  // Clear the uninitialized bits of the last byte
+  int remain = dpos % 8;
+  if (remain > 0) desc[dpos >> 3] &= (0xff >> (8 - remain));
+}
+
+/* ************************************************************************* */
+/**
+ * @brief This function compares two values specified by comps[] and set the
+ * i-th bit of desc if the comparison is true.
+ * @param values Input array of values to compare
+ * @param comps Input array of indices at which two values are compared
+ * @param nbits The length of values[] as well as the number of bits to write in
+ * desc
+ * @param desc Descriptor vector
+ */
+template <typename Typ_ = uint64_t>
+inline void compare_and_pack_descriptor(const float values[], const int* comps,
+                                        const int nbits, unsigned char* desc) {
+  const int nbits_in_bucket = sizeof(Typ_) << 3;
+  // View the flat comps array as pairs of indices to compare
+  const int(*idx)[2] = (const int(*)[2])comps;
+  int written = 0;
+
+  // Accumulate comparison bits into a word-sized bucket, flushing each time
+  // the bucket fills up
+  Typ_ bucket = 0;
+  for (int i = 0; i < nbits; i++) {
+    bucket <<= 1;
+    if (values[idx[i][0]] > values[idx[i][1]]) bucket |= 1;
+
+    if ((i & (nbits_in_bucket - 1)) == (nbits_in_bucket - 1))
+      (reinterpret_cast<Typ_*>(desc))[written++] = bucket, bucket = 0;
+  }
+
+  // Flush the remaining bits in bucket, one byte at a time
+  if (written * nbits_in_bucket < nbits) {
+    written *= sizeof(Typ_); /* Convert the unit from bucket to byte */
+
+    int remain = (nbits + 7) / 8 - written;
+    for (int i = 0; i < remain; i++)
+      desc[written++] = (uint8_t)(bucket & 0xFF), bucket >>= 8;
+  }
+}
+
+/* ************************************************************************* */
+/**
+ * @brief This method computes the M-LDB descriptor of the provided keypoint
+ * given the main orientation of the keypoint. The descriptor is computed based
+ * on a subset of the bits of the whole descriptor
+ * @param kpt Input keypoint
+ * @param desc Descriptor vector
+ */
+void MLDB_Descriptor_Subset_InvokerV2::Get_MLDB_Descriptor_Subset(
+    const KeyPoint& kpt, unsigned char* desc) const {
+  const TEvolutionV2& e = evolution_[kpt.class_id];
+
+  // Get the information from the keypoint
+  const int scale = e.sigma_size;
+  const float yf = kpt.pt.y / e.octave_ratio;
+  const float xf = kpt.pt.x / e.octave_ratio;
+  const float co = cos(kpt.angle);
+  const float si = sin(kpt.angle);
+
+  // Matrices for the M-LDB descriptor: the size is [grid size] * [channel size]
+  CV_DbgAssert(descriptorSamples_.rows <= (4 + 9 + 16));
+  CV_DbgAssert(options_.descriptor_channels <= 3);
+  float values[(4 + 9 + 16) * 3];
+
+  // coords[3] is { grid_width, x, y }
+  const int* coords = descriptorSamples_.ptr<int>(0);
+
+  // Sample everything, but only do the comparisons
+  for (int i = 0; i < descriptorSamples_.rows; i++, coords += 3) {
+    float di = 0.0f;
+    float dx = 0.0f;
+    float dy = 0.0f;
+
+    // Accumulate all samples inside this grid cell
+    for (int x = coords[1]; x < coords[1] + coords[0]; x++) {
+      for (int y = coords[2]; y < coords[2] + coords[0]; y++) {
+        // Get the coordinates of the sample point, rotated by the orientation
+        int x1 = fRoundV2(xf + (x * scale * co - y * scale * si));
+        int y1 = fRoundV2(yf + (x * scale * si + y * scale * co));
+
+        di += *(e.Lt.ptr<float>(y1) + x1);
+
+        if (options_.descriptor_channels > 1) {
+          float rx = *(e.Lx.ptr<float>(y1) + x1);
+          float ry = *(e.Ly.ptr<float>(y1) + x1);
+
+          if (options_.descriptor_channels == 2) {
+            // Two channels: intensity plus gradient magnitude
+            dx += sqrtf(rx * rx + ry * ry);
+          } else if (options_.descriptor_channels == 3) {
+            // Get the x and y derivatives on the rotated axis
+            dx += rx * co + ry * si;
+            dy += -rx * si + ry * co;
+          }
+        }
+      }
+    }
+
+    // Store the per-cell sums, one slot per enabled channel
+    values[i * options_.descriptor_channels] = di;
+
+    if (options_.descriptor_channels == 2) {
+      values[i * options_.descriptor_channels + 1] = dx;
+    } else if (options_.descriptor_channels == 3) {
+      values[i * options_.descriptor_channels + 1] = dx;
+      values[i * options_.descriptor_channels + 2] = dy;
+    }
+  }
+
+  // Do the comparisons
+  compare_and_pack_descriptor<uint64_t>(values, descriptorBits_.ptr<int>(0),
+                                        descriptorBits_.rows, desc);
+}
+
+/* ************************************************************************* */
+/**
+ * @brief This method computes the upright (not rotation invariant) M-LDB
+ * descriptor of the provided keypoint given the main orientation of the
+ * keypoint. The descriptor is computed based on a subset of the bits of the
+ * whole descriptor
+ * @param kpt Input keypoint
+ * @param desc Descriptor vector
+ */
+void Upright_MLDB_Descriptor_Subset_InvokerV2::
+    Get_Upright_MLDB_Descriptor_Subset(const KeyPoint& kpt,
+                                       unsigned char* desc) const {
+  const TEvolutionV2& e = evolution_[kpt.class_id];
+
+  // Get the information from the keypoint
+  const int scale = e.sigma_size;
+  const float yf = kpt.pt.y / e.octave_ratio;
+  const float xf = kpt.pt.x / e.octave_ratio;
+
+  // Matrices for the M-LDB descriptor: the size is [grid size] * [channel size]
+  CV_DbgAssert(descriptorSamples_.rows <= (4 + 9 + 16));
+  CV_DbgAssert(options_.descriptor_channels <= 3);
+  float values[(4 + 9 + 16) * 3];
+
+  // coords[3] is { grid_width, x, y }
+  const int* coords = descriptorSamples_.ptr<int>(0);
+
+  for (int i = 0; i < descriptorSamples_.rows; i++, coords += 3) {
+    float di = 0.0f;
+    float dx = 0.0f;
+    float dy = 0.0f;
+
+    // Accumulate all samples inside this grid cell (no rotation applied)
+    for (int x = coords[1]; x < coords[1] + coords[0]; x++) {
+      for (int y = coords[2]; y < coords[2] + coords[0]; y++) {
+        // Get the coordinates of the sample point
+        int x1 = fRoundV2(xf + x * scale);
+        int y1 = fRoundV2(yf + y * scale);
+
+        di += *(e.Lt.ptr<float>(y1) + x1);
+
+        if (options_.descriptor_channels > 1) {
+          float rx = *(e.Lx.ptr<float>(y1) + x1);
+          float ry = *(e.Ly.ptr<float>(y1) + x1);
+
+          if (options_.descriptor_channels == 2) {
+            // Two channels: intensity plus gradient magnitude
+            dx += sqrtf(rx * rx + ry * ry);
+          } else if (options_.descriptor_channels == 3) {
+            // Three channels: raw derivatives (no axis rotation, upright)
+            dx += rx;
+            dy += ry;
+          }
+        }
+      }
+    }
+
+    // Store the per-cell sums, one slot per enabled channel
+    values[i * options_.descriptor_channels] = di;
+
+    if (options_.descriptor_channels == 2) {
+      values[i * options_.descriptor_channels + 1] = dx;
+    } else if (options_.descriptor_channels == 3) {
+      values[i * options_.descriptor_channels + 1] = dx;
+      values[i * options_.descriptor_channels + 2] = dy;
+    }
+  }
+
+  // Do the comparisons
+  compare_and_pack_descriptor<uint64_t>(values, descriptorBits_.ptr<int>(0),
+                                        descriptorBits_.rows, desc);
+}
+
+/* ************************************************************************* */
+/**
+ * @brief This function computes a (quasi-random) list of bits to be taken
+ * from the full descriptor. To speed the extraction, the function creates
+ * a list of the samples that are involved in generating at least a bit
+ * (sampleList) and a list of the comparisons between those samples
+ * (comparisons)
+ * @param sampleList
+ * @param comparisons The matrix with the binary comparisons
+ * @param nbits The number of bits of the descriptor
+ * @param pattern_size The pattern size for the binary descriptor
+ * @param nchannels Number of channels to consider in the descriptor (1-3)
+ * @note The function keeps the 18 bits (3-channels by 6 comparisons) of the
+ * coarser grid, since it provides the most robust estimations
+ */
+static void generateDescriptorSubsampleV2(Mat& sampleList, Mat& comparisons,
+                                          int nbits, int pattern_size,
+                                          int nchannels) {
+#if 0
+  // Replaced by an immediate to use stack; need C++11 constexpr to use the logic
+  int fullM_rows = 0;
+  for (int i = 0; i < 3; i++) {
+    int gz = (i + 2)*(i + 2);
+    fullM_rows += gz*(gz - 1) / 2;
+  }
+#else
+  const int fullM_rows = 162;
+#endif
+
+  int ssz = fullM_rows * nchannels;  // ssz is 486 when nchannels is 3
+
+  CV_Assert(nbits <=
+            ssz);  // Descriptor size can't be bigger than full descriptor
+
+  // Sample step for each of the three grid levels
+  const int steps[3] = {pattern_size, (int)ceil(2.f * pattern_size / 3.f),
+                        pattern_size / 2};
+
+  // Since the full descriptor is usually under 10k elements, we pick
+  // the selection from the full matrix.  We take as many samples per
+  // pick as the number of channels. For every pick, we
+  // take the two samples involved and put them in the sampling list
+
+  int fullM_stack[fullM_rows *
+                  5];  // About 6.3KB workspace with 64-bit int on stack
+  Mat_<int> fullM(fullM_rows, 5, fullM_stack);
+
+  // Enumerate every pairwise cell comparison of each grid level; one row per
+  // comparison: { step, x1, y1, x2, y2 } in pattern coordinates
+  for (int i = 0, c = 0; i < 3; i++) {
+    int gdiv = i + 2;  // grid divisions, per row
+    int gsz = gdiv * gdiv;
+    int psz = (int)ceil(2.f * pattern_size / (float)gdiv);
+
+    for (int j = 0; j < gsz; j++) {
+      for (int k = j + 1; k < gsz; k++, c++) {
+        fullM(c, 0) = steps[i];
+        fullM(c, 1) = psz * (j % gdiv) - pattern_size;
+        fullM(c, 2) = psz * (j / gdiv) - pattern_size;
+        fullM(c, 3) = psz * (k % gdiv) - pattern_size;
+        fullM(c, 4) = psz * (k / gdiv) - pattern_size;
+      }
+    }
+  }
+
+  int comps_stack[486 * 2];  // About 7.6KB workspace with 64-bit int on stack
+  Mat_<int> comps(486, 2, comps_stack);
+  // Fill the comparison list with an out-of-range sentinel value
+  comps = 1000;
+
+  int samples_stack[(4 + 9 + 16) *
+                    3];  // 696 bytes workspace with 64-bit int on stack
+  Mat_<int> samples((4 + 9 + 16), 3, samples_stack);
+
+  // Select some samples. A sample includes all channels
+  int count = 0;
+  int npicks = (int)ceil(nbits / (float)nchannels);
+  // Mark all sample slots as unused
+  samples = -1;
+
+  // Fixed seed keeps the bit selection deterministic across runs
+  srand(1024);
+  for (int i = 0; i < npicks; i++) {
+    int k = rand() % (fullM_rows - i);
+    if (i < 6) {
+      // Force use of the coarser grid values and comparisons
+      k = i;
+    }
+
+    // n stays true while the first endpoint of the comparison is not yet in
+    // the sample list
+    bool n = true;
+
+    for (int j = 0; j < count; j++) {
+      if (samples(j, 0) == fullM(k, 0) && samples(j, 1) == fullM(k, 1) &&
+          samples(j, 2) == fullM(k, 2)) {
+        n = false;
+        comps(i * nchannels, 0) = nchannels * j;
+        comps(i * nchannels + 1, 0) = nchannels * j + 1;
+        comps(i * nchannels + 2, 0) = nchannels * j + 2;
+        break;
+      }
+    }
+
+    if (n) {
+      // First endpoint is new: append it and reference its channels
+      samples(count, 0) = fullM(k, 0);
+      samples(count, 1) = fullM(k, 1);
+      samples(count, 2) = fullM(k, 2);
+      comps(i * nchannels, 0) = nchannels * count;
+      comps(i * nchannels + 1, 0) = nchannels * count + 1;
+      comps(i * nchannels + 2, 0) = nchannels * count + 2;
+      count++;
+    }
+
+    // Repeat the lookup/insert for the second endpoint of the comparison
+    n = true;
+    for (int j = 0; j < count; j++) {
+      if (samples(j, 0) == fullM(k, 0) && samples(j, 1) == fullM(k, 3) &&
+          samples(j, 2) == fullM(k, 4)) {
+        n = false;
+        comps(i * nchannels, 1) = nchannels * j;
+        comps(i * nchannels + 1, 1) = nchannels * j + 1;
+        comps(i * nchannels + 2, 1) = nchannels * j + 2;
+        break;
+      }
+    }
+
+    if (n) {
+      samples(count, 0) = fullM(k, 0);
+      samples(count, 1) = fullM(k, 3);
+      samples(count, 2) = fullM(k, 4);
+      comps(i * nchannels, 1) = nchannels * count;
+      comps(i * nchannels + 1, 1) = nchannels * count + 1;
+      comps(i * nchannels + 2, 1) = nchannels * count + 2;
+      count++;
+    }
+
+    // Overwrite the used row with the last not-yet-considered row so the
+    // same comparison cannot be picked twice
+    fullM.row(fullM.rows - i - 1).copyTo(fullM.row(k));
+  }
+
+  sampleList = samples.rowRange(0, count).clone();
+  comparisons = comps.rowRange(0, nbits).clone();
+}
+
+}  // namespace cv
\ No newline at end of file
diff --git a/third_party/akaze/AKAZEFeatures.h b/third_party/akaze/AKAZEFeatures.h
new file mode 100644
index 0000000..77740b2
--- /dev/null
+++ b/third_party/akaze/AKAZEFeatures.h
@@ -0,0 +1,94 @@
+/**
+ * @file AKAZE.h
+ * @brief Main class for detecting and computing binary descriptors in an
+ * accelerated nonlinear scale space
+ * @date Mar 27, 2013
+ * @author Pablo F. Alcantarilla, Jesus Nuevo
+ */
+
+#ifndef __OPENCV_FEATURES_2D_AKAZE_FEATURES_H__
+#define __OPENCV_FEATURES_2D_AKAZE_FEATURES_H__
+
+/* ************************************************************************* */
+// Includes
+#include <vector>
+
+#define AKAZE_USE_CPP11_THREADING
+
+#ifdef AKAZE_USE_CPP11_THREADING
+#include <atomic>
+#include <future>
+#endif
+
+#include <opencv2/core.hpp>
+
+#include "AKAZEConfig.h"
+#include "TEvolution.h"
+
+namespace cv {
+
+/* ************************************************************************* */
+// AKAZE Class Declaration
+class AKAZEFeaturesV2 {
+ private:
+  AKAZEOptionsV2 options_;  ///< Configuration options for AKAZE
+  std::vector<TEvolutionV2>
+      evolution_;  ///< Vector of nonlinear diffusion evolution
+
+  /// FED parameters
+  bool reordering_;  ///< Flag for reordering time steps
+  std::vector<std::vector<float>>
+      tsteps_;  ///< Vector of FED dynamic time steps
+
+  /// Matrices for the M-LDB descriptor computation
+  cv::Mat descriptorSamples_;  // List of positions in the grids to sample LDB
+                               // bits from.
+  cv::Mat descriptorBits_;  // Pairs of sample indices compared per bit
+  cv::Mat bitMask_;
+
+  /// Preallocated temporary variables (reused across frames of the same size)
+  cv::Mat gray_, lx_, ly_;
+  cv::Mat lflow_, lstep_;
+  cv::Mat histgram_, modgs_;
+  std::vector<std::vector<cv::KeyPoint>> kpts_aux_;
+
+#ifdef AKAZE_USE_CPP11_THREADING
+  /// Task handles used to overlap per-level work across std::async threads
+  using task = std::future<void>;
+  std::vector<std::vector<task>> tasklist_;
+  std::vector<std::atomic_int> taskdeps_;  // Per-level dependency counters
+  std::future<float> kcontrast_;  // Asynchronously computed contrast factor
+#endif
+
+ public:
+  /// Constructor with input arguments
+  AKAZEFeaturesV2(const AKAZEOptionsV2& options);
+
+  /// Getters and Setters
+  void setThreshold(double threshold_) {
+    options_.dthreshold = std::max((float)threshold_, options_.min_dthreshold);
+  };
+  double getThreshold() const { return options_.dthreshold; }
+  void setDiffusivity(int diff_) { options_.diffusivity = diff_; }
+  int getDiffusivity() const { return options_.diffusivity; }
+
+  /// Scale Space methods
+  void Allocate_Memory_Evolution();
+  int Create_Nonlinear_Scale_Space(const cv::Mat& img);
+  float Compute_Base_Evolution_Level(const cv::Mat& img);
+  void Feature_Detection(std::vector<cv::KeyPoint>& kpts);
+  void Compute_Determinant_Hessian_Response(const int level);
+  void Compute_Determinant_Hessian_Response_Single(const int level);
+  void Find_Scale_Space_Extrema(
+      std::vector<std::vector<cv::KeyPoint>>& kpts_aux);
+  void Find_Scale_Space_Extrema_Single(
+      std::vector<std::vector<cv::KeyPoint>>& kpts_aux);
+  void Do_Subpixel_Refinement(std::vector<std::vector<cv::KeyPoint>>& kpts_aux,
+                              std::vector<cv::KeyPoint>& kpts);
+
+  /// Feature description methods
+  void Compute_Descriptors(std::vector<cv::KeyPoint>& kpts, cv::Mat& desc);
+};
+
+}  // namespace cv
+
+#endif
\ No newline at end of file
diff --git a/third_party/akaze/BUILD b/third_party/akaze/BUILD
new file mode 100644
index 0000000..fe74381
--- /dev/null
+++ b/third_party/akaze/BUILD
@@ -0,0 +1,101 @@
+# Build rules for the vendored fast_akaze library (see README.md).
+# Each cc_library below wraps one source/header pair; ":akaze" is the
+# public entry point, the rest are its internal dependencies.
+cc_library(
+    name = "akaze",
+    srcs = [
+        "akaze.cpp",
+    ],
+    hdrs = [
+        "akaze.h",
+    ],
+    target_compatible_with = ["@platforms//os:linux"],
+    visibility = ["//visibility:public"],
+    deps = [
+        ":akaze_features",
+        "//third_party:opencv",
+    ],
+)
+
+cc_library(
+    name = "akaze_config",
+    hdrs = [
+        "AKAZEConfig.h",
+    ],
+    target_compatible_with = ["@platforms//os:linux"],
+    visibility = ["//visibility:public"],
+    deps = [
+        "//third_party:opencv",
+    ],
+)
+
+cc_library(
+    name = "akaze_features",
+    srcs = [
+        "AKAZEFeatures.cpp",
+    ],
+    hdrs = [
+        "AKAZEFeatures.h",
+    ],
+    target_compatible_with = ["@platforms//os:linux"],
+    visibility = ["//visibility:public"],
+    deps = [
+        ":akaze_config",
+        ":fed",
+        ":nldiffusion_functions",
+        ":t_evolution",
+        ":utils",
+        "//third_party:opencv",
+    ],
+)
+
+cc_library(
+    name = "fed",
+    srcs = [
+        "fed.cpp",
+    ],
+    hdrs = [
+        "fed.h",
+    ],
+    target_compatible_with = ["@platforms//os:linux"],
+    visibility = ["//visibility:public"],
+    deps = [
+        "//third_party:opencv",
+    ],
+)
+
+cc_library(
+    name = "nldiffusion_functions",
+    srcs = [
+        "nldiffusion_functions.cpp",
+    ],
+    hdrs = [
+        "nldiffusion_functions.h",
+    ],
+    target_compatible_with = ["@platforms//os:linux"],
+    visibility = ["//visibility:public"],
+    deps = [
+        "//third_party:opencv",
+    ],
+)
+
+cc_library(
+    name = "t_evolution",
+    hdrs = [
+        "TEvolution.h",
+    ],
+    target_compatible_with = ["@platforms//os:linux"],
+    visibility = ["//visibility:public"],
+    deps = [
+        "//third_party:opencv",
+    ],
+)
+
+cc_library(
+    name = "utils",
+    hdrs = [
+        "utils.h",
+    ],
+    target_compatible_with = ["@platforms//os:linux"],
+    visibility = ["//visibility:public"],
+    deps = [
+        "//third_party:opencv",
+    ],
+)
diff --git a/third_party/akaze/README.md b/third_party/akaze/README.md
new file mode 100644
index 0000000..8cfb1c6
--- /dev/null
+++ b/third_party/akaze/README.md
@@ -0,0 +1,4 @@
+This is a copy of the following repository:
+https://github.com/h2suzuki/fast_akaze
+
+OpenCV's stock AKAZE implementation only ran at about 30 fps on a laptop, which is too slow for our use case, so this faster fork is vendored here as a starting point for further speedups.
\ No newline at end of file
diff --git a/third_party/akaze/TEvolution.h b/third_party/akaze/TEvolution.h
new file mode 100644
index 0000000..f2bc0ee
--- /dev/null
+++ b/third_party/akaze/TEvolution.h
@@ -0,0 +1,48 @@
+/**
+ * @file TEvolution.h
+ * @brief Header file with the declaration of the TEvolution struct
+ * @date Jun 02, 2014
+ * @author Pablo F. Alcantarilla
+ */
+
+#ifndef __OPENCV_FEATURES_2D_TEVOLUTION_H__
+#define __OPENCV_FEATURES_2D_TEVOLUTION_H__
+
+#include <opencv2/core.hpp>
+
+namespace cv {
+
+/* ************************************************************************* */
+/// KAZE/A-KAZE nonlinear diffusion filtering evolution.
+/// Plain data holder for one level of the nonlinear scale space; the
+/// constructor zero-initializes all scalar members.
+struct TEvolutionV2 {
+  TEvolutionV2() {
+    etime = 0.0f;
+    esigma = 0.0f;
+    octave = 0;
+    sublevel = 0;
+    sigma_size = 0;
+    border = 0;  // was previously left uninitialized
+    octave_ratio = 1.0f;
+  }
+
+  Mat Lx, Ly;         ///< First order spatial derivatives
+  Mat Lxx, Lxy, Lyy;  ///< Second order spatial derivatives
+  Mat Lt;             ///< Evolution image
+  Mat Lsmooth;        ///< Smoothed image
+  Mat Ldet;           ///< Detector response
+
+  Mat DxKx, DxKy;  ///< Derivative kernels (kx and ky) of xorder = 1
+  Mat DyKx, DyKy;  ///< Derivative kernels (kx and ky) of yorder = 1
+
+  float etime;     ///< Evolution time
+  float esigma;    ///< Evolution sigma. For linear diffusion t = sigma^2 / 2
+  int octave;      ///< Image octave
+  int sublevel;    ///< Image sublevel in each octave
+  int sigma_size;  ///< Scaling factor of esigma that is round(esigma *
+                   ///< derivative_factor / power)
+  int border;      ///< Width of border where descriptors cannot be computed
+  float octave_ratio;  ///< Scaling ratio of this octave. ratio = 2^octave
+};
+
+}  // namespace cv
+
+#endif
\ No newline at end of file
diff --git a/third_party/akaze/akaze.cpp b/third_party/akaze/akaze.cpp
new file mode 100644
index 0000000..2ad044c
--- /dev/null
+++ b/third_party/akaze/akaze.cpp
@@ -0,0 +1,273 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+//  By downloading, copying, installing or using the software you agree to this
+license.
+//  If you do not agree to this license, do not download, install,
+//  copy or use the software.
+//
+//
+//                           License Agreement
+//                For Open Source Computer Vision Library
+//
+// Copyright (C) 2008, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without
+modification,
+// are permitted provided that the following conditions are met:
+//
+//   * Redistribution's of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//
+//   * Redistribution's in binary form must reproduce the above copyright
+notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other materials provided with the distribution.
+//
+//   * The name of Intel Corporation may not be used to endorse or promote
+products
+//     derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is"
+and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are
+disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any
+direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+/*
+OpenCV wrapper of reference implementation of
+[1] Fast Explicit Diffusion for Accelerated Features in Nonlinear Scale Spaces.
+Pablo F. Alcantarilla, J. Nuevo and Adrien Bartoli.
+In British Machine Vision Conference (BMVC), Bristol, UK, September 2013
+http://www.robesafe.com/personal/pablo.alcantarilla/papers/Alcantarilla13bmvc.pdf
+@author Eugene Khvedchenya <ekhvedchenya@gmail.com>
+*/
+
+#include "akaze.h"  // Define AKAZE2; included in place of <opencv2/features2d.hpp>
+
+#include <iostream>
+#include <opencv2/core.hpp>
+#include <opencv2/imgproc.hpp>
+
+#include "AKAZEFeatures.h"
+
+namespace cv {
+using namespace std;
+
+class AKAZE_Impl2 : public AKAZE2 {
+ public:
+  /// Construct with descriptor/scale-space parameters. The actual
+  /// AKAZEFeaturesV2 implementation is created lazily in detectAndCompute()
+  /// once the image size is known, since its buffers depend on that size.
+  AKAZE_Impl2(int _descriptor_type, int _descriptor_size,
+              int _descriptor_channels, float _threshold, int _octaves,
+              int _sublevels, int _diffusivity)
+      : descriptor(_descriptor_type),
+        descriptor_channels(_descriptor_channels),
+        descriptor_size(_descriptor_size),
+        threshold(_threshold),
+        octaves(_octaves),
+        sublevels(_sublevels),
+        diffusivity(_diffusivity),
+        img_width(-1),
+        img_height(-1) {
+    // NOTE: removed a debug "constructor called" print to stdout; library
+    // code should not write to cout.
+  }
+
+  virtual ~AKAZE_Impl2() {}
+
+  // Structural parameters invalidate the cached implementation; it is
+  // rebuilt on the next detectAndCompute() call.
+  void setDescriptorType(int dtype_) {
+    descriptor = dtype_;
+    impl.release();
+  }
+  int getDescriptorType() const { return descriptor; }
+
+  void setDescriptorSize(int dsize_) {
+    descriptor_size = dsize_;
+    impl.release();
+  }
+  int getDescriptorSize() const { return descriptor_size; }
+
+  void setDescriptorChannels(int dch_) {
+    descriptor_channels = dch_;
+    impl.release();
+  }
+  int getDescriptorChannels() const { return descriptor_channels; }
+
+  // Threshold and diffusivity can be updated on a live implementation
+  // without rebuilding it.
+  void setThreshold(double th_) {
+    threshold = (float)th_;
+    if (!impl.empty()) impl->setThreshold(th_);
+  }
+  double getThreshold() const { return threshold; }
+
+  void setNOctaves(int octaves_) {
+    octaves = octaves_;
+    impl.release();
+  }
+  int getNOctaves() const { return octaves; }
+
+  void setNOctaveLayers(int octaveLayers_) {
+    sublevels = octaveLayers_;
+    impl.release();
+  }
+  int getNOctaveLayers() const { return sublevels; }
+
+  void setDiffusivity(int diff_) {
+    diffusivity = diff_;
+    if (!impl.empty()) impl->setDiffusivity(diff_);
+  }
+  int getDiffusivity() const { return diffusivity; }
+
+  // returns the descriptor size in bytes
+  int descriptorSize() const {
+    switch (descriptor) {
+      case DESCRIPTOR_KAZE:
+      case DESCRIPTOR_KAZE_UPRIGHT:
+        return 64;
+
+      case DESCRIPTOR_MLDB:
+      case DESCRIPTOR_MLDB_UPRIGHT:
+        // We use the full length binary descriptor -> 486 bits
+        if (descriptor_size == 0) {
+          int t = (6 + 36 + 120) * descriptor_channels;
+          return (int)ceil(t / 8.);
+        } else {
+          // We use the random bit selection length binary descriptor
+          return (int)ceil(descriptor_size / 8.);
+        }
+
+      default:
+        return -1;
+    }
+  }
+
+  // returns the descriptor type
+  int descriptorType() const {
+    switch (descriptor) {
+      case DESCRIPTOR_KAZE:
+      case DESCRIPTOR_KAZE_UPRIGHT:
+        return CV_32F;
+
+      case DESCRIPTOR_MLDB:
+      case DESCRIPTOR_MLDB_UPRIGHT:
+        return CV_8U;
+
+      default:
+        return -1;
+    }
+  }
+
+  // returns the default norm type
+  int defaultNorm() const {
+    switch (descriptor) {
+      case DESCRIPTOR_KAZE:
+      case DESCRIPTOR_KAZE_UPRIGHT:
+        return NORM_L2;
+
+      case DESCRIPTOR_MLDB:
+      case DESCRIPTOR_MLDB_UPRIGHT:
+        return NORM_HAMMING;
+
+      default:
+        return -1;
+    }
+  }
+
+  /// Detect keypoints and/or compute descriptors. The implementation is
+  /// (re)created whenever the image size changes.
+  void detectAndCompute(InputArray image, InputArray mask,
+                        std::vector<KeyPoint>& keypoints,
+                        OutputArray descriptors, bool useProvidedKeypoints) {
+    Mat img = image.getMat();
+
+    if (img_width != img.cols) {
+      img_width = img.cols;
+      impl.release();
+    }
+
+    if (img_height != img.rows) {
+      img_height = img.rows;
+      impl.release();
+    }
+
+    if (impl.empty()) {
+      AKAZEOptionsV2 options;
+      options.descriptor = descriptor;
+      options.descriptor_channels = descriptor_channels;
+      options.descriptor_size = descriptor_size;
+      options.img_width = img_width;
+      options.img_height = img_height;
+      options.dthreshold = threshold;
+      options.omax = octaves;
+      options.nsublevels = sublevels;
+      options.diffusivity = diffusivity;
+
+      impl = makePtr<AKAZEFeaturesV2>(options);
+    }
+
+    impl->Create_Nonlinear_Scale_Space(img);
+
+    if (!useProvidedKeypoints) {
+      impl->Feature_Detection(keypoints);
+    }
+
+    if (!mask.empty()) {
+      KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat());
+    }
+
+    if (descriptors.needed()) {
+      Mat& desc = descriptors.getMatRef();
+      impl->Compute_Descriptors(keypoints, desc);
+
+      // Sanity-check the produced descriptor matrix shape and type
+      CV_Assert((!desc.rows || desc.cols == descriptorSize()));
+      CV_Assert((!desc.rows || (desc.type() == descriptorType())));
+    }
+  }
+
+  /// Serialize the tuning parameters to a FileStorage
+  void write(FileStorage& fs) const {
+    fs << "descriptor" << descriptor;
+    fs << "descriptor_channels" << descriptor_channels;
+    fs << "descriptor_size" << descriptor_size;
+    fs << "threshold" << threshold;
+    fs << "octaves" << octaves;
+    fs << "sublevels" << sublevels;
+    fs << "diffusivity" << diffusivity;
+  }
+
+  /// Restore the tuning parameters from a FileNode
+  void read(const FileNode& fn) {
+    descriptor = (int)fn["descriptor"];
+    descriptor_channels = (int)fn["descriptor_channels"];
+    descriptor_size = (int)fn["descriptor_size"];
+    threshold = (float)fn["threshold"];
+    octaves = (int)fn["octaves"];
+    sublevels = (int)fn["sublevels"];
+    diffusivity = (int)fn["diffusivity"];
+  }
+
+  Ptr<AKAZEFeaturesV2> impl;  ///< Lazily-created implementation
+  int descriptor;
+  int descriptor_channels;
+  int descriptor_size;
+  float threshold;
+  int octaves;
+  int sublevels;
+  int diffusivity;
+  int img_width;   ///< Image size the current impl was built for (-1 = none)
+  int img_height;
+};
+
+/// Factory: builds an AKAZE2 detector/extractor with the given descriptor
+/// and scale-space parameters, returning it behind the base-class pointer.
+Ptr<AKAZE2> AKAZE2::create(int descriptor_type, int descriptor_size,
+                           int descriptor_channels, float threshold,
+                           int octaves, int sublevels, int diffusivity) {
+  Ptr<AKAZE_Impl2> instance = makePtr<AKAZE_Impl2>(
+      descriptor_type, descriptor_size, descriptor_channels, threshold,
+      octaves, sublevels, diffusivity);
+  return instance;
+}
+}  // namespace cv
\ No newline at end of file
diff --git a/third_party/akaze/akaze.h b/third_party/akaze/akaze.h
new file mode 100644
index 0000000..1930eae
--- /dev/null
+++ b/third_party/akaze/akaze.h
@@ -0,0 +1,86 @@
+#ifndef __OPENCV_FEATURES_2D_AKAZE2_HPP__
+#define __OPENCV_FEATURES_2D_AKAZE2_HPP__
+
+#include <opencv2/core.hpp>
+#include <opencv2/features2d.hpp>
+
+/*
+  This file is the excerpt from opencv/feature2d.hpp to provide the local AKAZE
+  class definition. In addition, the class name is changed from AKAZE to AKAZE2
+  to avoid possible confusion between this local variant and OpenCV's original
+  AKAZE class.
+*/
+
+namespace cv {
+
+//! @addtogroup features2d
+//! @{
+
+//! @addtogroup features2d_main
+//! @{
+
+/** @brief Class implementing the AKAZE keypoint detector and descriptor
+extractor, described in @cite ANB13 . :
+@note AKAZE descriptors can only be used with KAZE or AKAZE keypoints. Try to
+avoid using *extract* and *detect* instead of *operator()* due to performance
+reasons. .. [ANB13] Fast Explicit Diffusion for Accelerated Features in
+Nonlinear Scale Spaces. Pablo F. Alcantarilla, Jesús Nuevo and Adrien Bartoli.
+In British Machine Vision Conference (BMVC), Bristol, UK, September 2013.
+ */
+class CV_EXPORTS_W AKAZE2 : public Feature2D {
+ public:
+  // AKAZE descriptor type
+  enum {
+    DESCRIPTOR_KAZE_UPRIGHT =
+        2,  ///< Upright descriptors, not invariant to rotation
+    DESCRIPTOR_KAZE = 3,
+    DESCRIPTOR_MLDB_UPRIGHT =
+        4,  ///< Upright descriptors, not invariant to rotation
+    DESCRIPTOR_MLDB = 5
+  };
+
+  /** @brief The AKAZE constructor
+  @param descriptor_type Type of the extracted descriptor: DESCRIPTOR_KAZE,
+  DESCRIPTOR_KAZE_UPRIGHT, DESCRIPTOR_MLDB or DESCRIPTOR_MLDB_UPRIGHT.
+  @param descriptor_size Size of the descriptor in bits. 0 -\> Full size
+  @param descriptor_channels Number of channels in the descriptor (1, 2, 3)
+  @param threshold Detector response threshold to accept point
+  @param nOctaves Maximum octave evolution of the image
+  @param nOctaveLayers Default number of sublevels per scale level
+  @param diffusivity Diffusivity type. DIFF_PM_G1, DIFF_PM_G2, DIFF_WEICKERT or
+  DIFF_CHARBONNIER
+   */
+  CV_WRAP static Ptr<AKAZE2> create(
+      int descriptor_type = AKAZE::DESCRIPTOR_MLDB, int descriptor_size = 0,
+      int descriptor_channels = 3, float threshold = 0.001f, int nOctaves = 4,
+      int nOctaveLayers = 4, int diffusivity = KAZE::DIFF_PM_G2);
+
+  CV_WRAP virtual void setDescriptorType(int dtype) = 0;
+  CV_WRAP virtual int getDescriptorType() const = 0;
+
+  CV_WRAP virtual void setDescriptorSize(int dsize) = 0;
+  CV_WRAP virtual int getDescriptorSize() const = 0;
+
+  CV_WRAP virtual void setDescriptorChannels(int dch) = 0;
+  CV_WRAP virtual int getDescriptorChannels() const = 0;
+
+  CV_WRAP virtual void setThreshold(double threshold) = 0;
+  CV_WRAP virtual double getThreshold() const = 0;
+
+  CV_WRAP virtual void setNOctaves(int octaves) = 0;
+  CV_WRAP virtual int getNOctaves() const = 0;
+
+  CV_WRAP virtual void setNOctaveLayers(int octaveLayers) = 0;
+  CV_WRAP virtual int getNOctaveLayers() const = 0;
+
+  CV_WRAP virtual void setDiffusivity(int diff) = 0;
+  CV_WRAP virtual int getDiffusivity() const = 0;
+};
+
+//! @} features2d_main
+
+//! @} features2d
+
+} /* namespace cv */
+
+#endif
\ No newline at end of file
diff --git a/third_party/akaze/fed.cpp b/third_party/akaze/fed.cpp
new file mode 100644
index 0000000..835441c
--- /dev/null
+++ b/third_party/akaze/fed.cpp
@@ -0,0 +1,181 @@
+//=============================================================================
+//
+// fed.cpp
+// Authors: Pablo F. Alcantarilla (1), Jesus Nuevo (2)
+// Institutions: Georgia Institute of Technology (1)
+//               TrueVision Solutions (2)
+// Date: 15/09/2013
+// Email: pablofdezalc@gmail.com
+//
+// AKAZE Features Copyright 2013, Pablo F. Alcantarilla, Jesus Nuevo
+// All Rights Reserved
+// See LICENSE for the license information
+//=============================================================================
+
+/**
+ * @file fed.cpp
+ * @brief Functions for performing Fast Explicit Diffusion and building the
+ * nonlinear scale space
+ * @date Sep 15, 2013
+ * @author Pablo F. Alcantarilla, Jesus Nuevo
+ * @note This code is derived from FED/FJ library from Grewenig et al.,
+ * The FED/FJ library allows solving more advanced problems
+ * Please look at the following papers for more information about FED:
+ * [1] S. Grewenig, J. Weickert, C. Schroers, A. Bruhn. Cyclic Schemes for
+ * PDE-Based Image Analysis. Technical Report No. 327, Department of
+ * Mathematics, Saarland University, Saarbrücken, Germany, March 2013 [2] S.
+ * Grewenig, J. Weickert, A. Bruhn. From box filtering to fast explicit
+ * diffusion. DAGM, 2010
+ *
+ */
+#include "fed.h"
+
+#include <opencv2/core.hpp>
+
+using namespace std;
+
+//*************************************************************************************
+//*************************************************************************************
+
+/**
+ * @brief This function allocates an array of the least number of time steps
+ * such that a certain stopping time for the whole process can be obtained and
+ * fills it with the respective FED time step sizes for one cycle. The function
+ * returns the number of time steps per cycle or 0 on failure
+ * @param T Desired process stopping time
+ * @param M Desired number of cycles
+ * @param tau_max Stability limit for the explicit scheme
+ * @param reordering Reordering flag
+ * @param tau The vector with the dynamic step sizes
+ */
+int fed_tau_by_process_timeV2(const float& T, const int& M,
+                              const float& tau_max, const bool& reordering,
+                              std::vector<float>& tau) {
+  // All cycles have the same fraction of the stopping time
+  return fed_tau_by_cycle_timeV2(T / (float)M, tau_max, reordering, tau);
+}
+
+//*************************************************************************************
+//*************************************************************************************
+
+/**
+ * @brief This function allocates an array of the least number of time steps
+ * such that a certain stopping time for the whole process can be obtained and
+ * fills it with the respective FED time step sizes for one cycle. The
+ * function returns the number of time steps per cycle or 0 on failure
+ * @param t Desired cycle stopping time
+ * @param tau_max Stability limit for the explicit scheme
+ * @param reordering Reordering flag
+ * @param tau The vector with the dynamic step sizes
+ */
+inline int fed_tau_by_cycle_timeV2(const float& t, const float& tau_max,
+                                   const bool& reordering,
+                                   std::vector<float>& tau) {
+  int n = 0;          // Number of time steps
+  float scale = 0.0;  // Ratio of t we search to maximal t
+
+  // Compute necessary number of time steps
+  n = (int)(ceilf(sqrtf(3.0f * t / tau_max + 0.25f) - 0.5f - 1.0e-8f) + 0.5f);
+  scale = 3.0f * t / (tau_max * (float)(n * (n + 1)));
+
+  // Call internal FED time step creation routine
+  return fed_tau_internalV2(n, scale, tau_max, reordering, tau);
+}
+
+//*************************************************************************************
+//*************************************************************************************
+
+/**
+ * @brief This function allocates an array of time steps and fills it with FED
+ * time step sizes
+ * The function returns the number of time steps per cycle or 0 on failure
+ * @param n Number of internal steps
+ * @param scale Ratio of t we search to maximal t
+ * @param tau_max Stability limit for the explicit scheme
+ * @param reordering Reordering flag
+ * @param tau The vector with the dynamic step sizes
+ */
+inline int fed_tau_internalV2(const int& n, const float& scale,
+                              const float& tau_max, const bool& reordering,
+                              std::vector<float>& tau) {
+  if (n <= 0) {
+    return 0;
+  }
+
+  // Allocate memory for the time step size (Helper vector for unsorted taus)
+  vector<float> tauh(n);
+
+  // Compute time saver
+  float c = 1.0f / (4.0f * n + 2.0f);
+  float d = scale * tau_max / 2.0f;
+
+  // Set up originally ordered tau vector
+  for (int k = 0; k < n; ++k) {
+    float h = cos((float)CV_PI * (2.0f * k + 1.0f) * c);
+    tauh[k] = d / (h * h);
+  }
+
+  if (!reordering || n == 1) {
+    std::swap(tau, tauh);
+  } else {
+    // Permute list of time steps according to chosen reordering function
+
+    // Choose kappa cycle with k = n/2
+    // This is a heuristic. We can use Leja ordering instead!!
+    int kappa = n / 2;
+
+    // Get modulus for permutation
+    int prime = n + 1;
+
+    while (!fed_is_prime_internalV2(prime)) prime++;
+
+    // Perform permutation
+    tau.resize(n);
+    for (int k = 0, l = 0; l < n; ++k, ++l) {
+      int index = 0;
+      while ((index = ((k + 1) * kappa) % prime - 1) >= n) {
+        k++;
+      }
+
+      tau[l] = tauh[index];
+    }
+  }
+
+  return n;
+}
+
+//*************************************************************************************
+//*************************************************************************************
+
+/**
+ * @brief This function checks if a number is prime or not
+ * @param number Number to check if it is prime or not
+ * @return true if the number is prime
+ */
+inline bool fed_is_prime_internalV2(const int& number) {
+  bool is_prime = false;
+
+  if (number <= 1) {
+    return false;
+  } else if (number == 1 || number == 2 || number == 3 || number == 5 ||
+             number == 7) {
+    return true;
+  } else if ((number % 2) == 0 || (number % 3) == 0 || (number % 5) == 0 ||
+             (number % 7) == 0) {
+    return false;
+  } else {
+    is_prime = true;
+    int upperLimit = (int)sqrt(1.0f + number);
+    int divisor = 11;
+
+    while (divisor <= upperLimit) {
+      if (number % divisor == 0) {
+        is_prime = false;
+      }
+
+      divisor += 2;
+    }
+
+    return is_prime;
+  }
+}
\ No newline at end of file
diff --git a/third_party/akaze/fed.h b/third_party/akaze/fed.h
new file mode 100644
index 0000000..f2a78fc
--- /dev/null
+++ b/third_party/akaze/fed.h
@@ -0,0 +1,26 @@
+#ifndef __OPENCV_FEATURES_2D_FED_H__
+#define __OPENCV_FEATURES_2D_FED_H__
+
+//******************************************************************************
+//******************************************************************************
+
+// Includes
+#include <vector>
+
+//*************************************************************************************
+//*************************************************************************************
+
+// Declaration of functions
+int fed_tau_by_process_timeV2(const float& T, const int& M,
+                              const float& tau_max, const bool& reordering,
+                              std::vector<float>& tau);
+int fed_tau_by_cycle_timeV2(const float& t, const float& tau_max,
+                            const bool& reordering, std::vector<float>& tau);
+int fed_tau_internalV2(const int& n, const float& scale, const float& tau_max,
+                       const bool& reordering, std::vector<float>& tau);
+bool fed_is_prime_internalV2(const int& number);
+
+//*************************************************************************************
+//*************************************************************************************
+
+#endif  // __OPENCV_FEATURES_2D_FED_H__
\ No newline at end of file
diff --git a/third_party/akaze/nldiffusion_functions.cpp b/third_party/akaze/nldiffusion_functions.cpp
new file mode 100644
index 0000000..39ec70e
--- /dev/null
+++ b/third_party/akaze/nldiffusion_functions.cpp
@@ -0,0 +1,457 @@
+//=============================================================================
+//
+// nldiffusion_functions.cpp
+// Author: Pablo F. Alcantarilla
+// Institution: University d'Auvergne
+// Address: Clermont Ferrand, France
+// Date: 27/12/2011
+// Email: pablofdezalc@gmail.com
+//
+// KAZE Features Copyright 2012, Pablo F. Alcantarilla
+// All Rights Reserved
+// See LICENSE for the license information
+//=============================================================================
+
+/**
+ * @file nldiffusion_functions.cpp
+ * @brief Functions for non-linear diffusion applications:
+ * 2D Gaussian Derivatives
+ * Perona and Malik conductivity equations
+ * Perona and Malik evolution
+ * @date Dec 27, 2011
+ * @author Pablo F. Alcantarilla
+ */
+
+#include "nldiffusion_functions.h"
+
+#include <cstdint>
+#include <cstring>
+#include <iostream>
+#include <opencv2/core.hpp>
+#include <opencv2/imgproc.hpp>
+
+// Namespaces
+
+/* ************************************************************************* */
+
+namespace cv {
+using namespace std;
+
+/* ************************************************************************* */
+/**
+ * @brief This function smoothes an image with a Gaussian kernel
+ * @param src Input image
+ * @param dst Output image
+ * @param ksize_x Kernel size in X-direction (horizontal)
+ * @param ksize_y Kernel size in Y-direction (vertical)
+ * @param sigma Kernel standard deviation
+ */
+void gaussian_2D_convolutionV2(const cv::Mat& src, cv::Mat& dst, int ksize_x,
+                               int ksize_y, float sigma) {
+  int ksize_x_ = 0, ksize_y_ = 0;
+
+  // Compute an appropriate kernel size according to the specified sigma
+  if (sigma > ksize_x || sigma > ksize_y || ksize_x == 0 || ksize_y == 0) {
+    ksize_x_ = (int)ceil(2.0f * (1.0f + (sigma - 0.8f) / (0.3f)));
+    ksize_y_ = ksize_x_;
+  }
+
+  // The kernel size must be an odd number
+  if ((ksize_x_ % 2) == 0) {
+    ksize_x_ += 1;
+  }
+
+  if ((ksize_y_ % 2) == 0) {
+    ksize_y_ += 1;
+  }
+
+  // Perform the Gaussian Smoothing with border replication
+  GaussianBlur(src, dst, Size(ksize_x_, ksize_y_), sigma, sigma,
+               BORDER_REPLICATE);
+}
+
+/* ************************************************************************* */
+/**
+ * @brief This function computes image derivatives with Scharr kernel
+ * @param src Input image
+ * @param dst Output image
+ * @param xorder Derivative order in X-direction (horizontal)
+ * @param yorder Derivative order in Y-direction (vertical)
+ * @note Scharr operator approximates better rotation invariance than
+ * other stencils such as Sobel. See Weickert and Scharr,
+ * A Scheme for Coherence-Enhancing Diffusion Filtering with Optimized Rotation
+ * Invariance, Journal of Visual Communication and Image Representation 2002
+ */
+void image_derivatives_scharrV2(const cv::Mat& src, cv::Mat& dst, int xorder,
+                                int yorder) {
+  Scharr(src, dst, CV_32F, xorder, yorder, 1.0, 0, BORDER_DEFAULT);
+}
+
+/* ************************************************************************* */
+/**
+ * @brief This function computes the Perona and Malik conductivity coefficient
+ * g1 g1 = exp(-|dL|^2/k^2)
+ * @param Lx First order image derivative in X-direction (horizontal)
+ * @param Ly First order image derivative in Y-direction (vertical)
+ * @param dst Output image
+ * @param k Contrast factor parameter
+ */
+void pm_g1V2(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k) {
+  // Compute: dst = exp((Lx.mul(Lx) + Ly.mul(Ly)) / (-k * k))
+
+  const float neg_inv_k2 = -1.0f / (k * k);
+
+  const int total = Lx.rows * Lx.cols;
+  const float* lx = Lx.ptr<float>(0);
+  const float* ly = Ly.ptr<float>(0);
+  float* d = dst.ptr<float>(0);
+
+  for (int i = 0; i < total; i++)
+    d[i] = neg_inv_k2 * (lx[i] * lx[i] + ly[i] * ly[i]);
+
+  exp(dst, dst);
+}
+
+/* ************************************************************************* */
+/**
+ * @brief This function computes the Perona and Malik conductivity coefficient
+ * g2 g2 = 1 / (1 + dL^2 / k^2)
+ * @param Lx First order image derivative in X-direction (horizontal)
+ * @param Ly First order image derivative in Y-direction (vertical)
+ * @param dst Output image
+ * @param k Contrast factor parameter
+ */
+void pm_g2V2(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k) {
+  // Compute: dst = 1.0f / (1.0f + ((Lx.mul(Lx) + Ly.mul(Ly)) / (k * k)) );
+
+  const float inv_k2 = 1.0f / (k * k);
+
+  const int total = Lx.rows * Lx.cols;
+  const float* lx = Lx.ptr<float>(0);
+  const float* ly = Ly.ptr<float>(0);
+  float* d = dst.ptr<float>(0);
+
+  for (int i = 0; i < total; i++)
+    d[i] = 1.0f / (1.0f + ((lx[i] * lx[i] + ly[i] * ly[i]) * inv_k2));
+}
+
+/* ************************************************************************* */
+/**
+ * @brief This function computes Weickert conductivity coefficient gw
+ * @param Lx First order image derivative in X-direction (horizontal)
+ * @param Ly First order image derivative in Y-direction (vertical)
+ * @param dst Output image
+ * @param k Contrast factor parameter
+ * @note For more information check the following paper: J. Weickert
+ * Applications of nonlinear diffusion in image processing and computer vision,
+ * Proceedings of Algorithmy 2000
+ */
+void weickert_diffusivityV2(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst,
+                            float k) {
+  // Compute: dst = 1.0f - exp(-3.315f / ((Lx.mul(Lx) + Ly.mul(Ly)) / (k *
+  // k))^4)
+
+  const float inv_k2 = 1.0f / (k * k);
+
+  const int total = Lx.rows * Lx.cols;
+  const float* lx = Lx.ptr<float>(0);
+  const float* ly = Ly.ptr<float>(0);
+  float* d = dst.ptr<float>(0);
+
+  for (int i = 0; i < total; i++) {
+    float dL = inv_k2 * (lx[i] * lx[i] + ly[i] * ly[i]);
+    d[i] = -3.315f / (dL * dL * dL * dL);
+  }
+
+  exp(dst, dst);
+
+  for (int i = 0; i < total; i++) d[i] = 1.0f - d[i];
+}
+
+/* ************************************************************************* */
+/**
+ * @brief This function computes Charbonnier conductivity coefficient gc
+ * gc = 1 / sqrt(1 + dL^2 / k^2)
+ * @param Lx First order image derivative in X-direction (horizontal)
+ * @param Ly First order image derivative in Y-direction (vertical)
+ * @param dst Output image
+ * @param k Contrast factor parameter
+ * @note For more information check the following paper: J. Weickert
+ * Applications of nonlinear diffusion in image processing and computer vision,
+ * Proceedings of Algorithmy 2000
+ */
+void charbonnier_diffusivityV2(const cv::Mat& Lx, const cv::Mat& Ly,
+                               cv::Mat& dst, float k) {
+  // Compute: dst = 1.0f / sqrt(1.0f + (Lx.mul(Lx) + Ly.mul(Ly)) / (k * k))
+
+  const float inv_k2 = 1.0f / (k * k);
+
+  const int total = Lx.rows * Lx.cols;
+  const float* lx = Lx.ptr<float>(0);
+  const float* ly = Ly.ptr<float>(0);
+  float* d = dst.ptr<float>(0);
+
+  for (int i = 0; i < total; i++)
+    d[i] = 1.0f / sqrtf(1.0f + inv_k2 * (lx[i] * lx[i] + ly[i] * ly[i]));
+}
+
+/* ************************************************************************* */
+/**
+ * @brief This function computes a good empirical value for the k contrast
+ * factor given two gradient images, the percentile (0-1), the temporal storage
+ * to hold gradient norms and the histogram bins
+ * @param Lx Horizontal gradient of the input image
+ * @param Ly Vertical gradient of the input image
+ * @param perc Percentile of the image gradient histogram (0-1)
+ * @param modgs Temporal vector to hold the gradient norms
+ * @param histogram Temporal vector to hold the gradient histogram
+ * @return k contrast factor
+ */
+float compute_k_percentileV2(const cv::Mat& Lx, const cv::Mat& Ly, float perc,
+                             cv::Mat& modgs, cv::Mat& histogram) {
+  const int total = modgs.cols;
+  const int nbins = histogram.cols;
+
+  CV_DbgAssert(total == (Lx.rows - 2) * (Lx.cols - 2));
+  CV_DbgAssert(nbins > 2);
+
+  float* modg = modgs.ptr<float>(0);
+  int32_t* hist = histogram.ptr<int32_t>(0);
+
+  for (int i = 1; i < Lx.rows - 1; i++) {
+    const float* lx = Lx.ptr<float>(i) + 1;
+    const float* ly = Ly.ptr<float>(i) + 1;
+    const int cols = Lx.cols - 2;
+
+    for (int j = 0; j < cols; j++)
+      *modg++ = sqrtf(lx[j] * lx[j] + ly[j] * ly[j]);
+  }
+  modg = modgs.ptr<float>(0);
+
+  // Get the maximum
+  float hmax = 0.0f;
+  for (int i = 0; i < total; i++)
+    if (hmax < modg[i]) hmax = modg[i];
+
+  if (hmax == 0.0f) return 0.03f;  // e.g. a blank image
+
+  // Compute the bin numbers: the value range [0, hmax] -> [0, nbins-1]
+  for (int i = 0; i < total; i++) modg[i] *= (nbins - 1) / hmax;
+
+  // Count up
+  std::memset(hist, 0, sizeof(int32_t) * nbins);
+  for (int i = 0; i < total; i++) hist[(int)modg[i]]++;
+
+  // Now find the perc of the histogram percentile
+  const int nthreshold =
+      (int)((total - hist[0]) * perc);  // Exclude hist[0] as background
+  int nelements = 0;
+  for (int k = 1; k < nbins; k++) {
+    if (nelements >= nthreshold) return (float)hmax * k / nbins;
+
+    nelements = nelements + hist[k];
+  }
+
+  return 0.03f;
+}
+
+/* ************************************************************************* */
+/**
+ * @brief Compute Scharr derivative kernels for sizes different than 3
+ * @param _kx Horizontal kernel values
+ * @param _ky Vertical kernel values
+ * @param dx Derivative order in X-direction (horizontal)
+ * @param dy Derivative order in Y-direction (vertical)
+ * @param scale Scale factor or derivative size
+ */
+void compute_scharr_derivative_kernelsV2(cv::OutputArray _kx,
+                                         cv::OutputArray _ky, int dx, int dy,
+                                         int scale) {
+  int ksize = 3 + 2 * (scale - 1);
+
+  // The standard Scharr kernel
+  if (scale == 1) {
+    getDerivKernels(_kx, _ky, dx, dy, FILTER_SCHARR, true, CV_32F);
+    return;
+  }
+
+  _kx.create(ksize, 1, CV_32F, -1, true);
+  _ky.create(ksize, 1, CV_32F, -1, true);
+  Mat kx = _kx.getMat();
+  Mat ky = _ky.getMat();
+
+  float w = 10.0f / 3.0f;
+  float norm = 1.0f / (2.0f * (w + 2.0f));
+
+  std::vector<float> kerI(ksize, 0.0f);
+
+  if (dx == 0) {
+    kerI[0] = norm, kerI[ksize / 2] = w * norm, kerI[ksize - 1] = norm;
+  } else if (dx == 1) {
+    kerI[0] = -1, kerI[ksize / 2] = 0, kerI[ksize - 1] = 1;
+  }
+  Mat(kx.rows, kx.cols, CV_32F, &kerI[0]).copyTo(kx);
+
+  kerI.assign(ksize, 0.0f);
+
+  if (dy == 0) {
+    kerI[0] = norm, kerI[ksize / 2] = w * norm, kerI[ksize - 1] = norm;
+  } else if (dy == 1) {
+    kerI[0] = -1, kerI[ksize / 2] = 0, kerI[ksize - 1] = 1;
+  }
+  Mat(ky.rows, ky.cols, CV_32F, &kerI[0]).copyTo(ky);
+}
+
+inline void nld_step_scalar_one_lane(const cv::Mat& Lt, const cv::Mat& Lf,
+                                     cv::Mat& Lstep, int idx, int skip) {
+  /* The labeling scheme for this five star stencil:
+       [    a    ]
+       [ -1 c +1 ]
+       [    b    ]
+   */
+
+  const int cols = Lt.cols - 2;
+  int row = idx;
+
+  const float *lt_a, *lt_c, *lt_b;
+  const float *lf_a, *lf_c, *lf_b;
+  float* dst;
+
+  // Process the top row
+  if (row == 0) {
+    lt_c = Lt.ptr<float>(0) + 1; /* Skip the left-most column by +1 */
+    lf_c = Lf.ptr<float>(0) + 1;
+    lt_b = Lt.ptr<float>(1) + 1;
+    lf_b = Lf.ptr<float>(1) + 1;
+    dst = Lstep.ptr<float>(0) + 1;
+
+    for (int j = 0; j < cols; j++) {
+      dst[j] = (lf_c[j] + lf_c[j + 1]) * (lt_c[j + 1] - lt_c[j]) +
+               (lf_c[j] + lf_c[j - 1]) * (lt_c[j - 1] - lt_c[j]) +
+               (lf_c[j] + lf_b[j]) * (lt_b[j] - lt_c[j]);
+    }
+    row += skip;
+  }
+
+  // Process the middle rows
+  for (; row < Lt.rows - 1; row += skip) {
+    lt_a = Lt.ptr<float>(row - 1);
+    lf_a = Lf.ptr<float>(row - 1);
+    lt_c = Lt.ptr<float>(row);
+    lf_c = Lf.ptr<float>(row);
+    lt_b = Lt.ptr<float>(row + 1);
+    lf_b = Lf.ptr<float>(row + 1);
+    dst = Lstep.ptr<float>(row);
+
+    // The left-most column
+    dst[0] = (lf_c[0] + lf_c[1]) * (lt_c[1] - lt_c[0]) +
+             (lf_c[0] + lf_b[0]) * (lt_b[0] - lt_c[0]) +
+             (lf_c[0] + lf_a[0]) * (lt_a[0] - lt_c[0]);
+
+    lt_a++;
+    lt_c++;
+    lt_b++;
+    lf_a++;
+    lf_c++;
+    lf_b++;
+    dst++;
+
+    // The middle columns
+    for (int j = 0; j < cols; j++) {
+      dst[j] = (lf_c[j] + lf_c[j + 1]) * (lt_c[j + 1] - lt_c[j]) +
+               (lf_c[j] + lf_c[j - 1]) * (lt_c[j - 1] - lt_c[j]) +
+               (lf_c[j] + lf_b[j]) * (lt_b[j] - lt_c[j]) +
+               (lf_c[j] + lf_a[j]) * (lt_a[j] - lt_c[j]);
+    }
+
+    // The right-most column
+    dst[cols] = (lf_c[cols] + lf_c[cols - 1]) * (lt_c[cols - 1] - lt_c[cols]) +
+                (lf_c[cols] + lf_b[cols]) * (lt_b[cols] - lt_c[cols]) +
+                (lf_c[cols] + lf_a[cols]) * (lt_a[cols] - lt_c[cols]);
+  }
+
+  // Process the bottom row
+  if (row == Lt.rows - 1) {
+    lt_a = Lt.ptr<float>(row - 1) + 1; /* Skip the left-most column by +1 */
+    lf_a = Lf.ptr<float>(row - 1) + 1;
+    lt_c = Lt.ptr<float>(row) + 1;
+    lf_c = Lf.ptr<float>(row) + 1;
+    dst = Lstep.ptr<float>(row) + 1;
+
+    for (int j = 0; j < cols; j++) {
+      dst[j] = (lf_c[j] + lf_c[j + 1]) * (lt_c[j + 1] - lt_c[j]) +
+               (lf_c[j] + lf_c[j - 1]) * (lt_c[j - 1] - lt_c[j]) +
+               (lf_c[j] + lf_a[j]) * (lt_a[j] - lt_c[j]);
+    }
+  }
+}
+
+/* ************************************************************************* */
+/**
+ * @brief This function computes a scalar non-linear diffusion step
+ * @param Ld Base image in the evolution
+ * @param c Conductivity image
+ * @param Lstep Output image that gives the difference between the current
+ * Ld and the next Ld being evolved
+ * @note Forward Euler Scheme 3x3 stencil
+ * The function c is a scalar value that depends on the gradient norm
+ * dL_by_ds = d(c dL_by_dx)_by_dx + d(c dL_by_dy)_by_dy
+ */
+void nld_step_scalarV2(const cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep) {
+  nld_step_scalar_one_lane(Ld, c, Lstep, 0, 1);
+}
+
+/* ************************************************************************* */
+/**
+ * @brief This function downsamples the input image using OpenCV resize
+ * @param src Input image to be downsampled
+ * @param dst Output image with half of the resolution of the input image
+ */
+void halfsample_imageV2(const cv::Mat& src, cv::Mat& dst) {
+  // Make sure the destination image is of the right size
+  CV_Assert(src.cols / 2 == dst.cols);
+  CV_Assert(src.rows / 2 == dst.rows);
+  resize(src, dst, dst.size(), 0, 0, cv::INTER_AREA);
+}
+
+/* ************************************************************************* */
+/**
+ * @brief This function checks if a given pixel is a maximum in a local
+ * neighbourhood
+ * @param img Input image where we will perform the maximum search
+ * @param dsize Half size of the neighbourhood
+ * @param value Response value at (x,y) position
+ * @param row Image row coordinate
+ * @param col Image column coordinate
+ * @param same_img Flag to indicate if the image value at (x,y) is in the input
+ * image
+ * @return 1->is maximum, 0->otherwise
+ */
+bool check_maximum_neighbourhoodV2(const cv::Mat& img, int dsize, float value,
+                                   int row, int col, bool same_img) {
+  bool response = true;
+
+  for (int i = row - dsize; i <= row + dsize; i++) {
+    for (int j = col - dsize; j <= col + dsize; j++) {
+      if (i >= 0 && i < img.rows && j >= 0 && j < img.cols) {
+        if (same_img == true) {
+          if (i != row || j != col) {
+            if ((*(img.ptr<float>(i) + j)) > value) {
+              response = false;
+              return response;
+            }
+          }
+        } else {
+          if ((*(img.ptr<float>(i) + j)) > value) {
+            response = false;
+            return response;
+          }
+        }
+      }
+    }
+  }
+
+  return response;
+}
+
+}  // namespace cv
\ No newline at end of file
diff --git a/third_party/akaze/nldiffusion_functions.h b/third_party/akaze/nldiffusion_functions.h
new file mode 100644
index 0000000..67d5640
--- /dev/null
+++ b/third_party/akaze/nldiffusion_functions.h
@@ -0,0 +1,55 @@
+/**
+ * @file nldiffusion_functions.h
+ * @brief Functions for non-linear diffusion applications:
+ * 2D Gaussian Derivatives
+ * Perona and Malik conductivity equations
+ * Perona and Malik evolution
+ * @date Dec 27, 2011
+ * @author Pablo F. Alcantarilla
+ */
+
+#ifndef __OPENCV_FEATURES_2D_NLDIFFUSION_FUNCTIONS_H__
+#define __OPENCV_FEATURES_2D_NLDIFFUSION_FUNCTIONS_H__
+
+/* ************************************************************************* */
+// Declaration of functions
+
+#include <opencv2/core.hpp>
+
+namespace cv {
+
+// Gaussian 2D convolution
+void gaussian_2D_convolutionV2(const cv::Mat& src, cv::Mat& dst, int ksize_x,
+                               int ksize_y, float sigma);
+
+// Diffusivity functions
+void pm_g1V2(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k);
+void pm_g2V2(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, float k);
+void weickert_diffusivityV2(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst,
+                            float k);
+void charbonnier_diffusivityV2(const cv::Mat& Lx, const cv::Mat& Ly,
+                               cv::Mat& dst, float k);
+
+float compute_k_percentileV2(const cv::Mat& Lx, const cv::Mat& Ly, float perc,
+                             cv::Mat& modgs, cv::Mat& hist);
+
+// Image derivatives
+void compute_scharr_derivative_kernelsV2(cv::OutputArray _kx,
+                                         cv::OutputArray _ky, int dx, int dy,
+                                         int scale);
+void image_derivatives_scharrV2(const cv::Mat& src, cv::Mat& dst, int xorder,
+                                int yorder);
+
+// Nonlinear diffusion filtering scalar step
+void nld_step_scalarV2(const cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep);
+
+// For non-maxima suppression
+bool check_maximum_neighbourhoodV2(const cv::Mat& img, int dsize, float value,
+                                   int row, int col, bool same_img);
+
+// Image downsampling
+void halfsample_imageV2(const cv::Mat& src, cv::Mat& dst);
+
+}  // namespace cv
+
+#endif
\ No newline at end of file
diff --git a/third_party/akaze/utils.h b/third_party/akaze/utils.h
new file mode 100644
index 0000000..fe17305
--- /dev/null
+++ b/third_party/akaze/utils.h
@@ -0,0 +1,67 @@
+#ifndef __OPENCV_FEATURES_2D_KAZE_UTILS_H__
+#define __OPENCV_FEATURES_2D_KAZE_UTILS_H__
+
+#include <opencv2/core/cvdef.h>
+
+#include <cmath>
+
+/* ************************************************************************* */
+/**
+ * @brief This function computes the angle from the vector given by (X Y). From
+ * 0 to 2*Pi
+ */
+inline float getAngleV2(float x, float y) {
+  float theta = atan2f(y, x);
+
+  if (theta >= 0)
+    return theta;
+  else
+    return theta + static_cast<float>(2.0f * CV_PI);
+}
+
+/* ************************************************************************* */
+/**
+ * @brief This function computes the value of a 2D Gaussian function
+ * @param x X Position
+ * @param y Y Position
+ * @param sig Standard Deviation
+ */
+inline float gaussianV2(float x, float y, float sigma) {
+  return expf(-(x * x + y * y) / (2.0f * sigma * sigma));
+}
+
+/* ************************************************************************* */
+/**
+ * @brief This function checks descriptor limits
+ * @param x X Position
+ * @param y Y Position
+ * @param width Image width
+ * @param height Image height
+ */
+inline void checkDescriptorLimitsV2(int &x, int &y, int width, int height) {
+  if (x < 0) {
+    x = 0;
+  }
+
+  if (y < 0) {
+    y = 0;
+  }
+
+  if (x > width - 1) {
+    x = width - 1;
+  }
+
+  if (y > height - 1) {
+    y = height - 1;
+  }
+}
+
+/* ************************************************************************* */
+/**
+ * @brief This function rounds float to nearest integer
+ * @param flt Input float
+ * @return dst Nearest integer
+ */
+inline int fRoundV2(float flt) { return (int)(flt + 0.5f); }
+
+#endif
\ No newline at end of file
diff --git a/third_party/autocxx/.github/workflows/ci.yml b/third_party/autocxx/.github/workflows/ci.yml
index 3eb54d1..367b729 100644
--- a/third_party/autocxx/.github/workflows/ci.yml
+++ b/third_party/autocxx/.github/workflows/ci.yml
@@ -139,6 +139,9 @@
       - name: Build non-trivial-type-on-stack example
         working-directory: ./examples/non-trivial-type-on-stack
         run: cargo build
+      - name: Build reference-wrappers example
+        working-directory: ./examples/reference-wrappers
+        run: cargo build
         # We do not build the LLVM example because even 'apt-get install llvm-13-dev'
         # does not work to install the LLVM 13 headers.
 
diff --git a/third_party/autocxx/.github/workflows/site.yml b/third_party/autocxx/.github/workflows/site.yml
index 0dd4237..e1951c8 100644
--- a/third_party/autocxx/.github/workflows/site.yml
+++ b/third_party/autocxx/.github/workflows/site.yml
@@ -27,9 +27,7 @@
         run: cargo install mdbook-linkcheck
     
       - name: Install mdbook-mermaid
-        run: |
-          curl -LSfs https://japaric.github.io/trust/install.sh | \
-            sh -s -- --git badboy/mdbook-mermaid
+        run: cargo install mdbook-mermaid
     
       - run: mdbook build
         working-directory: book
diff --git a/third_party/autocxx/BUILD b/third_party/autocxx/BUILD
new file mode 100644
index 0000000..8a26c11
--- /dev/null
+++ b/third_party/autocxx/BUILD
@@ -0,0 +1,32 @@
+load("@rules_rust//rust:defs.bzl", "rust_library")
+
+package(default_visibility = ["//visibility:public"])
+
+licenses([
+    "notice",  # MIT from expression "MIT OR Apache-2.0"
+])
+
+rust_library(
+    name = "autocxx",
+    srcs = glob(["**/*.rs"]),
+    compile_data = ["README.md"],
+    crate_root = "src/lib.rs",
+    edition = "2021",
+    proc_macro_deps = [
+        "//third_party/cargo:aquamarine",
+        "//third_party/autocxx/macro:autocxx_macro",
+    ],
+    rustc_flags = [
+        "--cap-lints=allow",
+    ],
+    tags = [
+        "cargo-raze",
+        "crate-name=autocxx",
+        "manual",
+    ],
+    version = "0.16.0",
+    deps = [
+        "//third_party/cargo:cxx",
+        "//third_party/cargo:moveit",
+    ],
+)
diff --git a/third_party/autocxx/Cargo.lock b/third_party/autocxx/Cargo.lock
index a5bfc0a..4f58182 100644
--- a/third_party/autocxx/Cargo.lock
+++ b/third_party/autocxx/Cargo.lock
@@ -87,7 +87,7 @@
 
 [[package]]
 name = "autocxx"
-version = "0.22.0"
+version = "0.22.3"
 dependencies = [
  "aquamarine",
  "autocxx-macro",
@@ -97,9 +97,9 @@
 
 [[package]]
 name = "autocxx-bindgen"
-version = "0.59.16"
+version = "0.59.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "435723e14bf88f198322f8555a4fdb108363021d97a47bb6492891ca86055e79"
+checksum = "f9a9a26dd38d385d23b1bf61bd231b77f690c4368aef4c77cee1b7a6da2e2042"
 dependencies = [
  "bitflags",
  "cexpr",
@@ -121,7 +121,7 @@
 
 [[package]]
 name = "autocxx-build"
-version = "0.22.0"
+version = "0.22.3"
 dependencies = [
  "autocxx-engine",
  "env_logger 0.9.0",
@@ -131,7 +131,7 @@
 
 [[package]]
 name = "autocxx-demo"
-version = "0.22.0"
+version = "0.22.3"
 dependencies = [
  "autocxx",
  "autocxx-build",
@@ -141,7 +141,7 @@
 
 [[package]]
 name = "autocxx-engine"
-version = "0.22.0"
+version = "0.22.3"
 dependencies = [
  "aquamarine",
  "autocxx-bindgen",
@@ -167,7 +167,7 @@
 
 [[package]]
 name = "autocxx-gen"
-version = "0.22.0"
+version = "0.22.3"
 dependencies = [
  "assert_cmd",
  "autocxx",
@@ -181,12 +181,12 @@
  "miette",
  "pathdiff",
  "proc-macro2",
- "tempdir",
+ "tempfile",
 ]
 
 [[package]]
 name = "autocxx-integration-tests"
-version = "0.22.0"
+version = "0.22.3"
 dependencies = [
  "autocxx",
  "autocxx-engine",
@@ -209,7 +209,7 @@
 
 [[package]]
 name = "autocxx-macro"
-version = "0.22.0"
+version = "0.22.3"
 dependencies = [
  "autocxx-parser",
  "proc-macro-error",
@@ -220,7 +220,7 @@
 
 [[package]]
 name = "autocxx-mdbook-preprocessor"
-version = "0.22.0"
+version = "0.22.3"
 dependencies = [
  "anyhow",
  "autocxx-integration-tests",
@@ -238,7 +238,7 @@
 
 [[package]]
 name = "autocxx-parser"
-version = "0.22.0"
+version = "0.22.3"
 dependencies = [
  "indexmap",
  "itertools 0.10.3",
@@ -254,7 +254,7 @@
 
 [[package]]
 name = "autocxx-reduce"
-version = "0.22.0"
+version = "0.22.3"
 dependencies = [
  "assert_cmd",
  "autocxx-engine",
@@ -271,7 +271,6 @@
  "serde_derive",
  "serde_json",
  "syn",
- "tempdir",
  "tempfile",
 ]
 
@@ -491,9 +490,9 @@
 
 [[package]]
 name = "cxx"
-version = "1.0.66"
+version = "1.0.68"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ce2295fe8865279f404147e9b2328e5af0ad11a2c016e58c13acfd48a07d8a55"
+checksum = "7e599641dff337570f6aa9c304ecca92341d30bf72e1c50287869ed6a36615a6"
 dependencies = [
  "cc",
  "cxxbridge-flags",
@@ -503,9 +502,9 @@
 
 [[package]]
 name = "cxx-gen"
-version = "0.7.66"
+version = "0.7.68"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2bba249c8ea90cff9c647cd76928efc82f1e8684da0a6cedb4416cc79478050d"
+checksum = "1e2c726d93799c3129c65224ab09eae1a31276bc593d4f7344be1c592c16a1ec"
 dependencies = [
  "codespan-reporting",
  "proc-macro2",
@@ -515,15 +514,15 @@
 
 [[package]]
 name = "cxxbridge-flags"
-version = "1.0.66"
+version = "1.0.68"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0a670224c6686471df12560a0b97a08145082e70bd38e2b0b5383b79e46c3da7"
+checksum = "3894ad0c6d517cb5a4ce8ec20b37cd0ea31b480fe582a104c5db67ae21270853"
 
 [[package]]
 name = "cxxbridge-macro"
-version = "1.0.66"
+version = "1.0.68"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7b700096ca0dece28d9535fdb17ab784a8ae155d7f29d39c273643e6292c9620"
+checksum = "34fa7e395dc1c001083c7eed28c8f0f0b5a225610f3b6284675f444af6fab86b"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -610,12 +609,6 @@
 ]
 
 [[package]]
-name = "fuchsia-cprng"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"
-
-[[package]]
 name = "gag"
 version = "1.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1135,34 +1128,6 @@
 ]
 
 [[package]]
-name = "rand"
-version = "0.4.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293"
-dependencies = [
- "fuchsia-cprng",
- "libc",
- "rand_core 0.3.1",
- "rdrand",
- "winapi",
-]
-
-[[package]]
-name = "rand_core"
-version = "0.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b"
-dependencies = [
- "rand_core 0.4.2",
-]
-
-[[package]]
-name = "rand_core"
-version = "0.4.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc"
-
-[[package]]
 name = "rayon"
 version = "1.5.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1187,15 +1152,6 @@
 ]
 
 [[package]]
-name = "rdrand"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2"
-dependencies = [
- "rand_core 0.3.1",
-]
-
-[[package]]
 name = "redox_syscall"
 version = "0.2.13"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1392,16 +1348,6 @@
 ]
 
 [[package]]
-name = "tempdir"
-version = "0.3.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "15f2b5fb00ccdf689e0149d1b1b3c03fead81c2b37735d812fa8bddbbf41b6d8"
-dependencies = [
- "rand",
- "remove_dir_all",
-]
-
-[[package]]
 name = "tempfile"
 version = "3.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
diff --git a/third_party/autocxx/Cargo.toml b/third_party/autocxx/Cargo.toml
index 7d34f78..a741ece 100644
--- a/third_party/autocxx/Cargo.toml
+++ b/third_party/autocxx/Cargo.toml
@@ -8,7 +8,7 @@
 
 [package]
 name = "autocxx"
-version = "0.22.0"
+version = "0.22.3"
 authors = ["Adrian Taylor <adetaylor@chromium.org>"]
 license = "MIT OR Apache-2.0"
 description = "Safe autogenerated interop between Rust and C++"
@@ -25,8 +25,8 @@
 resolver = "2"
 
 [dependencies]
-autocxx-macro = { path="macro", version="0.22.0" }
-cxx = "1.0.54" # ... also needed because expansion of type_id refers to ::cxx
+autocxx-macro = { path="macro", version="0.22.3" }
+cxx = "1.0.68" # ... also needed because expansion of type_id refers to ::cxx
 aquamarine = "0.1" # docs
 moveit = { version = "0.5", features = [ "cxx" ] }
 
diff --git a/third_party/autocxx/book/src/contributing.md b/third_party/autocxx/book/src/contributing.md
index 1f00bc9..e95ca5a 100644
--- a/third_party/autocxx/book/src/contributing.md
+++ b/third_party/autocxx/book/src/contributing.md
@@ -80,7 +80,7 @@
 order of preference here's how we would like to hear about your problem:
 
 * Raise a pull request adding a new failing integration test to
-  `engine/src/integration_tests.rs`.
+  [`integration_test.rs`](https://github.com/google/autocxx/blob/main/integration-tests/tests/integration_test.rs)
 * Minimize the test using `tools/reduce`, something like this:
   `target/debug/autocxx-reduce file -d "safety!(unsafe_ffi)" -d
   'generate_pod!("A")' -I ~/my-include-dir -h my-header.h -p
diff --git a/third_party/autocxx/book/src/references_etc.md b/third_party/autocxx/book/src/references_etc.md
index a4b6d09..95456dc 100644
--- a/third_party/autocxx/book/src/references_etc.md
+++ b/third_party/autocxx/book/src/references_etc.md
@@ -39,12 +39,13 @@
 Exactly the same issues apply to C++ references _in theory_, but in practice,
 they usually don't. Therefore [`cxx`](https://cxx.rs) has taken the view that we can "trust"
 a C++ reference to a higher degree than a pointer, and autocxx follows that
-lead. In practice, of course, references are rarely return values from C++
+lead (in fact we 'trust' references even slightly more than cxx).
+In practice, of course, references are rarely return values from C++
 APIs so we rarely have to navel-gaze about the trustworthiness of a
 reference.
 
 (See also the discussion of [`safety`](safety.md) - if you haven't specified
-an unsafety policy, _all_ C++ APIs require `unsafe` so the discussion is moot.)
+an unsafety policy, _all_ C++ APIs require `unsafe` so the discussion is moot.)
 
 If you're given a C++ object by pointer, and you want to interact with it,
 you'll need to figure out the guarantees attached to the C++ object - most
diff --git a/third_party/autocxx/book/src/safety.md b/third_party/autocxx/book/src/safety.md
index 34724d2..e4c858a 100644
--- a/third_party/autocxx/book/src/safety.md
+++ b/third_party/autocxx/book/src/safety.md
@@ -59,7 +59,6 @@
 )
 ```
 
-
 ## Pragmatism in a complex C++ codebase
 
 This crate mostly intends to follow the lead of the `cxx` crate in where and when `unsafe` is required. But, this crate is opinionated. It believes some unsafety requires more careful review than other bits, along the following spectrum:
@@ -89,3 +88,12 @@
 * It doesn't delete it.
 * or any of the other things that you're not permitted to do in unsafe Rust.
 
+## Soundness
+
+This crate shares the general approach to safety and soundness pioneered by cxx, but has two important differences:
+
+* cxx requires you to specify your interface in detail, and thus think through all aspects of the language boundary. autocxx doesn't, and may autogenerate footguns.
+* cxx may allow multiple conflicting Rust references to exist to 'trivial' data types ("plain old data" or POD in autocxx parlance), but they're rare. autocxx may allow conflicting Rust references to exist even to 'opaque' (non-POD) data, and they're more common. This difference exists because opaque data is zero-sized in cxx, and zero-sized references cannot conflict. (In autocxx, we tell Rust about the size in order that we can allocate such types on the stack.)
+
+There are preliminary explorations to avoid this problem by using a C++ reference wrapper type. See `examples/reference-wrappers`.
+
diff --git a/third_party/autocxx/book/src/tutorial.md b/third_party/autocxx/book/src/tutorial.md
index 3bedb26..3e8e5b2 100644
--- a/third_party/autocxx/book/src/tutorial.md
+++ b/third_party/autocxx/book/src/tutorial.md
@@ -15,15 +15,17 @@
 
 The rest of this 'getting started' section assumes Cargo - if you're using something else, see the [building](building.md) section.
 
-First, add `autocxx` *and `cxx`* to your `dependencies` and `autocxx-build` to your `build-dependencies` in your `Cargo.toml`.
+First, add `autocxx` *and `cxx`* to your `dependencies` and `autocxx-build` to your `build-dependencies` in your `Cargo.toml`. **You must specify both.**
+
+
 
 ```toml
 [dependencies]
-autocxx = "0.22.0"
+autocxx = "0.22.3"
 cxx = "1.0"
 
 [build-dependencies]
-autocxx-build = "0.22.0"
+autocxx-build = "0.22.3"
 miette = { version="4.3", features=["fancy"] } # optional but gives nicer error messages!
 ```
 
diff --git a/third_party/autocxx/demo/Cargo.toml b/third_party/autocxx/demo/Cargo.toml
index c6a7227..e81cb80 100644
--- a/third_party/autocxx/demo/Cargo.toml
+++ b/third_party/autocxx/demo/Cargo.toml
@@ -8,14 +8,14 @@
 
 [package]
 name = "autocxx-demo"
-version = "0.22.0"
+version = "0.22.3"
 authors = ["Adrian Taylor <adetaylor@chromium.org>"]
 edition = "2021"
 
 [dependencies]
-cxx = "1.0.54"
-autocxx = { path = "..", version="0.22.0" }
+cxx = "1.0.68"
+autocxx = { path = "..", version="0.22.3" }
 
 [build-dependencies]
-autocxx-build = { path = "../gen/build", version="0.22.0" }
+autocxx-build = { path = "../gen/build", version="0.22.3" }
 miette = { version="4.3", features=["fancy"]}
diff --git a/third_party/autocxx/engine/BUILD b/third_party/autocxx/engine/BUILD
new file mode 100644
index 0000000..b3335d2
--- /dev/null
+++ b/third_party/autocxx/engine/BUILD
@@ -0,0 +1,51 @@
+load("@rules_rust//rust:defs.bzl", "rust_library")
+
+package(default_visibility = ["//visibility:public"])
+
+licenses([
+    "notice",  # MIT from expression "MIT OR Apache-2.0"
+])
+
+rust_library(
+    name = "autocxx_engine",
+    srcs = glob(["**/*.rs"]),
+    crate_features = [
+        "default",
+        "reproduction_case",
+        "serde_json",
+    ],
+    crate_root = "src/lib.rs",
+    edition = "2021",
+    proc_macro_deps = [
+        "//third_party/cargo:indoc",
+        "//third_party/cargo:aquamarine",
+        "//third_party/cargo:strum_macros",
+    ],
+    rustc_flags = [
+        "--cap-lints=allow",
+    ],
+    tags = [
+        "cargo-raze",
+        "crate-name=autocxx-engine",
+        "manual",
+    ],
+    version = "0.16.0",
+    deps = [
+        "//third_party/autocxx/parser:autocxx_parser",
+        "//third_party/cargo:autocxx_bindgen",
+        "//third_party/cargo:cxx_gen",
+        "//third_party/cargo:indexmap",
+        "//third_party/cargo:itertools",
+        "//third_party/cargo:log",
+        "//third_party/cargo:miette",
+        "//third_party/cargo:once_cell",
+        "//third_party/cargo:proc_macro2",
+        "//third_party/cargo:quote",
+        "//third_party/cargo:regex",
+        "//third_party/cargo:serde_json",
+        "//third_party/cargo:syn",
+        "//third_party/cargo:tempfile",
+        "//third_party/cargo:thiserror",
+        "//third_party/cargo:version_check",
+    ],
+)
diff --git a/third_party/autocxx/engine/Cargo.toml b/third_party/autocxx/engine/Cargo.toml
index 964108e..1400847 100644
--- a/third_party/autocxx/engine/Cargo.toml
+++ b/third_party/autocxx/engine/Cargo.toml
@@ -8,7 +8,7 @@
 
 [package]
 name = "autocxx-engine"
-version = "0.22.0"
+version = "0.22.3"
 authors = ["Adrian Taylor <adetaylor@chromium.org>"]
 license = "MIT OR Apache-2.0"
 description = "Safe autogenerated interop between Rust and C++"
@@ -30,15 +30,15 @@
 proc-macro2 = "1.0.11"
 quote = "1.0"
 indoc = "1.0"
-autocxx-bindgen = "=0.59.16"
+autocxx-bindgen = "=0.59.17"
 #autocxx-bindgen = { git = "https://github.com/adetaylor/rust-bindgen", branch = "pollute-fewer-typedefs" }
 itertools = "0.10.3"
 cc = { version = "1.0", optional = true }
 # Note: Keep the patch-level version of cxx-gen and cxx in sync.
 # There can be interdependencies between the code generated by cxx-gen and
 # what cxx expects to be there.
-cxx-gen = "0.7.54"
-autocxx-parser = { version = "=0.22.0", path="../parser" }
+cxx-gen = "0.7.68"
+autocxx-parser = { version = "=0.22.3", path="../parser" }
 version_check = "0.9"
 aquamarine = "0.1" # docs
 tempfile = "3.1"
diff --git a/third_party/autocxx/engine/src/conversion/analysis/fun/function_wrapper.rs b/third_party/autocxx/engine/src/conversion/analysis/fun/function_wrapper.rs
index f2f6aaa..ab3b7d9 100644
--- a/third_party/autocxx/engine/src/conversion/analysis/fun/function_wrapper.rs
+++ b/third_party/autocxx/engine/src/conversion/analysis/fun/function_wrapper.rs
@@ -10,7 +10,8 @@
     conversion::api::SubclassName,
     types::{Namespace, QualifiedName},
 };
-use syn::{parse_quote, Ident, Type};
+use quote::ToTokens;
+use syn::{parse_quote, Ident, Type, TypeReference};
 
 #[derive(Clone, Debug)]
 pub(crate) enum CppConversionType {
@@ -23,6 +24,8 @@
     /// Ignored in the sense that it isn't passed into the C++ function.
     IgnoredPlacementPtrParameter,
     FromReturnValueToPlacementPtr,
+    FromPointerToReference, // unwrapped_type is always Type::Ptr
+    FromReferenceToPointer, // unwrapped_type is always Type::Ptr
 }
 
 impl CppConversionType {
@@ -36,6 +39,8 @@
                 CppConversionType::FromValueToUniquePtr
             }
             CppConversionType::FromValueToUniquePtr => CppConversionType::FromUniquePtrToValue,
+            CppConversionType::FromPointerToReference => CppConversionType::FromReferenceToPointer,
+            CppConversionType::FromReferenceToPointer => CppConversionType::FromPointerToReference,
             _ => panic!("Did not expect to have to invert this conversion"),
         }
     }
@@ -52,6 +57,8 @@
     FromValueParamToPtr,
     FromPlacementParamToNewReturn,
     FromRValueParamToPtr,
+    FromReferenceWrapperToPointer, // unwrapped_type is always Type::Ptr
+    FromPointerToReferenceWrapper, // unwrapped_type is always Type::Ptr
 }
 
 impl RustConversionType {
@@ -74,19 +81,53 @@
 /// * Finally, the actual C++ API receives a `std::string` by value.
 /// The implementation here is distributed across this file, and
 /// `function_wrapper_rs` and `function_wrapper_cpp`.
+/// TODO: we should make this into a single enum, with the Type as enum
+/// variant params. That would remove the possibility of various runtime
+/// panics by enforcing (for example) that conversion from a pointer always
+/// has a Type::Ptr.
 #[derive(Clone)]
 pub(crate) struct TypeConversionPolicy {
-    pub(crate) unwrapped_type: Type,
+    unwrapped_type: Type,
     pub(crate) cpp_conversion: CppConversionType,
     pub(crate) rust_conversion: RustConversionType,
 }
 
 impl TypeConversionPolicy {
     pub(crate) fn new_unconverted(ty: Type) -> Self {
-        TypeConversionPolicy {
+        Self::new(ty, CppConversionType::None, RustConversionType::None)
+    }
+
+    pub(crate) fn new(
+        ty: Type,
+        cpp_conversion: CppConversionType,
+        rust_conversion: RustConversionType,
+    ) -> Self {
+        Self {
             unwrapped_type: ty,
-            cpp_conversion: CppConversionType::None,
-            rust_conversion: RustConversionType::None,
+            cpp_conversion,
+            rust_conversion,
+        }
+    }
+
+    pub(crate) fn cxxbridge_type(&self) -> &Type {
+        &self.unwrapped_type
+    }
+
+    pub(crate) fn return_reference_into_wrapper(ty: Type) -> Self {
+        let (unwrapped_type, is_mut) = match ty {
+            Type::Reference(TypeReference {
+                elem, mutability, ..
+            }) => (*elem, mutability.is_some()),
+            _ => panic!("Not a ptr: {}", ty.to_token_stream()),
+        };
+        TypeConversionPolicy {
+            unwrapped_type: if is_mut {
+                parse_quote! { *mut #unwrapped_type }
+            } else {
+                parse_quote! { *const #unwrapped_type }
+            },
+            cpp_conversion: CppConversionType::FromReferenceToPointer,
+            rust_conversion: RustConversionType::FromPointerToReferenceWrapper,
         }
     }
 
@@ -161,6 +202,8 @@
             RustConversionType::FromValueParamToPtr
                 | RustConversionType::FromRValueParamToPtr
                 | RustConversionType::FromPlacementParamToNewReturn
+                | RustConversionType::FromPointerToReferenceWrapper { .. }
+                | RustConversionType::FromReferenceWrapperToPointer { .. }
         )
     }
 
diff --git a/third_party/autocxx/engine/src/conversion/analysis/fun/mod.rs b/third_party/autocxx/engine/src/conversion/analysis/fun/mod.rs
index 19340f9..7194746 100644
--- a/third_party/autocxx/engine/src/conversion/analysis/fun/mod.rs
+++ b/third_party/autocxx/engine/src/conversion/analysis/fun/mod.rs
@@ -41,7 +41,7 @@
 use quote::quote;
 use syn::{
     parse_quote, punctuated::Punctuated, token::Comma, FnArg, Ident, Pat, ReturnType, Type,
-    TypePtr, Visibility,
+    TypePath, TypePtr, TypeReference, Visibility,
 };
 
 use crate::{
@@ -183,7 +183,7 @@
     pub(crate) is_placement_return_destination: bool,
 }
 
-struct ReturnTypeAnalysis {
+pub(crate) struct ReturnTypeAnalysis {
     rt: ReturnType,
     conversion: Option<TypeConversionPolicy>,
     was_reference: bool,
@@ -270,7 +270,7 @@
 }
 
 pub(crate) struct FnAnalyzer<'a> {
-    unsafe_policy: UnsafePolicy,
+    unsafe_policy: &'a UnsafePolicy,
     extra_apis: ApiVec<NullPhase>,
     type_converter: TypeConverter<'a>,
     bridge_name_tracker: BridgeNameTracker,
@@ -288,7 +288,7 @@
 impl<'a> FnAnalyzer<'a> {
     pub(crate) fn analyze_functions(
         apis: ApiVec<PodPhase>,
-        unsafe_policy: UnsafePolicy,
+        unsafe_policy: &'a UnsafePolicy,
         config: &'a IncludeCppConfig,
     ) -> ApiVec<FnPrePhase2> {
         let mut me = Self {
@@ -476,7 +476,9 @@
                 UnsafetyNeeded::Always => UnsafetyNeeded::JustBridge,
                 _ => unsafest_param,
             },
-            _ if self.unsafe_policy == UnsafePolicy::AllFunctionsUnsafe => UnsafetyNeeded::Always,
+            _ if matches!(self.unsafe_policy, UnsafePolicy::AllFunctionsUnsafe) => {
+                UnsafetyNeeded::Always
+            }
             _ => match unsafest_non_placement_param {
                 UnsafetyNeeded::Always => UnsafetyNeeded::Always,
                 UnsafetyNeeded::JustBridge => match unsafest_param {
@@ -638,6 +640,7 @@
                     receiver_mutability,
                     sup,
                     subclass_fn_deps,
+                    self.unsafe_policy,
                 ));
 
                 // Create the trait item for the <superclass>_methods and <superclass>_supers
@@ -655,6 +658,7 @@
                         receiver_mutability,
                         sup.clone(),
                         is_pure_virtual,
+                        self.unsafe_policy,
                     ));
                 }
             }
@@ -836,7 +840,7 @@
                 let arg_is_reference = matches!(
                     param_details
                         .get(1)
-                        .map(|param| &param.conversion.unwrapped_type),
+                        .map(|param| param.conversion.cxxbridge_type()),
                     Some(Type::Reference(_))
                 );
                 // Some exotic forms of copy constructor have const and/or volatile qualifiers.
@@ -1209,6 +1213,11 @@
         let ret_type_conversion_needed = ret_type_conversion
             .as_ref()
             .map_or(false, |x| x.cpp_work_needed());
+        let return_needs_rust_conversion = ret_type_conversion
+            .as_ref()
+            .map(|ra| ra.rust_work_needed())
+            .unwrap_or_default();
+
         // See https://github.com/dtolnay/cxx/issues/878 for the reason for this next line.
         let effective_cpp_name = cpp_name.as_ref().unwrap_or(&rust_name);
         let cpp_name_incompatible_with_cxx =
@@ -1346,9 +1355,10 @@
             .any(|pd| pd.conversion.rust_work_needed());
 
         let rust_wrapper_needed = match kind {
+            _ if any_param_needs_rust_conversion || return_needs_rust_conversion => true,
             FnKind::TraitMethod { .. } => true,
-            FnKind::Method { .. } => any_param_needs_rust_conversion || cxxbridge_name != rust_name,
-            _ => any_param_needs_rust_conversion,
+            FnKind::Method { .. } => cxxbridge_name != rust_name,
+            _ => false,
         };
 
         // Naming, part two.
@@ -1642,6 +1652,7 @@
                     }
                     _ => old_pat,
                 };
+
                 let is_placement_return_destination = is_placement_return_destination
                     || matches!(
                         force_rust_conversion,
@@ -1653,6 +1664,8 @@
                     is_move_constructor,
                     force_rust_conversion,
                     sophistication,
+                    self_type.is_some(),
+                    is_placement_return_destination,
                 );
                 let new_ty = annotated_type.ty;
                 pt.pat = Box::new(new_pat.clone());
@@ -1694,6 +1707,8 @@
         is_move_constructor: bool,
         force_rust_conversion: Option<RustConversionType>,
         sophistication: TypeConversionSophistication,
+        is_self: bool,
+        is_placement_return_destination: bool,
     ) -> TypeConversionPolicy {
         let is_subclass_holder = match &annotated_type.kind {
             type_converter::TypeKind::SubclassHolder(holder) => Some(holder),
@@ -1703,6 +1718,9 @@
             annotated_type.kind,
             type_converter::TypeKind::RValueReference
         );
+        let is_reference =
+            matches!(annotated_type.kind, type_converter::TypeKind::Reference) || is_self;
+        let rust_conversion_forced = force_rust_conversion.is_some();
         let ty = &*annotated_type.ty;
         if let Some(holder_id) = is_subclass_holder {
             let subclass = SubclassName::from_holder_name(holder_id);
@@ -1710,91 +1728,127 @@
                 let ty = parse_quote! {
                     rust::Box<#holder_id>
                 };
-                TypeConversionPolicy {
-                    unwrapped_type: ty,
-                    cpp_conversion: CppConversionType::Move,
-                    rust_conversion: RustConversionType::ToBoxedUpHolder(subclass),
-                }
+                TypeConversionPolicy::new(
+                    ty,
+                    CppConversionType::Move,
+                    RustConversionType::ToBoxedUpHolder(subclass),
+                )
             };
         } else if matches!(
             force_rust_conversion,
             Some(RustConversionType::FromPlacementParamToNewReturn)
         ) && matches!(sophistication, TypeConversionSophistication::Regular)
         {
-            return TypeConversionPolicy {
-                unwrapped_type: ty.clone(),
-                cpp_conversion: CppConversionType::IgnoredPlacementPtrParameter,
-                rust_conversion: RustConversionType::FromPlacementParamToNewReturn,
-            };
+            return TypeConversionPolicy::new(
+                ty.clone(),
+                CppConversionType::IgnoredPlacementPtrParameter,
+                RustConversionType::FromPlacementParamToNewReturn,
+            );
         }
         match ty {
             Type::Path(p) => {
                 let ty = ty.clone();
                 let tn = QualifiedName::from_type_path(p);
-                if self.pod_safe_types.contains(&tn) {
+                if matches!(
+                    self.config.unsafe_policy,
+                    UnsafePolicy::ReferencesWrappedAllFunctionsSafe
+                ) && is_reference
+                    && !rust_conversion_forced
+                // must be std::pin::Pin<&mut T>
+                {
+                    let unwrapped_type = extract_type_from_pinned_mut_ref(p);
+                    TypeConversionPolicy::new(
+                        parse_quote! { *mut #unwrapped_type },
+                        CppConversionType::FromPointerToReference,
+                        RustConversionType::FromReferenceWrapperToPointer,
+                    )
+                } else if self.pod_safe_types.contains(&tn) {
                     if known_types().lacks_copy_constructor(&tn) {
-                        TypeConversionPolicy {
-                            unwrapped_type: ty,
-                            cpp_conversion: CppConversionType::Move,
-                            rust_conversion: RustConversionType::None,
-                        }
+                        TypeConversionPolicy::new(
+                            ty,
+                            CppConversionType::Move,
+                            RustConversionType::None,
+                        )
                     } else {
                         TypeConversionPolicy::new_unconverted(ty)
                     }
                 } else if known_types().convertible_from_strs(&tn)
                     && !self.config.exclude_utilities()
                 {
-                    TypeConversionPolicy {
-                        unwrapped_type: ty,
-                        cpp_conversion: CppConversionType::FromUniquePtrToValue,
-                        rust_conversion: RustConversionType::FromStr,
-                    }
+                    TypeConversionPolicy::new(
+                        ty,
+                        CppConversionType::FromUniquePtrToValue,
+                        RustConversionType::FromStr,
+                    )
                 } else if matches!(
                     sophistication,
                     TypeConversionSophistication::SimpleForSubclasses
                 ) {
-                    TypeConversionPolicy {
-                        unwrapped_type: ty,
-                        cpp_conversion: CppConversionType::FromUniquePtrToValue,
-                        rust_conversion: RustConversionType::None,
-                    }
+                    TypeConversionPolicy::new(
+                        ty,
+                        CppConversionType::FromUniquePtrToValue,
+                        RustConversionType::None,
+                    )
                 } else {
-                    TypeConversionPolicy {
-                        unwrapped_type: ty,
-                        cpp_conversion: CppConversionType::FromPtrToValue,
-                        rust_conversion: RustConversionType::FromValueParamToPtr,
-                    }
+                    TypeConversionPolicy::new(
+                        ty,
+                        CppConversionType::FromPtrToValue,
+                        RustConversionType::FromValueParamToPtr,
+                    )
                 }
             }
             Type::Ptr(tp) => {
                 let rust_conversion = force_rust_conversion.unwrap_or(RustConversionType::None);
                 if is_move_constructor {
-                    TypeConversionPolicy {
-                        unwrapped_type: ty.clone(),
-                        cpp_conversion: CppConversionType::FromPtrToMove,
+                    TypeConversionPolicy::new(
+                        ty.clone(),
+                        CppConversionType::FromPtrToMove,
                         rust_conversion,
-                    }
+                    )
                 } else if is_rvalue_ref {
-                    TypeConversionPolicy {
-                        unwrapped_type: *tp.elem.clone(),
-                        cpp_conversion: CppConversionType::FromPtrToValue,
-                        rust_conversion: RustConversionType::FromRValueParamToPtr,
-                    }
+                    TypeConversionPolicy::new(
+                        *tp.elem.clone(),
+                        CppConversionType::FromPtrToValue,
+                        RustConversionType::FromRValueParamToPtr,
+                    )
+                } else if matches!(
+                    self.config.unsafe_policy,
+                    UnsafePolicy::ReferencesWrappedAllFunctionsSafe
+                ) && is_reference
+                    && !rust_conversion_forced
+                    && !is_placement_return_destination
+                {
+                    TypeConversionPolicy::new(
+                        ty.clone(),
+                        CppConversionType::FromPointerToReference,
+                        RustConversionType::FromReferenceWrapperToPointer,
+                    )
                 } else {
-                    TypeConversionPolicy {
-                        unwrapped_type: ty.clone(),
-                        cpp_conversion: CppConversionType::None,
-                        rust_conversion,
-                    }
+                    TypeConversionPolicy::new(ty.clone(), CppConversionType::None, rust_conversion)
                 }
             }
+            Type::Reference(TypeReference {
+                elem, mutability, ..
+            }) if matches!(
+                self.config.unsafe_policy,
+                UnsafePolicy::ReferencesWrappedAllFunctionsSafe
+            ) && !rust_conversion_forced
+                && !is_placement_return_destination =>
+            {
+                let is_mut = mutability.is_some();
+                TypeConversionPolicy::new(
+                    if is_mut {
+                        panic!("Never expected to find &mut T at this point, we should be Pin<&mut T> by now")
+                    } else {
+                        parse_quote! { *const #elem }
+                    },
+                    CppConversionType::FromPointerToReference,
+                    RustConversionType::FromReferenceWrapperToPointer,
+                )
+            }
             _ => {
                 let rust_conversion = force_rust_conversion.unwrap_or(RustConversionType::None);
-                TypeConversionPolicy {
-                    unwrapped_type: ty.clone(),
-                    cpp_conversion: CppConversionType::None,
-                    rust_conversion,
-                }
+                TypeConversionPolicy::new(ty.clone(), CppConversionType::None, rust_conversion)
             }
         }
     }
@@ -1870,8 +1924,19 @@
                         }
                     }
                     _ => {
-                        let was_reference = matches!(boxed_type.as_ref(), Type::Reference(_));
-                        let conversion = Some(TypeConversionPolicy::new_unconverted(ty.clone()));
+                        let was_reference = references.ref_return;
+                        let conversion = Some(
+                            if was_reference
+                                && matches!(
+                                    self.config.unsafe_policy,
+                                    UnsafePolicy::ReferencesWrappedAllFunctionsSafe
+                                )
+                            {
+                                TypeConversionPolicy::return_reference_into_wrapper(ty.clone())
+                            } else {
+                                TypeConversionPolicy::new_unconverted(ty.clone())
+                            },
+                        );
                         ReturnTypeAnalysis {
                             rt: ReturnType::Type(*rarrow, boxed_type),
                             conversion,
@@ -2138,3 +2203,24 @@
         }
     }
 }
+
+fn extract_type_from_pinned_mut_ref(ty: &TypePath) -> Type {
+    match ty
+        .path
+        .segments
+        .last()
+        .expect("was not std::pin::Pin")
+        .arguments
+    {
+        syn::PathArguments::AngleBracketed(ref ab) => {
+            match ab.args.first().expect("did not have angle bracketed args") {
+                syn::GenericArgument::Type(ref ty) => match ty {
+                    Type::Reference(ref tyr) => tyr.elem.as_ref().clone(),
+                    _ => panic!("pin did not contain a reference"),
+                },
+                _ => panic!("argument was not a type"),
+            }
+        }
+        _ => panic!("did not find angle bracketed args"),
+    }
+}
diff --git a/third_party/autocxx/engine/src/conversion/analysis/fun/subclass.rs b/third_party/autocxx/engine/src/conversion/analysis/fun/subclass.rs
index c017249..6383d2c 100644
--- a/third_party/autocxx/engine/src/conversion/analysis/fun/subclass.rs
+++ b/third_party/autocxx/engine/src/conversion/analysis/fun/subclass.rs
@@ -10,7 +10,7 @@
 
 use syn::{parse_quote, FnArg, PatType, Type, TypePtr};
 
-use crate::conversion::analysis::fun::{FnKind, MethodKind, ReceiverMutability};
+use crate::conversion::analysis::fun::{FnKind, MethodKind, ReceiverMutability, UnsafePolicy};
 use crate::conversion::analysis::pod::PodPhase;
 use crate::conversion::api::{
     CppVisibility, FuncToConvert, Provenance, RustSubclassFnDetails, SubclassConstructorDetails,
@@ -79,12 +79,18 @@
     receiver_mutability: &ReceiverMutability,
     receiver: QualifiedName,
     is_pure_virtual: bool,
+    unsafe_policy: &UnsafePolicy,
 ) -> Api<FnPrePhase1> {
     let param_names = analysis
         .param_details
         .iter()
         .map(|pd| pd.name.clone())
         .collect();
+    let requires_unsafe = if matches!(unsafe_policy, UnsafePolicy::AllFunctionsUnsafe) {
+        UnsafetyNeeded::Always
+    } else {
+        UnsafetyNeeded::from_param_details(&analysis.param_details, false)
+    };
     Api::SubclassTraitItem {
         name,
         details: SuperclassMethod {
@@ -93,7 +99,7 @@
             ret_type: analysis.ret_type.clone(),
             param_names,
             receiver_mutability: receiver_mutability.clone(),
-            requires_unsafe: UnsafetyNeeded::from_param_details(&analysis.param_details, false),
+            requires_unsafe,
             is_pure_virtual,
             receiver,
         },
@@ -107,6 +113,7 @@
     receiver_mutability: &ReceiverMutability,
     superclass: &QualifiedName,
     dependencies: Vec<QualifiedName>,
+    unsafe_policy: &UnsafePolicy,
 ) -> Api<FnPrePhase1> {
     let cpp = sub.cpp();
     let holder_name = sub.holder();
@@ -131,6 +138,11 @@
         .skip(1)
         .map(|p| p.conversion.clone())
         .collect();
+    let requires_unsafe = if matches!(unsafe_policy, UnsafePolicy::AllFunctionsUnsafe) {
+        UnsafetyNeeded::Always
+    } else {
+        UnsafetyNeeded::from_param_details(&analysis.param_details, false)
+    };
     Api::RustSubclassFn {
         name: ApiName::new_in_root_namespace(rust_call_name.clone()),
         subclass: sub.clone(),
@@ -151,7 +163,7 @@
             superclass: superclass.clone(),
             receiver_mutability: receiver_mutability.clone(),
             dependencies,
-            requires_unsafe: UnsafetyNeeded::from_param_details(&analysis.param_details, false),
+            requires_unsafe,
             is_pure_virtual: matches!(
                 analysis.kind,
                 FnKind::Method {
diff --git a/third_party/autocxx/engine/src/conversion/analysis/pod/mod.rs b/third_party/autocxx/engine/src/conversion/analysis/pod/mod.rs
index 6722c23..eeb5051 100644
--- a/third_party/autocxx/engine/src/conversion/analysis/pod/mod.rs
+++ b/third_party/autocxx/engine/src/conversion/analysis/pod/mod.rs
@@ -18,7 +18,7 @@
 use crate::{
     conversion::{
         analysis::type_converter::{self, add_analysis, TypeConversionContext, TypeConverter},
-        api::{AnalysisPhase, Api, ApiName, CppVisibility, NullPhase, StructDetails, TypeKind},
+        api::{AnalysisPhase, Api, ApiName, NullPhase, StructDetails, TypeKind},
         apivec::ApiVec,
         convert_error::{ConvertErrorWithContext, ErrorContext},
         error_reporter::convert_apis,
@@ -134,12 +134,6 @@
     config: &IncludeCppConfig,
 ) -> Result<Box<dyn Iterator<Item = Api<PodPhase>>>, ConvertErrorWithContext> {
     let id = name.name.get_final_ident();
-    if details.vis != CppVisibility::Public {
-        return Err(ConvertErrorWithContext(
-            ConvertError::NonPublicNestedType,
-            Some(ErrorContext::new_for_item(id)),
-        ));
-    }
     let metadata = BindgenSemanticAttributes::new_retaining_others(&mut details.item.attrs);
     metadata.check_for_fatal_attrs(&id)?;
     let bases = get_bases(&details.item);
@@ -208,9 +202,14 @@
     extra_apis: &mut ApiVec<NullPhase>,
 ) -> Vec<ConvertError> {
     let mut convert_errors = Vec::new();
+    let struct_type_params = s
+        .generics
+        .type_params()
+        .map(|tp| tp.ident.clone())
+        .collect();
+    let type_conversion_context = TypeConversionContext::WithinStructField { struct_type_params };
     for f in &s.fields {
-        let annotated =
-            type_converter.convert_type(f.ty.clone(), ns, &TypeConversionContext::WithinReference);
+        let annotated = type_converter.convert_type(f.ty.clone(), ns, &type_conversion_context);
         match annotated {
             Ok(mut r) => {
                 extra_apis.append(&mut r.extra_apis);
diff --git a/third_party/autocxx/engine/src/conversion/analysis/type_converter.rs b/third_party/autocxx/engine/src/conversion/analysis/type_converter.rs
index afdda8a..7e2d2bd 100644
--- a/third_party/autocxx/engine/src/conversion/analysis/type_converter.rs
+++ b/third_party/autocxx/engine/src/conversion/analysis/type_converter.rs
@@ -91,6 +91,7 @@
 /// from [TypeConverter] _might_ be used in the [cxx::bridge].
 pub(crate) enum TypeConversionContext {
     WithinReference,
+    WithinStructField { struct_type_params: HashSet<Ident> },
     WithinContainer,
     OuterType { pointer_treatment: PointerTreatment },
 }
@@ -98,13 +99,25 @@
 impl TypeConversionContext {
     fn pointer_treatment(&self) -> PointerTreatment {
         match self {
-            Self::WithinReference | Self::WithinContainer => PointerTreatment::Pointer,
+            Self::WithinReference | Self::WithinContainer | Self::WithinStructField { .. } => {
+                PointerTreatment::Pointer
+            }
             Self::OuterType { pointer_treatment } => *pointer_treatment,
         }
     }
     fn allow_instantiation_of_forward_declaration(&self) -> bool {
         matches!(self, Self::WithinReference)
     }
+    fn allowed_generic_type(&self, ident: &Ident) -> bool {
+        match self {
+            Self::WithinStructField { struct_type_params }
+                if struct_type_params.contains(ident) =>
+            {
+                false
+            }
+            _ => true,
+        }
+    }
 }
 
 /// A type which can convert from a type encountered in `bindgen`
@@ -156,7 +169,7 @@
     ) -> Result<Annotated<Type>, ConvertError> {
         let result = match ty {
             Type::Path(p) => {
-                let newp = self.convert_type_path(p, ns)?;
+                let newp = self.convert_type_path(p, ns, ctx)?;
                 if let Type::Path(newpp) = &newp.ty {
                     let qn = QualifiedName::from_type_path(newpp);
                     if !ctx.allow_instantiation_of_forward_declaration()
@@ -216,6 +229,7 @@
         &mut self,
         mut typ: TypePath,
         ns: &Namespace,
+        ctx: &TypeConversionContext,
     ) -> Result<Annotated<Type>, ConvertError> {
         // First, qualify any unqualified paths.
         if typ.path.segments.iter().next().unwrap().ident != "root" {
@@ -323,7 +337,24 @@
                 // Oh poop. It's a generic type which cxx won't be able to handle.
                 // We'll have to come up with a concrete type in both the cxx::bridge (in Rust)
                 // and a corresponding typedef in C++.
-                // Let's first see if this is a concrete version of a templated type
+                // First let's see if this actually depends on a generic type
+                // param of the surrounding struct.
+                for seg in &typ.path.segments {
+                    if let PathArguments::AngleBracketed(args) = &seg.arguments {
+                        for arg in args.args.iter() {
+                            if let GenericArgument::Type(Type::Path(typ)) = arg {
+                                if let Some(seg) = typ.path.segments.last() {
+                                    if typ.path.segments.len() == 1
+                                        && !ctx.allowed_generic_type(&seg.ident)
+                                    {
+                                        return Err(ConvertError::ReferringToGenericTypeParam);
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+                // Second, let's see if this is a concrete version of a templated type
                 // which we already rejected. Some, but possibly not all, of the reasons
                 // for its rejection would also apply to any concrete types we
                 // make. Err on the side of caution. In future we may be able to relax
@@ -393,6 +424,14 @@
                     if encountered.contains(&new_tn) {
                         return Err(ConvertError::InfinitelyRecursiveTypedef(tn.clone()));
                     }
+                    if typ
+                        .path
+                        .segments
+                        .iter()
+                        .any(|seg| seg.ident.to_string().starts_with("_bindgen_mod"))
+                    {
+                        return Err(ConvertError::TypedefToTypeInAnonymousNamespace);
+                    }
                     encountered.insert(new_tn.clone());
                     tn = new_tn;
                 }
diff --git a/third_party/autocxx/engine/src/conversion/api.rs b/third_party/autocxx/engine/src/conversion/api.rs
index 3d04674..c5a1b60 100644
--- a/third_party/autocxx/engine/src/conversion/api.rs
+++ b/third_party/autocxx/engine/src/conversion/api.rs
@@ -54,7 +54,6 @@
 
 /// Details about a C++ struct.
 pub(crate) struct StructDetails {
-    pub(crate) vis: CppVisibility,
     pub(crate) item: ItemStruct,
     pub(crate) layout: Option<Layout>,
     pub(crate) has_rvalue_reference_fields: bool,
@@ -713,3 +712,10 @@
         Ok(Box::new(std::iter::once(Api::Enum { name, item })))
     }
 }
+
+/// Whether a type is a pointer of some kind.
+pub(crate) enum Pointerness {
+    Not,
+    ConstPtr,
+    MutPtr,
+}
diff --git a/third_party/autocxx/engine/src/conversion/codegen_cpp/function_wrapper_cpp.rs b/third_party/autocxx/engine/src/conversion/codegen_cpp/function_wrapper_cpp.rs
index b2e1ea3..5367626 100644
--- a/third_party/autocxx/engine/src/conversion/codegen_cpp/function_wrapper_cpp.rs
+++ b/third_party/autocxx/engine/src/conversion/codegen_cpp/function_wrapper_cpp.rs
@@ -6,8 +6,11 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+use syn::{Type, TypePtr};
+
 use crate::conversion::{
     analysis::fun::function_wrapper::{CppConversionType, TypeConversionPolicy},
+    api::Pointerness,
     ConvertError,
 };
 
@@ -30,12 +33,39 @@
     pub(super) fn converted_type(&self, cpp_name_map: &CppNameMap) -> Result<String, ConvertError> {
         match self.cpp_conversion {
             CppConversionType::FromValueToUniquePtr => self.unique_ptr_wrapped_type(cpp_name_map),
+            CppConversionType::FromReferenceToPointer => {
+                let (const_string, ty) = match self.cxxbridge_type() {
+                    Type::Ptr(TypePtr {
+                        mutability: Some(_),
+                        elem,
+                        ..
+                    }) => ("", elem.as_ref()),
+                    Type::Ptr(TypePtr { elem, .. }) => ("const ", elem.as_ref()),
+                    _ => panic!("Not a pointer"),
+                };
+                Ok(format!(
+                    "{}{}*",
+                    const_string,
+                    type_to_cpp(ty, cpp_name_map)?
+                ))
+            }
             _ => self.unwrapped_type_as_string(cpp_name_map),
         }
     }
 
     fn unwrapped_type_as_string(&self, cpp_name_map: &CppNameMap) -> Result<String, ConvertError> {
-        type_to_cpp(&self.unwrapped_type, cpp_name_map)
+        type_to_cpp(self.cxxbridge_type(), cpp_name_map)
+    }
+
+    pub(crate) fn is_a_pointer(&self) -> Pointerness {
+        match self.cxxbridge_type() {
+            Type::Ptr(TypePtr {
+                mutability: Some(_),
+                ..
+            }) => Pointerness::MutPtr,
+            Type::Ptr(_) => Pointerness::ConstPtr,
+            _ => Pointerness::Not,
+        }
     }
 
     fn unique_ptr_wrapped_type(
@@ -60,6 +90,7 @@
             CppConversionType::None | CppConversionType::FromReturnValueToPlacementPtr => {
                 Some(var_name.to_string())
             }
+            CppConversionType::FromPointerToReference { .. } => Some(format!("(*{})", var_name)),
             CppConversionType::Move => Some(format!("std::move({})", var_name)),
             CppConversionType::FromUniquePtrToValue | CppConversionType::FromPtrToMove => {
                 Some(format!("std::move(*{})", var_name))
@@ -78,6 +109,7 @@
                 })
             }
             CppConversionType::IgnoredPlacementPtrParameter => None,
+            CppConversionType::FromReferenceToPointer { .. } => Some(format!("&{}", var_name)),
         })
     }
 }
diff --git a/third_party/autocxx/engine/src/conversion/codegen_cpp/mod.rs b/third_party/autocxx/engine/src/conversion/codegen_cpp/mod.rs
index 02e92b2..0e6dcce 100644
--- a/third_party/autocxx/engine/src/conversion/codegen_cpp/mod.rs
+++ b/third_party/autocxx/engine/src/conversion/codegen_cpp/mod.rs
@@ -559,7 +559,7 @@
 
             underlying_function_call = match placement_param {
                 Some(placement_param) => {
-                    let tyname = type_to_cpp(&ret.unwrapped_type, &self.original_name_map)?;
+                    let tyname = type_to_cpp(ret.cxxbridge_type(), &self.original_name_map)?;
                     format!("new({}) {}({})", placement_param, tyname, call_itself)
                 }
                 None => format!("return {}", call_itself),
@@ -684,6 +684,13 @@
             "{}& As_{}_mut() {{ return *this; }}",
             super_name, super_name
         ));
+        self.additional_functions.push(ExtraCpp {
+            declaration: Some(format!(
+                "inline std::unique_ptr<{}> {}_As_{}_UniquePtr(std::unique_ptr<{}> u) {{ return std::unique_ptr<{}>(u.release()); }}",
+                superclass.to_cpp_name(), subclass.cpp(), super_name, subclass.cpp(), superclass.to_cpp_name(),
+                )),
+                ..Default::default()
+        });
         // And now constructors
         let mut constructor_decls: Vec<String> = Vec::new();
         for constructor in constructors {
@@ -700,7 +707,7 @@
         }
         self.additional_functions.push(ExtraCpp {
             type_definition: Some(format!(
-                "class {} : {}\n{{\npublic:\n{}\n{}\nvoid {}() const;\nprivate:rust::Box<{}> obs;\nvoid really_remove_ownership();\n\n}};",
+                "class {} : public {}\n{{\npublic:\n{}\n{}\nvoid {}() const;\nprivate:rust::Box<{}> obs;\nvoid really_remove_ownership();\n\n}};",
                 subclass.cpp(),
                 superclass.to_cpp_name(),
                 constructor_decls.join("\n"),
diff --git a/third_party/autocxx/engine/src/conversion/codegen_rs/fun_codegen.rs b/third_party/autocxx/engine/src/conversion/codegen_rs/fun_codegen.rs
index 7c32b1b..db222a6 100644
--- a/third_party/autocxx/engine/src/conversion/codegen_rs/fun_codegen.rs
+++ b/third_party/autocxx/engine/src/conversion/codegen_rs/fun_codegen.rs
@@ -6,6 +6,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+use autocxx_parser::IncludeCppConfig;
 use indexmap::set::IndexSet as HashSet;
 use std::borrow::Cow;
 
@@ -23,15 +24,15 @@
     function_wrapper_rs::RustParamConversion,
     maybe_unsafes_to_tokens,
     unqualify::{unqualify_params, unqualify_ret_type},
-    ImplBlockDetails, MaybeUnsafeStmt, RsCodegenResult, TraitImplBlockDetails, Use,
+    ImplBlockDetails, ImplBlockKey, MaybeUnsafeStmt, RsCodegenResult, TraitImplBlockDetails, Use,
 };
 use crate::{
     conversion::{
         analysis::fun::{
-            ArgumentAnalysis, FnAnalysis, FnKind, MethodKind, RustRenameStrategy,
-            TraitMethodDetails,
+            function_wrapper::TypeConversionPolicy, ArgumentAnalysis, FnAnalysis, FnKind,
+            MethodKind, RustRenameStrategy, TraitMethodDetails,
         },
-        api::UnsafetyNeeded,
+        api::{Pointerness, UnsafetyNeeded},
     },
     types::{Namespace, QualifiedName},
 };
@@ -89,6 +90,7 @@
     analysis: FnAnalysis,
     cpp_call_name: String,
     non_pod_types: &HashSet<QualifiedName>,
+    config: &IncludeCppConfig,
 ) -> RsCodegenResult {
     if analysis.ignore_reason.is_err() || !analysis.externally_callable {
         return RsCodegenResult::default();
@@ -96,6 +98,7 @@
     let cxxbridge_name = analysis.cxxbridge_name;
     let rust_name = &analysis.rust_name;
     let ret_type = analysis.ret_type;
+    let ret_conversion = analysis.ret_conversion;
     let param_details = analysis.param_details;
     let wrapper_function_needed = analysis.cpp_wrapper.is_some();
     let params = analysis.params;
@@ -119,6 +122,9 @@
         always_unsafe_due_to_trait_definition,
         doc_attrs: &doc_attrs,
         non_pod_types,
+        ret_type: &ret_type,
+        ret_conversion: &ret_conversion,
+        reference_wrappers: config.unsafe_policy.requires_cpprefs(),
     };
     // In rare occasions, we might need to give an explicit lifetime.
     let (lifetime_tokens, params, ret_type) = add_explicit_lifetime_if_necessary(
@@ -148,15 +154,14 @@
                 impl_entry = Some(fn_generator.generate_method_impl(
                     matches!(method_kind, MethodKind::Constructor { .. }),
                     impl_for,
-                    &ret_type,
                 ));
             }
             FnKind::TraitMethod { ref details, .. } => {
-                trait_impl_entry = Some(fn_generator.generate_trait_impl(details, &ret_type));
+                trait_impl_entry = Some(fn_generator.generate_trait_impl(details));
             }
             _ => {
                 // Generate plain old function
-                bindgen_mod_items.push(fn_generator.generate_function_impl(&ret_type));
+                bindgen_mod_items.push(fn_generator.generate_function_impl());
             }
         }
     }
@@ -225,20 +230,23 @@
 #[derive(Clone)]
 struct FnGenerator<'a> {
     param_details: &'a [ArgumentAnalysis],
+    ret_conversion: &'a Option<TypeConversionPolicy>,
+    ret_type: &'a ReturnType,
     cxxbridge_name: &'a Ident,
     rust_name: &'a str,
     unsafety: &'a UnsafetyNeeded,
     always_unsafe_due_to_trait_definition: bool,
     doc_attrs: &'a Vec<Attribute>,
     non_pod_types: &'a HashSet<QualifiedName>,
+    reference_wrappers: bool,
 }
 
 impl<'a> FnGenerator<'a> {
     fn common_parts<'b>(
-        &self,
+        &'b self,
         avoid_self: bool,
         parameter_reordering: &Option<Vec<usize>>,
-        ret_type: &'b ReturnType,
+        ret_type: Option<ReturnType>,
     ) -> (
         Option<TokenStream>,
         Punctuated<FnArg, Comma>,
@@ -249,15 +257,20 @@
         let mut local_variables = Vec::new();
         let mut arg_list = Vec::new();
         let mut ptr_arg_name = None;
-        let mut ret_type = Cow::Borrowed(ret_type);
+        let mut ret_type: Cow<'a, _> = ret_type
+            .map(Cow::Owned)
+            .unwrap_or(Cow::Borrowed(self.ret_type));
         let mut any_conversion_requires_unsafe = false;
+        let mut variable_counter = 0usize;
         for pd in self.param_details {
             let wrapper_arg_name = if pd.self_type.is_some() && !avoid_self {
                 parse_quote!(self)
             } else {
                 pd.name.clone()
             };
-            let rust_for_param = pd.conversion.rust_conversion(wrapper_arg_name.clone());
+            let rust_for_param = pd
+                .conversion
+                .rust_conversion(parse_quote! { #wrapper_arg_name }, &mut variable_counter);
             match rust_for_param {
                 RustParamConversion::Param {
                     ty,
@@ -305,6 +318,39 @@
             },
             any_conversion_requires_unsafe || matches!(self.unsafety, UnsafetyNeeded::JustBridge),
         );
+        let context_is_unsafe = matches!(self.unsafety, UnsafetyNeeded::Always)
+            || self.always_unsafe_due_to_trait_definition;
+        let (call_body, ret_type) = match self.ret_conversion {
+            Some(ret_conversion) if ret_conversion.rust_work_needed() => {
+                let expr = maybe_unsafes_to_tokens(vec![call_body], context_is_unsafe);
+                let conv =
+                    ret_conversion.rust_conversion(parse_quote! { #expr }, &mut variable_counter);
+                let (conversion, requires_unsafe, ty) = match conv {
+                    RustParamConversion::Param {
+                        local_variables, ..
+                    } if !local_variables.is_empty() => panic!("return type required variables"),
+                    RustParamConversion::Param {
+                        conversion,
+                        conversion_requires_unsafe,
+                        ty,
+                        ..
+                    } => (conversion, conversion_requires_unsafe, ty),
+                    _ => panic!(
+                        "Unexpected - return type is supposed to be converted to a return type"
+                    ),
+                };
+                (
+                    if requires_unsafe {
+                        MaybeUnsafeStmt::NeedsUnsafe(conversion)
+                    } else {
+                        MaybeUnsafeStmt::Normal(conversion)
+                    },
+                    Cow::Owned(parse_quote! { -> #ty }),
+                )
+            }
+            _ => (call_body, ret_type),
+        };
+
         let call_stmts = if let Some(ptr_arg_name) = ptr_arg_name {
             let mut closure_stmts = local_variables;
             closure_stmts.push(MaybeUnsafeStmt::binary(
@@ -323,8 +369,6 @@
             call_stmts.push(call_body);
             call_stmts
         };
-        let context_is_unsafe = matches!(self.unsafety, UnsafetyNeeded::Always)
-            || self.always_unsafe_due_to_trait_definition;
         let call_body = maybe_unsafes_to_tokens(call_stmts, context_is_unsafe);
         (lifetime_tokens, wrapper_params, ret_type, call_body)
     }
@@ -334,13 +378,44 @@
         &self,
         avoid_self: bool,
         impl_block_type_name: &QualifiedName,
-        ret_type: &ReturnType,
     ) -> Box<ImplBlockDetails> {
         let (lifetime_tokens, wrapper_params, ret_type, call_body) =
-            self.common_parts(avoid_self, &None, ret_type);
+            self.common_parts(avoid_self, &None, None);
         let rust_name = make_ident(self.rust_name);
         let unsafety = self.unsafety.wrapper_token();
         let doc_attrs = self.doc_attrs;
+        let receiver_pointerness = self
+            .param_details
+            .iter()
+            .next()
+            .map(|pd| pd.conversion.is_a_pointer())
+            .unwrap_or(Pointerness::Not);
+        let ty = impl_block_type_name.get_final_ident();
+        let ty = if self.reference_wrappers {
+            match receiver_pointerness {
+                Pointerness::MutPtr => ImplBlockKey {
+                    ty: parse_quote! {
+                        CppMutRef< 'a, #ty>
+                    },
+                    lifetime: Some(parse_quote! { 'a }),
+                },
+                Pointerness::ConstPtr => ImplBlockKey {
+                    ty: parse_quote! {
+                        CppRef< 'a, #ty>
+                    },
+                    lifetime: Some(parse_quote! { 'a }),
+                },
+                Pointerness::Not => ImplBlockKey {
+                    ty: parse_quote! { # ty },
+                    lifetime: None,
+                },
+            }
+        } else {
+            ImplBlockKey {
+                ty: parse_quote! { # ty },
+                lifetime: None,
+            }
+        };
         Box::new(ImplBlockDetails {
             item: ImplItem::Method(parse_quote! {
                 #(#doc_attrs)*
@@ -348,18 +423,14 @@
                     #call_body
                 }
             }),
-            ty: impl_block_type_name.get_final_ident(),
+            ty,
         })
     }
 
     /// Generate an 'impl Trait for Type { methods-go-here }' in its entrety.
-    fn generate_trait_impl(
-        &self,
-        details: &TraitMethodDetails,
-        ret_type: &ReturnType,
-    ) -> Box<TraitImplBlockDetails> {
+    fn generate_trait_impl(&self, details: &TraitMethodDetails) -> Box<TraitImplBlockDetails> {
         let (lifetime_tokens, wrapper_params, ret_type, call_body) =
-            self.common_parts(details.avoid_self, &details.parameter_reordering, ret_type);
+            self.common_parts(details.avoid_self, &details.parameter_reordering, None);
         let doc_attrs = self.doc_attrs;
         let unsafety = self.unsafety.wrapper_token();
         let key = details.trt.clone();
@@ -381,25 +452,28 @@
     ) -> Box<ImplBlockDetails> {
         let ret_type: ReturnType = parse_quote! { -> impl autocxx::moveit::new::New<Output=Self> };
         let (lifetime_tokens, wrapper_params, ret_type, call_body) =
-            self.common_parts(true, &None, &ret_type);
+            self.common_parts(true, &None, Some(ret_type));
         let rust_name = make_ident(&self.rust_name);
         let doc_attrs = self.doc_attrs;
         let unsafety = self.unsafety.wrapper_token();
-        Box::new(ImplBlockDetails {
-            item: ImplItem::Method(parse_quote! {
+        let ty = impl_block_type_name.get_final_ident();
+        let ty = parse_quote! { #ty };
+        let stuff = quote! {
                 #(#doc_attrs)*
                 pub #unsafety fn #rust_name #lifetime_tokens ( #wrapper_params ) #ret_type {
                     #call_body
                 }
-            }),
-            ty: impl_block_type_name.get_final_ident(),
+        };
+        Box::new(ImplBlockDetails {
+            item: ImplItem::Method(parse_quote! { #stuff }),
+            ty: ImplBlockKey { ty, lifetime: None },
         })
     }
 
     /// Generate a function call wrapper
-    fn generate_function_impl(&self, ret_type: &ReturnType) -> Item {
+    fn generate_function_impl(&self) -> Item {
         let (lifetime_tokens, wrapper_params, ret_type, call_body) =
-            self.common_parts(false, &None, ret_type);
+            self.common_parts(false, &None, None);
         let rust_name = make_ident(self.rust_name);
         let doc_attrs = self.doc_attrs;
         let unsafety = self.unsafety.wrapper_token();
diff --git a/third_party/autocxx/engine/src/conversion/codegen_rs/function_wrapper_rs.rs b/third_party/autocxx/engine/src/conversion/codegen_rs/function_wrapper_rs.rs
index a3fc71f..708d41c 100644
--- a/third_party/autocxx/engine/src/conversion/codegen_rs/function_wrapper_rs.rs
+++ b/third_party/autocxx/engine/src/conversion/codegen_rs/function_wrapper_rs.rs
@@ -7,7 +7,7 @@
 // except according to those terms.
 
 use proc_macro2::TokenStream;
-use syn::{Pat, Type, TypePtr};
+use syn::{Expr, Type, TypePtr};
 
 use crate::{
     conversion::analysis::fun::function_wrapper::{RustConversionType, TypeConversionPolicy},
@@ -32,8 +32,7 @@
 }
 
 impl TypeConversionPolicy {
-    /// If returns `None` then this parameter should be omitted entirely.
-    pub(super) fn rust_conversion(&self, var: Pat) -> RustParamConversion {
+    pub(super) fn rust_conversion(&self, var: Expr, counter: &mut usize) -> RustParamConversion {
         match self.rust_conversion {
             RustConversionType::None => RustParamConversion::Param {
                 ty: self.converted_rust_type(),
@@ -63,7 +62,7 @@
                 }
             }
             RustConversionType::FromPinMaybeUninitToPtr => {
-                let ty = match &self.unwrapped_type {
+                let ty = match self.cxxbridge_type() {
                     Type::Ptr(TypePtr { elem, .. }) => &*elem,
                     _ => panic!("Not a ptr"),
                 };
@@ -80,7 +79,7 @@
                 }
             }
             RustConversionType::FromPinMoveRefToPtr => {
-                let ty = match &self.unwrapped_type {
+                let ty = match self.cxxbridge_type() {
                     Type::Ptr(TypePtr { elem, .. }) => &*elem,
                     _ => panic!("Not a ptr"),
                 };
@@ -99,7 +98,7 @@
                 }
             }
             RustConversionType::FromTypeToPtr => {
-                let ty = match &self.unwrapped_type {
+                let ty = match self.cxxbridge_type() {
                     Type::Ptr(TypePtr { elem, .. }) => &*elem,
                     _ => panic!("Not a ptr"),
                 };
@@ -123,13 +122,11 @@
                 };
                 let handler_type = make_ident(handler_type);
                 let param_trait = make_ident(param_trait);
-                let var_name = if let Pat::Ident(pti) = &var {
-                    &pti.ident
-                } else {
-                    panic!("Unexpected non-ident parameter name");
-                };
-                let space_var_name = make_ident(format!("{}_space", var_name));
-                let ty = &self.unwrapped_type;
+                let var_counter = *counter;
+                *counter += 1;
+                let space_var_name = format!("space{}", var_counter);
+                let space_var_name = make_ident(space_var_name);
+                let ty = self.cxxbridge_type();
                 let ty = parse_quote! { impl autocxx::#param_trait<#ty> };
                 // This is the usual trick to put something on the stack, then
                 // immediately shadow the variable name so it can't be accessed or moved.
@@ -148,7 +145,7 @@
                             },
                         ),
                         MaybeUnsafeStmt::needs_unsafe(
-                            quote! { #space_var_name.as_mut().populate(#var_name); },
+                            quote! { #space_var_name.as_mut().populate(#var); },
                         ),
                     ],
                     conversion: quote! {
@@ -161,12 +158,55 @@
             // but not in the arguments for the wrapper function, because instead we return an
             // impl New which uses the cxx::bridge function's pointer parameter.
             RustConversionType::FromPlacementParamToNewReturn => {
-                let ty = match &self.unwrapped_type {
+                let ty = match self.cxxbridge_type() {
                     Type::Ptr(TypePtr { elem, .. }) => *(*elem).clone(),
                     _ => panic!("Not a ptr"),
                 };
                 RustParamConversion::ReturnValue { ty }
             }
+            RustConversionType::FromPointerToReferenceWrapper => {
+                let (is_mut, ty) = match self.cxxbridge_type() {
+                    Type::Ptr(TypePtr {
+                        mutability, elem, ..
+                    }) => (mutability.is_some(), elem.as_ref()),
+                    _ => panic!("Not a pointer"),
+                };
+                let (ty, wrapper_name) = if is_mut {
+                    (parse_quote! { CppMutRef<'a, #ty> }, "CppMutRef")
+                } else {
+                    (parse_quote! { CppRef<'a, #ty> }, "CppRef")
+                };
+                let wrapper_name = make_ident(wrapper_name);
+                RustParamConversion::Param {
+                    ty,
+                    local_variables: Vec::new(),
+                    conversion: quote! {
+                        #wrapper_name (#var, std::marker::PhantomData)
+                    },
+                    conversion_requires_unsafe: false,
+                }
+            }
+            RustConversionType::FromReferenceWrapperToPointer => {
+                let (is_mut, ty) = match self.cxxbridge_type() {
+                    Type::Ptr(TypePtr {
+                        mutability, elem, ..
+                    }) => (mutability.is_some(), elem.as_ref()),
+                    _ => panic!("Not a pointer"),
+                };
+                let ty = if is_mut {
+                    parse_quote! { &mut CppMutRef<'a, #ty> }
+                } else {
+                    parse_quote! { &CppRef<'a, #ty> }
+                };
+                RustParamConversion::Param {
+                    ty,
+                    local_variables: Vec::new(),
+                    conversion: quote! {
+                        #var .0
+                    },
+                    conversion_requires_unsafe: false,
+                }
+            }
         }
     }
 }
diff --git a/third_party/autocxx/engine/src/conversion/codegen_rs/mod.rs b/third_party/autocxx/engine/src/conversion/codegen_rs/mod.rs
index d488d52..5dd4cbb 100644
--- a/third_party/autocxx/engine/src/conversion/codegen_rs/mod.rs
+++ b/third_party/autocxx/engine/src/conversion/codegen_rs/mod.rs
@@ -17,13 +17,14 @@
 use indexmap::map::IndexMap as HashMap;
 use indexmap::set::IndexSet as HashSet;
 
-use autocxx_parser::{ExternCppType, IncludeCppConfig, RustFun};
+use autocxx_parser::{ExternCppType, IncludeCppConfig, RustFun, UnsafePolicy};
 
 use itertools::Itertools;
 use proc_macro2::{Span, TokenStream};
 use syn::{
     parse_quote, punctuated::Punctuated, token::Comma, Attribute, Expr, FnArg, ForeignItem,
-    ForeignItemFn, Ident, ImplItem, Item, ItemForeignMod, ItemMod, TraitItem, TypePath,
+    ForeignItemFn, Ident, ImplItem, Item, ItemForeignMod, ItemMod, Lifetime, TraitItem, Type,
+    TypePath,
 };
 
 use crate::{
@@ -61,10 +62,16 @@
 use super::{convert_error::ErrorContext, ConvertError};
 use quote::quote;
 
+#[derive(Clone, Hash, PartialEq, Eq)]
+struct ImplBlockKey {
+    ty: Type,
+    lifetime: Option<Lifetime>,
+}
+
 /// An entry which needs to go into an `impl` block for a given type.
 struct ImplBlockDetails {
     item: ImplItem,
-    ty: Ident,
+    ty: ImplBlockKey,
 }
 
 struct TraitImplBlockDetails {
@@ -130,10 +137,96 @@
     .to_vec()
 }
 
+fn get_cppref_items() -> Vec<Item> {
+    [
+        Item::Struct(parse_quote! {
+            #[repr(transparent)]
+            pub struct CppRef<'a, T>(pub *const T, pub ::std::marker::PhantomData<&'a T>);
+        }),
+        Item::Impl(parse_quote! {
+            impl<'a, T> autocxx::CppRef<'a, T> for CppRef<'a, T> {
+                fn as_ptr(&self) -> *const T {
+                    self.0
+                }
+            }
+        }),
+        Item::Struct(parse_quote! {
+            #[repr(transparent)]
+            pub struct CppMutRef<'a, T>(pub *mut T, pub ::std::marker::PhantomData<&'a T>);
+        }),
+        Item::Impl(parse_quote! {
+            impl<'a, T> autocxx::CppRef<'a, T> for CppMutRef<'a, T> {
+                fn as_ptr(&self) -> *const T {
+                    self.0
+                }
+            }
+        }),
+        Item::Impl(parse_quote! {
+            impl<'a, T> autocxx::CppMutRef<'a, T> for CppMutRef<'a, T> {
+                fn as_mut_ptr(&self) -> *mut T {
+                    self.0
+                }
+            }
+        }),
+        Item::Impl(parse_quote! {
+            impl<'a, T: ::cxx::private::UniquePtrTarget> CppMutRef<'a, T> {
+                /// Create a const C++ reference from this mutable C++ reference.
+                pub fn as_cpp_ref(&self) -> CppRef<'a, T> {
+                    use autocxx::CppRef;
+                    CppRef(self.as_ptr(), ::std::marker::PhantomData)
+                }
+            }
+        }),
+        Item::Struct(parse_quote! {
+            /// "Pins" a `UniquePtr` to an object, so that C++-compatible references can be created.
+            /// See [`::autocxx::CppPin`]
+            #[repr(transparent)]
+            pub struct CppUniquePtrPin<T: ::cxx::private::UniquePtrTarget>(::cxx::UniquePtr<T>);
+        }),
+        Item::Impl(parse_quote! {
+            impl<'a, T: 'a + ::cxx::private::UniquePtrTarget> autocxx::CppPin<'a, T> for CppUniquePtrPin<T>
+            {
+                type CppRef = CppRef<'a, T>;
+                type CppMutRef = CppMutRef<'a, T>;
+                fn as_ptr(&self) -> *const T {
+                    // TODO add as_ptr to cxx to avoid the ephemeral reference
+                    self.0.as_ref().unwrap() as *const T
+                }
+                fn as_mut_ptr(&mut self) -> *mut T {
+                    unsafe { ::std::pin::Pin::into_inner_unchecked(self.0.as_mut().unwrap()) as *mut T  }
+                }
+                fn as_cpp_ref(&self) -> Self::CppRef {
+                    CppRef(self.as_ptr(), ::std::marker::PhantomData)
+                }
+                fn as_cpp_mut_ref(&mut self) -> Self::CppMutRef {
+                    CppMutRef(self.as_mut_ptr(), ::std::marker::PhantomData)
+                }
+            }
+        }),
+        Item::Impl(parse_quote! {
+            impl<T: ::cxx::private::UniquePtrTarget> CppUniquePtrPin<T> {
+                pub fn new(item: ::cxx::UniquePtr<T>) -> Self {
+                    Self(item)
+                }
+            }
+        }),
+        Item::Fn(parse_quote! {
+            /// Pin this item so that we can create C++ references to it.
+            /// This makes it impossible to hold Rust references because Rust
+            /// references are fundamentally incompatible with C++ references.
+            pub fn cpp_pin_uniqueptr<T: ::cxx::private::UniquePtrTarget> (item: ::cxx::UniquePtr<T>) -> CppUniquePtrPin<T> {
+                CppUniquePtrPin::new(item)
+            }
+        })
+    ]
+    .to_vec()
+}
+
 /// Type which handles generation of Rust code.
 /// In practice, much of the "generation" involves connecting together
 /// existing lumps of code within the Api structures.
 pub(crate) struct RsCodeGenerator<'a> {
+    unsafe_policy: &'a UnsafePolicy,
     include_list: &'a [String],
     bindgen_mod: ItemMod,
     original_name_map: CppNameMap,
@@ -145,12 +238,14 @@
     /// Generate code for a set of APIs that was discovered during parsing.
     pub(crate) fn generate_rs_code(
         all_apis: ApiVec<FnPhase>,
+        unsafe_policy: &'a UnsafePolicy,
         include_list: &'a [String],
         bindgen_mod: ItemMod,
         config: &'a IncludeCppConfig,
         header_name: Option<String>,
     ) -> Vec<Item> {
         let c = Self {
+            unsafe_policy,
             include_list,
             bindgen_mod,
             original_name_map: original_name_map_from_apis(&all_apis),
@@ -219,6 +314,9 @@
         let mut extern_rust_mod_items = extern_rust_mod_items.into_iter().flatten().collect();
         // And a list of global items to include at the top level.
         let mut all_items: Vec<Item> = all_items.into_iter().flatten().collect();
+        if self.config.unsafe_policy.requires_cpprefs() {
+            all_items.append(&mut get_cppref_items())
+        }
         // And finally any C++ we need to generate. And by "we" I mean autocxx not cxx.
         let has_additional_cpp_needs = additional_cpp_needs.into_iter().any(std::convert::identity);
         extern_c_mod_items.extend(self.build_include_foreign_items(has_additional_cpp_needs));
@@ -357,23 +455,24 @@
     }
 
     fn append_uses_for_ns(&mut self, items: &mut Vec<Item>, ns: &Namespace) {
+        let mut imports_from_super = vec!["cxxbridge"];
+        if !self.config.exclude_utilities() {
+            imports_from_super.push("ToCppString");
+        }
+        if self.config.unsafe_policy.requires_cpprefs() {
+            imports_from_super.extend(["CppRef", "CppMutRef"]);
+        }
+        let imports_from_super = imports_from_super.into_iter().map(make_ident);
         let super_duper = std::iter::repeat(make_ident("super")); // I'll get my coat
         let supers = super_duper.clone().take(ns.depth() + 2);
         items.push(Item::Use(parse_quote! {
             #[allow(unused_imports)]
             use self::
                 #(#supers)::*
-            ::cxxbridge;
+            ::{
+                #(#imports_from_super),*
+            };
         }));
-        if !self.config.exclude_utilities() {
-            let supers = super_duper.clone().take(ns.depth() + 2);
-            items.push(Item::Use(parse_quote! {
-                #[allow(unused_imports)]
-                use self::
-                    #(#supers)::*
-                ::ToCppString;
-            }));
-        }
         let supers = super_duper.take(ns.depth() + 1);
         items.push(Item::Use(parse_quote! {
             #[allow(unused_imports)]
@@ -407,8 +506,10 @@
             }
         }
         for (ty, entries) in impl_entries_by_type.into_iter() {
+            let lt = ty.lifetime.map(|lt| quote! { < #lt > });
+            let ty = ty.ty;
             output_items.push(Item::Impl(parse_quote! {
-                impl #ty {
+                impl #lt #ty {
                     #(#entries)*
                 }
             }))
@@ -487,6 +588,7 @@
                 analysis,
                 cpp_call_name,
                 non_pod_types,
+                self.config,
             ),
             Api::Const { const_item, .. } => RsCodegenResult {
                 bindgen_mod_items: vec![Item::Const(const_item)],
@@ -609,8 +711,11 @@
                 name, superclass, ..
             } => {
                 let methods = associated_methods.get(&superclass);
-                let generate_peer_constructor =
-                    subclasses_with_a_single_trivial_constructor.contains(&name.0.name);
+                let generate_peer_constructor = subclasses_with_a_single_trivial_constructor.contains(&name.0.name) &&
+                    // TODO: Create an UnsafeCppPeerConstructor trait for calling an unsafe
+                    // constructor instead? Need to create unsafe versions of everything that uses
+                    // it too.
+                    matches!(self.unsafe_policy, UnsafePolicy::AllFunctionsSafe);
                 self.generate_subclass(name, &superclass, methods, generate_peer_constructor)
             }
             Api::ExternCppType {
@@ -723,6 +828,10 @@
         extern_c_mod_items.push(parse_quote! {
             fn #as_mut_id(self: Pin<&mut #cpp_id>) -> Pin<&mut #super_cxxxbridge_id>;
         });
+        let as_unique_ptr_id = make_ident(format!("{}_As_{}_UniquePtr", cpp_id, super_name));
+        extern_c_mod_items.push(parse_quote! {
+            fn #as_unique_ptr_id(u: UniquePtr<#cpp_id>) -> UniquePtr<#super_cxxxbridge_id>;
+        });
         bindgen_mod_items.push(parse_quote! {
             impl AsRef<#super_path> for super::super::super::#id {
                 fn as_ref(&self) -> &cxxbridge::#super_cxxxbridge_id {
@@ -740,6 +849,14 @@
                 }
             }
         });
+        let rs_as_unique_ptr_id = make_ident(format!("as_{}_unique_ptr", super_name));
+        bindgen_mod_items.push(parse_quote! {
+            impl super::super::super::#id {
+                pub fn #rs_as_unique_ptr_id(u: cxx::UniquePtr<#cpp_id>) -> cxx::UniquePtr<cxxbridge::#super_cxxxbridge_id> {
+                    cxxbridge::#as_unique_ptr_id(u)
+                }
+            }
+        });
         let remove_ownership = sub.remove_ownership();
         global_items.push(parse_quote! {
             #[allow(non_snake_case)]
@@ -813,7 +930,7 @@
                         .as_ref()
                         .#borrow()
                         .expect(#reentrancy_panic_msg);
-                    let r = std::ops::#deref_ty::#deref_call(& #mut_token b);
+                    let r = ::std::ops::#deref_ty::#deref_call(& #mut_token b);
                     #methods_trait :: #method_name
                         (r,
                         #args)
@@ -1012,7 +1129,7 @@
         rust_path: TypePath,
         ns_depth: usize,
     ) -> RsCodegenResult {
-        let id = name.get_final_ident();
+        let id = name.type_path_from_root();
         let super_duper = std::iter::repeat(make_ident("super"));
         let supers = super_duper.take(ns_depth + 2);
         let use_statement = parse_quote! {
@@ -1057,7 +1174,10 @@
                         fn #method(_uhoh: autocxx::BindingGenerationFailure) {
                         }
                     },
-                    ty: self_ty,
+                    ty: ImplBlockKey {
+                        ty: parse_quote! { #self_ty },
+                        lifetime: None,
+                    },
                 })),
                 None,
                 None,
diff --git a/third_party/autocxx/engine/src/conversion/convert_error.rs b/third_party/autocxx/engine/src/conversion/convert_error.rs
index 0de4f19..ba8344d 100644
--- a/third_party/autocxx/engine/src/conversion/convert_error.rs
+++ b/third_party/autocxx/engine/src/conversion/convert_error.rs
@@ -125,6 +125,12 @@
     ConcreteVersionOfIgnoredTemplate,
     #[error("bindgen decided to call this type _bindgen_ty_N because it couldn't deduce the correct name for it. That means we can't generate C++ bindings to it.")]
     BindgenTy,
+    #[error("This is a typedef to a type in an anonymous namespace, not currently supported.")]
+    TypedefToTypeInAnonymousNamespace,
+    #[error("This type refers to a generic type parameter of an outer type, which is not yet supported.")]
+    ReferringToGenericTypeParam,
+    #[error("This forward declaration was nested within another struct/class. autocxx is unable to represent inner types if they are forward declarations.")]
+    ForwardDeclaredNestedType,
 }
 
 /// Ensures that error contexts are always created using the constructors in this
diff --git a/third_party/autocxx/engine/src/conversion/mod.rs b/third_party/autocxx/engine/src/conversion/mod.rs
index 3043dcf..aa639a2 100644
--- a/third_party/autocxx/engine/src/conversion/mod.rs
+++ b/third_party/autocxx/engine/src/conversion/mod.rs
@@ -157,7 +157,7 @@
                 // parameterized by a richer set of metadata.
                 Self::dump_apis("adding casts", &analyzed_apis);
                 let analyzed_apis =
-                    FnAnalyzer::analyze_functions(analyzed_apis, unsafe_policy, self.config);
+                    FnAnalyzer::analyze_functions(analyzed_apis, &unsafe_policy, self.config);
                 // If any of those functions turned out to be pure virtual, don't attempt
                 // to generate UniquePtr implementations for the type, since it can't
                 // be instantiated.
@@ -197,6 +197,7 @@
                 )?;
                 let rs = RsCodeGenerator::generate_rs_code(
                     analyzed_apis,
+                    &unsafe_policy,
                     self.include_list,
                     bindgen_mod,
                     self.config,
diff --git a/third_party/autocxx/engine/src/conversion/parse/bindgen_semantic_attributes.rs b/third_party/autocxx/engine/src/conversion/parse/bindgen_semantic_attributes.rs
index 8b789ae..a8de9ce 100644
--- a/third_party/autocxx/engine/src/conversion/parse/bindgen_semantic_attributes.rs
+++ b/third_party/autocxx/engine/src/conversion/parse/bindgen_semantic_attributes.rs
@@ -61,6 +61,11 @@
                 ConvertError::UnusedTemplateParam,
                 Some(ErrorContext::new_for_item(id_for_context.clone())),
             ))
+        } else if self.get_cpp_visibility() != CppVisibility::Public {
+            Err(ConvertErrorWithContext(
+                ConvertError::NonPublicNestedType,
+                Some(ErrorContext::new_for_item(id_for_context.clone())),
+            ))
         } else {
             Ok(())
         }
diff --git a/third_party/autocxx/engine/src/conversion/parse/parse_bindgen.rs b/third_party/autocxx/engine/src/conversion/parse/parse_bindgen.rs
index 0818aa5..2d4e3de 100644
--- a/third_party/autocxx/engine/src/conversion/parse/parse_bindgen.rs
+++ b/third_party/autocxx/engine/src/conversion/parse/parse_bindgen.rs
@@ -207,7 +207,7 @@
                 // cxx::bridge can't cope with type aliases to generic
                 // types at the moment.
                 let name = api_name_qualified(ns, s.ident.clone(), &annotations)?;
-                let err = annotations.check_for_fatal_attrs(&s.ident).err();
+                let mut err = annotations.check_for_fatal_attrs(&s.ident).err();
                 let api = if ns.is_empty() && self.config.is_rust_type(&s.ident) {
                     None
                 } else if Self::spot_forward_declaration(&s.fields)
@@ -219,6 +219,15 @@
                     // we spot in the previous clause) but instead with an _address field.
                     // So, solely in the case where we're storing up an error about such
                     // a templated type, we'll also treat such cases as forward declarations.
+                    //
+                    // We'll also at this point check for one specific problem with
+                    // forward declarations.
+                    if err.is_none() && name.cpp_name().contains("::") {
+                        err = Some(ConvertErrorWithContext(
+                            ConvertError::ForwardDeclaredNestedType,
+                            Some(ErrorContext::new_for_item(s.ident)),
+                        ));
+                    }
                     Some(UnanalyzedApi::ForwardDeclaration { name, err })
                 } else {
                     let has_rvalue_reference_fields = s.fields.iter().any(|f| {
@@ -227,7 +236,6 @@
                     Some(UnanalyzedApi::Struct {
                         name,
                         details: Box::new(StructDetails {
-                            vis: annotations.get_cpp_visibility(),
                             layout: annotations.get_layout(),
                             item: s,
                             has_rvalue_reference_fields,
diff --git a/third_party/autocxx/engine/src/lib.rs b/third_party/autocxx/engine/src/lib.rs
index 4edc4a4..86a31ea 100644
--- a/third_party/autocxx/engine/src/lib.rs
+++ b/third_party/autocxx/engine/src/lib.rs
@@ -304,6 +304,7 @@
             .default_enum_style(bindgen::EnumVariation::Rust {
                 non_exhaustive: false,
             })
+            .rustfmt_bindings(log::log_enabled!(log::Level::Info))
             .size_t_is_usize(true)
             .enable_cxx_namespaces()
             .generate_inline_functions(true)
diff --git a/third_party/autocxx/engine/src/types.rs b/third_party/autocxx/engine/src/types.rs
index 0d11895..337da14 100644
--- a/third_party/autocxx/engine/src/types.rs
+++ b/third_party/autocxx/engine/src/types.rs
@@ -195,6 +195,16 @@
         }
     }
 
+    pub(crate) fn type_path_from_root(&self) -> TypePath {
+        let segs = self
+            .ns_segment_iter()
+            .chain(std::iter::once(&self.1))
+            .map(make_ident);
+        parse_quote! {
+            #(#segs)::*
+        }
+    }
+
     /// Iterator over segments in the namespace of this name.
     pub(crate) fn ns_segment_iter(&self) -> impl Iterator<Item = &String> {
         self.0.iter()
diff --git a/third_party/autocxx/examples/chromium-fake-render-frame-host/Cargo.toml b/third_party/autocxx/examples/chromium-fake-render-frame-host/Cargo.toml
index a961708..3862f2c 100644
--- a/third_party/autocxx/examples/chromium-fake-render-frame-host/Cargo.toml
+++ b/third_party/autocxx/examples/chromium-fake-render-frame-host/Cargo.toml
@@ -13,9 +13,9 @@
 edition = "2021"
 
 [dependencies]
-cxx = "1.0.54"
-autocxx = { path = "../..", version="0.22.0" }
+cxx = "1.0.68"
+autocxx = { path = "../..", version="0.22.3" }
 
 [build-dependencies]
-autocxx-build = { path = "../../gen/build", version="0.22.0" }
+autocxx-build = { path = "../../gen/build", version="0.22.3" }
 miette = { version="4.3", features = [ "fancy" ] }
diff --git a/third_party/autocxx/examples/llvm/Cargo.toml b/third_party/autocxx/examples/llvm/Cargo.toml
index d8806c8..4c03db7 100644
--- a/third_party/autocxx/examples/llvm/Cargo.toml
+++ b/third_party/autocxx/examples/llvm/Cargo.toml
@@ -13,7 +13,7 @@
 edition = "2021"
 
 [dependencies]
-cxx = "1.0.54"
+cxx = "1.0.68"
 autocxx = { path = "../..", version="0.17.2" }
 
 [build-dependencies]
diff --git a/third_party/autocxx/examples/non-trivial-type-on-stack/Cargo.toml b/third_party/autocxx/examples/non-trivial-type-on-stack/Cargo.toml
index ff227d4..110bd1b 100644
--- a/third_party/autocxx/examples/non-trivial-type-on-stack/Cargo.toml
+++ b/third_party/autocxx/examples/non-trivial-type-on-stack/Cargo.toml
@@ -13,9 +13,9 @@
 edition = "2021"
 
 [dependencies]
-cxx = "1.0.54"
-autocxx = { path = "../..", version="0.22.0" }
+cxx = "1.0.68"
+autocxx = { path = "../..", version="0.22.3" }
 
 [build-dependencies]
-autocxx-build = { path = "../../gen/build", version="0.22.0" }
+autocxx-build = { path = "../../gen/build", version="0.22.3" }
 miette = { version="4.3", features = [ "fancy" ] }
diff --git a/third_party/autocxx/examples/pod/Cargo.toml b/third_party/autocxx/examples/pod/Cargo.toml
index 71bd5a0..9f4ee5a 100644
--- a/third_party/autocxx/examples/pod/Cargo.toml
+++ b/third_party/autocxx/examples/pod/Cargo.toml
@@ -13,9 +13,9 @@
 edition = "2021"
 
 [dependencies]
-cxx = "1.0.54"
-autocxx = { path = "../..", version="0.22.0" }
+cxx = "1.0.68"
+autocxx = { path = "../..", version="0.22.3" }
 
 [build-dependencies]
-autocxx-build = { path = "../../gen/build", version="0.22.0" }
+autocxx-build = { path = "../../gen/build", version="0.22.3" }
 miette = { version="4.3", features = [ "fancy" ] }
diff --git a/third_party/autocxx/examples/reference-wrappers/Cargo.toml b/third_party/autocxx/examples/reference-wrappers/Cargo.toml
new file mode 100644
index 0000000..cb85e80
--- /dev/null
+++ b/third_party/autocxx/examples/reference-wrappers/Cargo.toml
@@ -0,0 +1,21 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+# https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+# <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+# option. This file may not be copied, modified, or distributed
+# except according to those terms.
+
+[package]
+name = "autocxx-reference-wrapper-example"
+version = "0.22.1"
+authors = ["Adrian Taylor <adetaylor@chromium.org>"]
+edition = "2021"
+
+[dependencies]
+cxx = "1.0.68"
+autocxx = { path = "../..", version="0.22.3" }
+
+[build-dependencies]
+autocxx-build = { path = "../../gen/build", version="0.22.3" }
+miette = { version="4.3", features=["fancy"]}
diff --git a/third_party/autocxx/examples/reference-wrappers/build.rs b/third_party/autocxx/examples/reference-wrappers/build.rs
new file mode 100644
index 0000000..64c573d
--- /dev/null
+++ b/third_party/autocxx/examples/reference-wrappers/build.rs
@@ -0,0 +1,18 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() -> miette::Result<()> {
+    let path = std::path::PathBuf::from("src");
+    let mut b = autocxx_build::Builder::new("src/main.rs", &[&path]).build()?;
+    b.flag_if_supported("-std=c++14")
+    .file("src/input.cc").compile("autocxx-reference-wrapper-example");
+
+    println!("cargo:rerun-if-changed=src/main.rs");
+    println!("cargo:rerun-if-changed=src/input.h");
+    Ok(())
+}
diff --git a/third_party/autocxx/examples/reference-wrappers/src/input.cc b/third_party/autocxx/examples/reference-wrappers/src/input.cc
new file mode 100644
index 0000000..afb7eb2
--- /dev/null
+++ b/third_party/autocxx/examples/reference-wrappers/src/input.cc
@@ -0,0 +1,11 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#include "input.h"
+
+Goat the_goat;
diff --git a/third_party/autocxx/examples/reference-wrappers/src/input.h b/third_party/autocxx/examples/reference-wrappers/src/input.h
new file mode 100644
index 0000000..5e3c6e9
--- /dev/null
+++ b/third_party/autocxx/examples/reference-wrappers/src/input.h
@@ -0,0 +1,42 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#pragma once
+
+#include <cstdint>
+#include <sstream>
+#include <stdint.h>
+#include <string>
+
+class Goat {
+public:
+    Goat() : horns(0) {}
+    void add_a_horn();
+    std::string describe() const;
+private:
+    uint32_t horns;
+};
+
+
+inline void Goat::add_a_horn() { horns++; }
+inline std::string Goat::describe() const {
+    std::ostringstream oss;
+    std::string plural = horns == 1 ? "" : "s";
+    oss << "This goat has " << horns << " horn" << plural << ".";
+    return oss.str();
+}
+
+class Field {
+public:
+    const Goat& get_goat() const {
+        return the_goat;
+    }
+
+private:
+    Goat the_goat;
+};
diff --git a/third_party/autocxx/examples/reference-wrappers/src/main.rs b/third_party/autocxx/examples/reference-wrappers/src/main.rs
new file mode 100644
index 0000000..6bea2ff
--- /dev/null
+++ b/third_party/autocxx/examples/reference-wrappers/src/main.rs
@@ -0,0 +1,67 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// This example serves to demonstrate the experimental C++
+// reference wrappers. They exist because C++ references are not
+// the same as Rust references: C++ references may alias, whereas
+// Rust references may not.
+//
+// Standard autocxx behavior therefore introduces unsoundness when
+// C++ references are encountered and treated like Rust references.
+// (cxx has this soundness problem for Trivial types; autocxx
+// makes it worse in that the same problem applies even for
+// opaque types, because we make them sized such that we can allocate
+// them on the stack).
+//
+// Reference wrappers solve that problem because internally, they're
+// just pointers. On the other hand, they're awkward to use,
+// especially in the absence of the Rust "arbitrary self types"
+// feature.
+
+use autocxx::prelude::*;
+
+include_cpp! {
+    #include "input.h"
+    // This next line enables C++ reference wrappers
+    safety!(unsafe_references_wrapped)
+    generate!("Goat")
+    generate!("Field")
+}
+
+fn main() {
+    // Create a cxx::UniquePtr as normal for a Field object.
+    let field = ffi::Field::new().within_unique_ptr();
+    // We assume at this point that C++ has had no opportunity
+    // to retain any reference to the Field. That's not strictly
+    // true, due to RVO, but under all reasonable circumstances
+    // Rust currently has exclusive ownership of the Field we've
+    // been given.
+    // Therefore, at this point in the program, it's still
+    // OK to take Rust references to this Field.
+    let _field_rust_ref = field.as_ref();
+    // However, as soon as we want to pass a reference to the field
+    // back to C++, we have to ensure we have no Rust references
+    // in existence. So: we imprison the object in a "CppPin":
+    let field = ffi::cpp_pin_uniqueptr(field);
+    // We can no longer take Rust references to the field...
+    //   let _field_rust_ref = field.as_ref();
+    // However, we can take C++ references. And use such references
+    // to call methods...
+    let another_goat = field.as_cpp_ref().get_goat();
+    // The 'get_goat' method in C++ returns a reference, so this is
+    // another CppRef, not a Rust reference.
+    assert_eq!(
+        another_goat
+            .describe() // returns a UniquePtr<CxxString>, there
+                // are no Rust or C++ references involved at this point.
+            .as_ref()
+            .unwrap()
+            .to_string_lossy(),
+        "This goat has 0 horns."
+    );
+}
diff --git a/third_party/autocxx/examples/s2/Cargo.toml b/third_party/autocxx/examples/s2/Cargo.toml
index 994a381..04524b1 100644
--- a/third_party/autocxx/examples/s2/Cargo.toml
+++ b/third_party/autocxx/examples/s2/Cargo.toml
@@ -15,9 +15,9 @@
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-cxx = "1.0.54"
-autocxx = { path = "../..", version="0.22.0" }
+cxx = "1.0.68"
+autocxx = { path = "../..", version="0.22.3" }
 
 [build-dependencies]
-autocxx-build = { path = "../../gen/build", version="0.22.0" }
+autocxx-build = { path = "../../gen/build", version="0.22.3" }
 miette = { version="4.3", features = [ "fancy" ] }
diff --git a/third_party/autocxx/examples/steam-mini/Cargo.toml b/third_party/autocxx/examples/steam-mini/Cargo.toml
index b292f25..ceb81ff 100644
--- a/third_party/autocxx/examples/steam-mini/Cargo.toml
+++ b/third_party/autocxx/examples/steam-mini/Cargo.toml
@@ -15,9 +15,9 @@
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-cxx = "1.0.54"
-autocxx = { path = "../..", version="0.22.0" }
+cxx = "1.0.68"
+autocxx = { path = "../..", version="0.22.3" }
 
 [build-dependencies]
-autocxx-build = { path = "../../gen/build", version="0.22.0" }
+autocxx-build = { path = "../../gen/build", version="0.22.3" }
 miette = { version="4.3", features = [ "fancy" ] }
diff --git a/third_party/autocxx/examples/subclass/Cargo.toml b/third_party/autocxx/examples/subclass/Cargo.toml
index 8a01205..dc93f53 100644
--- a/third_party/autocxx/examples/subclass/Cargo.toml
+++ b/third_party/autocxx/examples/subclass/Cargo.toml
@@ -15,13 +15,13 @@
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-cxx = "1.0.54"
-autocxx = { path = "../..", version="0.22.0" }
+cxx = "1.0.68"
+autocxx = { path = "../..", version="0.22.3" }
 uwuify = "0.2.2"
 textwrap = "0.14"
 fastrand = "1.5.0"
 
 [build-dependencies]
-autocxx-build = { path = "../../gen/build", version="0.22.0" }
+autocxx-build = { path = "../../gen/build", version="0.22.3" }
 regex = "1.5.4"
 miette = { version="4.3", features = [ "fancy" ] }
diff --git a/third_party/autocxx/gen/build/Cargo.toml b/third_party/autocxx/gen/build/Cargo.toml
index 2895526..678a844 100644
--- a/third_party/autocxx/gen/build/Cargo.toml
+++ b/third_party/autocxx/gen/build/Cargo.toml
@@ -8,7 +8,7 @@
 
 [package]
 name = "autocxx-build"
-version = "0.22.0"
+version = "0.22.3"
 authors = ["Adrian Taylor <adetaylor@chromium.org>"]
 edition = "2021"
 license = "MIT OR Apache-2.0"
@@ -22,7 +22,7 @@
 static = [ "autocxx-engine/static" ]
 
 [dependencies]
-autocxx-engine = { version="=0.22.0", path="../../engine", features = ["build"] }
+autocxx-engine = { version="=0.22.3", path="../../engine", features = ["build"] }
 env_logger = "0.9.0"
 indexmap = "1.8"
 
diff --git a/third_party/autocxx/gen/cmd/BUILD b/third_party/autocxx/gen/cmd/BUILD
new file mode 100644
index 0000000..2cee72d
--- /dev/null
+++ b/third_party/autocxx/gen/cmd/BUILD
@@ -0,0 +1,32 @@
+load("@rules_rust//rust:defs.bzl", "rust_binary")
+
+package(default_visibility = ["//visibility:public"])
+
+licenses([
+    "notice",  # MIT from expression "MIT OR Apache-2.0"
+])
+
+rust_binary(
+    name = "gen",
+    srcs = glob(["**/*.rs"]),
+    crate_root = "src/main.rs",
+    edition = "2021",
+    rustc_flags = [
+        "--cap-lints=allow",
+    ],
+    tags = [
+        "cargo-raze",
+        "crate-name=autocxx-gen",
+        "manual",
+    ],
+    version = "0.16.0",
+    deps = [
+        "//third_party/autocxx/engine:autocxx_engine",
+        "@//third_party/cargo:clap",
+        "@//third_party/cargo:env_logger",
+        "@//third_party/cargo:indexmap",
+        "@//third_party/cargo:miette",
+        "@//third_party/cargo:pathdiff",
+        "@//third_party/cargo:proc_macro2",
+    ],
+)
diff --git a/third_party/autocxx/gen/cmd/Cargo.toml b/third_party/autocxx/gen/cmd/Cargo.toml
index 744d29a..51eb56c 100644
--- a/third_party/autocxx/gen/cmd/Cargo.toml
+++ b/third_party/autocxx/gen/cmd/Cargo.toml
@@ -8,7 +8,7 @@
 
 [package]
 name = "autocxx-gen"
-version = "0.22.0"
+version = "0.22.3"
 authors = ["Adrian Taylor <adetaylor@chromium.org>"]
 edition = "2021"
 license = "MIT OR Apache-2.0"
@@ -22,7 +22,7 @@
 static = [ "autocxx-engine/static" ]
 
 [dependencies]
-autocxx-engine = { version="=0.22.0", path="../../engine" }
+autocxx-engine = { version="=0.22.3", path="../../engine" }
 clap = { version = "3.1.2", features = ["cargo"] }
 proc-macro2 = "1.0"
 env_logger = "0.9.0"
@@ -32,10 +32,10 @@
 
 [dev-dependencies]
 assert_cmd = "1.0.3"
-tempdir = "0.3.7"
-autocxx-integration-tests = { path = "../../integration-tests", version="=0.22.0" }
+tempfile = "3.1"
+autocxx-integration-tests = { path = "../../integration-tests", version="=0.22.3" }
 # This is necessary for building the projects created
 # by the trybuild test system...
 autocxx = { path="../.." }
-cxx = "1.0.54"
+cxx = "1.0.68"
 itertools = "0.10.3"
\ No newline at end of file
diff --git a/third_party/autocxx/gen/cmd/src/depfile.rs b/third_party/autocxx/gen/cmd/src/depfile.rs
index 5bb7a66..d8d3626 100644
--- a/third_party/autocxx/gen/cmd/src/depfile.rs
+++ b/third_party/autocxx/gen/cmd/src/depfile.rs
@@ -63,13 +63,13 @@
 mod tests {
     use std::{fs::File, io::Read};
 
-    use tempdir::TempDir;
+    use tempfile::tempdir;
 
     use super::Depfile;
 
     #[test]
     fn test_simple_depfile() {
-        let tmp_dir = TempDir::new("depfile-test").unwrap();
+        let tmp_dir = tempdir().unwrap();
         let f = tmp_dir.path().join("depfile.d");
         let mut df = Depfile::new(&f).unwrap();
         df.add_output(&tmp_dir.path().join("a/b"));
@@ -85,7 +85,7 @@
 
     #[test]
     fn test_multiple_outputs() {
-        let tmp_dir = TempDir::new("depfile-test").unwrap();
+        let tmp_dir = tempdir().unwrap();
         let f = tmp_dir.path().join("depfile.d");
         let mut df = Depfile::new(&f).unwrap();
         df.add_output(&tmp_dir.path().join("a/b"));
diff --git a/third_party/autocxx/gen/cmd/src/main.rs b/third_party/autocxx/gen/cmd/src/main.rs
index 20f278b..8b109c3 100644
--- a/third_party/autocxx/gen/cmd/src/main.rs
+++ b/third_party/autocxx/gen/cmd/src/main.rs
@@ -316,27 +316,33 @@
             name_autocxxgen_h,
         )?;
     }
-    //writer.write_placeholders(header_counter.into_inner(), desired_number, "h")?;
     if matches.is_present("gen-rs-include") {
+        if !matches.is_present("fix-rs-include-name") && desired_number.is_some() {
+            return Err(miette::Report::msg(
+                "gen-rs-include and generate-exact requires fix-rs-include-name.",
+            ));
+        }
+        let mut counter = 0usize;
         let rust_buildables = parsed_files
             .iter()
             .flat_map(|parsed_file| parsed_file.get_rs_outputs());
-        for (counter, include_cxx) in rust_buildables.enumerate() {
+        for include_cxx in rust_buildables {
             let rs_code = generate_rs_single(include_cxx);
             let fname = if matches.is_present("fix-rs-include-name") {
-                format!("gen{}.include.rs", counter)
+                name_include_rs(counter)
             } else {
                 rs_code.filename
             };
             writer.write_to_file(fname, rs_code.code.as_bytes())?;
+            counter += 1;
         }
+        writer.write_placeholders(counter, desired_number, name_include_rs)?;
     }
     if matches.is_present("gen-rs-archive") {
         let rust_buildables = parsed_files
             .iter()
             .flat_map(|parsed_file| parsed_file.get_rs_outputs());
         let json = generate_rs_archive(rust_buildables);
-        eprintln!("Writing to gen.rs.json in {:?}", outdir);
         writer.write_to_file("gen.rs.json".into(), json.as_bytes())?;
     }
     if let Some(depfile) = depfile {
@@ -353,6 +359,10 @@
     format!("gen{}.h", counter)
 }
 
+fn name_include_rs(counter: usize) -> String {
+    format!("gen{}.include.rs", counter)
+}
+
 fn get_dependency_recorder(depfile: Rc<RefCell<Depfile>>) -> Box<dyn RebuildDependencyRecorder> {
     Box::new(RecordIntoDepfile(depfile))
 }
diff --git a/third_party/autocxx/gen/cmd/tests/cmd_test.rs b/third_party/autocxx/gen/cmd/tests/cmd_test.rs
index d0e671e..7e455a2 100644
--- a/third_party/autocxx/gen/cmd/tests/cmd_test.rs
+++ b/third_party/autocxx/gen/cmd/tests/cmd_test.rs
@@ -13,7 +13,7 @@
 use assert_cmd::Command;
 use autocxx_integration_tests::{build_from_folder, RsFindMode};
 use itertools::Itertools;
-use tempdir::TempDir;
+use tempfile::{tempdir, TempDir};
 
 static MAIN_RS: &str = concat!(
     include_str!("../../../demo/src/main.rs"),
@@ -31,7 +31,7 @@
 static INPUT2_H: &str = include_str!("data/input2.h");
 static INPUT3_H: &str = include_str!("data/input3.h");
 
-const KEEP_TEMPDIRS: bool = false;
+const KEEP_TEMPDIRS: bool = true;
 
 #[test]
 fn test_help() -> Result<(), Box<dyn std::error::Error>> {
@@ -107,7 +107,7 @@
 
 #[test]
 fn test_gen() -> Result<(), Box<dyn std::error::Error>> {
-    let tmp_dir = TempDir::new("example")?;
+    let tmp_dir = tempdir()?;
     base_test(&tmp_dir, RsGenMode::Single, |_| {})?;
     File::create(tmp_dir.path().join("cxx.h"))
         .and_then(|mut cxx_h| cxx_h.write_all(autocxx_engine::HEADER.as_bytes()))?;
@@ -128,7 +128,7 @@
 
 #[test]
 fn test_gen_archive() -> Result<(), Box<dyn std::error::Error>> {
-    let tmp_dir = TempDir::new("example")?;
+    let tmp_dir = tempdir()?;
     base_test(&tmp_dir, RsGenMode::Archive, |_| {})?;
     File::create(tmp_dir.path().join("cxx.h"))
         .and_then(|mut cxx_h| cxx_h.write_all(autocxx_engine::HEADER.as_bytes()))?;
@@ -148,7 +148,7 @@
 
 #[test]
 fn test_gen_multiple_in_archive() -> Result<(), Box<dyn std::error::Error>> {
-    let tmp_dir = TempDir::new("example")?;
+    let tmp_dir = tempdir()?;
 
     let mut files = HashMap::new();
     files.insert("input2.h", INPUT2_H.as_bytes());
@@ -186,14 +186,15 @@
 
 #[test]
 fn test_include_prefixes() -> Result<(), Box<dyn std::error::Error>> {
-    let tmp_dir = TempDir::new("example")?;
+    let tmp_dir = tempdir()?;
     base_test(&tmp_dir, RsGenMode::Single, |cmd| {
         cmd.arg("--cxx-h-path")
             .arg("foo/")
             .arg("--cxxgen-h-path")
             .arg("bar/")
             .arg("--generate-exact")
-            .arg("3");
+            .arg("3")
+            .arg("--fix-rs-include-name");
     })?;
     assert_contains(&tmp_dir, "autocxxgen0.h", "foo/cxx.h");
     // Currently we don't test cxxgen-h-path because we build the demo code
@@ -203,11 +204,12 @@
 
 #[test]
 fn test_gen_fixed_num() -> Result<(), Box<dyn std::error::Error>> {
-    let tmp_dir = TempDir::new("example")?;
+    let tmp_dir = tempdir()?;
     let depfile = tmp_dir.path().join("test.d");
     base_test(&tmp_dir, RsGenMode::Single, |cmd| {
         cmd.arg("--generate-exact")
             .arg("2")
+            .arg("--fix-rs-include-name")
             .arg("--depfile")
             .arg(depfile);
     })?;
@@ -217,16 +219,16 @@
     assert_contentful(&tmp_dir, "autocxxgen0.h");
     assert_not_contentful(&tmp_dir, "gen1.h");
     assert_not_contentful(&tmp_dir, "autocxxgen1.h");
-    assert_contentful(&tmp_dir, "autocxx-ffi-default-gen.rs");
+    assert_contentful(&tmp_dir, "gen0.include.rs");
     assert_contentful(&tmp_dir, "test.d");
     File::create(tmp_dir.path().join("cxx.h"))
         .and_then(|mut cxx_h| cxx_h.write_all(autocxx_engine::HEADER.as_bytes()))?;
     let r = build_from_folder(
         tmp_dir.path(),
         &tmp_dir.path().join("demo/main.rs"),
-        vec![tmp_dir.path().join("autocxx-ffi-default-gen.rs")],
+        vec![tmp_dir.path().join("gen0.include.rs")],
         &["gen0.cc"],
-        RsFindMode::AutocxxRs,
+        RsFindMode::AutocxxRsFile,
     );
     if KEEP_TEMPDIRS {
         println!("Tempdir: {:?}", tmp_dir.into_path().to_str());
@@ -237,7 +239,7 @@
 
 #[test]
 fn test_gen_preprocess() -> Result<(), Box<dyn std::error::Error>> {
-    let tmp_dir = TempDir::new("example")?;
+    let tmp_dir = tempdir()?;
     let prepro_path = tmp_dir.path().join("preprocessed.h");
     base_test(&tmp_dir, RsGenMode::Single, |cmd| {
         cmd.env("AUTOCXX_PREPROCESS", prepro_path.to_str().unwrap());
@@ -251,7 +253,7 @@
 
 #[test]
 fn test_gen_repro() -> Result<(), Box<dyn std::error::Error>> {
-    let tmp_dir = TempDir::new("example")?;
+    let tmp_dir = tempdir()?;
     let repro_path = tmp_dir.path().join("repro.json");
     base_test(&tmp_dir, RsGenMode::Single, |cmd| {
         cmd.env("AUTOCXX_REPRO_CASE", repro_path.to_str().unwrap());
diff --git a/third_party/autocxx/integration-tests/Cargo.toml b/third_party/autocxx/integration-tests/Cargo.toml
index c139479..7791ea5 100644
--- a/third_party/autocxx/integration-tests/Cargo.toml
+++ b/third_party/autocxx/integration-tests/Cargo.toml
@@ -8,7 +8,7 @@
 
 [package]
 name = "autocxx-integration-tests"
-version = "0.22.0"
+version = "0.22.3"
 autotests = false
 edition = "2021"
 authors = ["Adrian Taylor <adetaylor@chromium.org>"]
@@ -27,15 +27,15 @@
 cc = "1.0"
 quote = "1.0"
 once_cell = "1.7"
-autocxx-engine = { version="=0.22.0", path="../engine", features = ["build"] }
+autocxx-engine = { version="=0.22.3", path="../engine", features = ["build"] }
 # This is necessary for building the projects created
 # by the trybuild test system...
-autocxx = { path="..", version="=0.22.0" }
+autocxx = { path="..", version="=0.22.3" }
 link-cplusplus = "1.0"
 tempfile = "3.1"
 indoc = "1.0"
 log = "0.4"
-cxx = "1.0.54"
+cxx = "1.0.68"
 itertools = "0.10"
 
 [dependencies.syn]
diff --git a/third_party/autocxx/integration-tests/src/lib.rs b/third_party/autocxx/integration-tests/src/lib.rs
index 3499af8..352c8ab 100644
--- a/third_party/autocxx/integration-tests/src/lib.rs
+++ b/third_party/autocxx/integration-tests/src/lib.rs
@@ -21,7 +21,7 @@
 use log::info;
 use once_cell::sync::OnceCell;
 use proc_macro2::{Span, TokenStream};
-use quote::{quote, TokenStreamExt};
+use quote::{format_ident, quote, TokenStreamExt};
 use syn::Token;
 use tempfile::{tempdir, TempDir};
 
@@ -56,6 +56,7 @@
 pub enum RsFindMode {
     AutocxxRs,
     AutocxxRsArchive,
+    AutocxxRsFile,
 }
 
 /// API to test building pre-generated files.
@@ -169,6 +170,10 @@
                 "AUTOCXX_RS_JSON_ARCHIVE",
                 self.temp_dir.path().join("gen.rs.json"),
             ),
+            RsFindMode::AutocxxRsFile => std::env::set_var(
+                "AUTOCXX_RS_FILE",
+                self.temp_dir.path().join("gen0.include.rs"),
+            ),
         };
         std::panic::catch_unwind(|| {
             let test_cases = trybuild::TestCases::new();
@@ -200,6 +205,7 @@
         None,
         None,
         None,
+        "unsafe_ffi",
     )
     .unwrap()
 }
@@ -254,10 +260,23 @@
         builder_modifier,
         code_checker,
         extra_rust,
+        "unsafe_ffi",
     )
     .unwrap()
 }
 
+pub fn run_generate_all_test(header_code: &str) {
+    run_test_ex(
+        "",
+        header_code,
+        quote! {},
+        quote! { generate_all!() },
+        None,
+        None,
+        None,
+    );
+}
+
 pub fn run_test_expect_fail(
     cxx_code: &str,
     header_code: &str,
@@ -273,6 +292,7 @@
         None,
         None,
         None,
+        "unsafe_ffi",
     )
     .expect_err("Unexpected success");
 }
@@ -294,6 +314,7 @@
         builder_modifier,
         code_checker,
         extra_rust,
+        "unsafe_ffi",
     )
     .expect_err("Unexpected success");
 }
@@ -343,14 +364,16 @@
     builder_modifier: Option<BuilderModifier>,
     rust_code_checker: Option<CodeChecker>,
     extra_rust: Option<TokenStream>,
+    safety_policy: &str,
 ) -> Result<(), TestError> {
     let hexathorpe = Token![#](Span::call_site());
+    let safety_policy = format_ident!("{}", safety_policy);
     let unexpanded_rust = quote! {
             use autocxx::prelude::*;
 
             include_cpp!(
                 #hexathorpe include "input.h"
-                safety!(unsafe_ffi)
+                safety!(#safety_policy)
                 #directives
             );
 
diff --git a/third_party/autocxx/integration-tests/tests/cpprefs_test.rs b/third_party/autocxx/integration-tests/tests/cpprefs_test.rs
new file mode 100644
index 0000000..4241dec
--- /dev/null
+++ b/third_party/autocxx/integration-tests/tests/cpprefs_test.rs
@@ -0,0 +1,96 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Tests specific to reference wrappers.
+
+use autocxx_integration_tests::{directives_from_lists, do_run_test};
+use indoc::indoc;
+use proc_macro2::TokenStream;
+use quote::quote;
+
+/// A positive test, we expect to pass.
+fn run_cpprefs_test(
+    cxx_code: &str,
+    header_code: &str,
+    rust_code: TokenStream,
+    generate: &[&str],
+    generate_pods: &[&str],
+) {
+    do_run_test(
+        cxx_code,
+        header_code,
+        rust_code,
+        directives_from_lists(generate, generate_pods, None),
+        None,
+        None,
+        None,
+        "unsafe_references_wrapped",
+    )
+    .unwrap()
+}
+
+#[test]
+fn test_method_call_mut() {
+    run_cpprefs_test(
+        "",
+        indoc! {"
+        #include <string>
+        #include <sstream>
+
+        class Goat {
+            public:
+                Goat() : horns(0) {}
+                void add_a_horn();
+            private:
+                uint32_t horns;
+        };
+
+        inline void Goat::add_a_horn() { horns++; }
+    "},
+        quote! {
+            let goat = ffi::Goat::new().within_unique_ptr();
+            let mut goat = ffi::CppUniquePtrPin::new(goat);
+            goat.as_cpp_mut_ref().add_a_horn();
+        },
+        &["Goat"],
+        &[],
+    )
+}
+
+#[test]
+fn test_method_call_const() {
+    run_cpprefs_test(
+        "",
+        indoc! {"
+        #include <string>
+        #include <sstream>
+
+        class Goat {
+            public:
+                Goat() : horns(0) {}
+                std::string describe() const;
+            private:
+                uint32_t horns;
+        };
+
+        inline std::string Goat::describe() const {
+            std::ostringstream oss;
+            std::string plural = horns == 1 ? \"\" : \"s\";
+            oss << \"This goat has \" << horns << \" horn\" << plural << \".\";
+            return oss.str();
+        }
+    "},
+        quote! {
+            let goat = ffi::Goat::new().within_unique_ptr();
+            let goat = ffi::cpp_pin_uniqueptr(goat);
+            goat.as_cpp_ref().describe();
+        },
+        &["Goat"],
+        &[],
+    )
+}
diff --git a/third_party/autocxx/integration-tests/tests/integration_test.rs b/third_party/autocxx/integration-tests/tests/integration_test.rs
index 2a3340a..16c792b8 100644
--- a/third_party/autocxx/integration-tests/tests/integration_test.rs
+++ b/third_party/autocxx/integration-tests/tests/integration_test.rs
@@ -16,8 +16,8 @@
     },
 };
 use autocxx_integration_tests::{
-    directives_from_lists, do_run_test, do_run_test_manual, run_test, run_test_ex,
-    run_test_expect_fail, run_test_expect_fail_ex, TestError,
+    directives_from_lists, do_run_test, do_run_test_manual, run_generate_all_test, run_test,
+    run_test_ex, run_test_expect_fail, run_test_expect_fail_ex, TestError,
 };
 use indoc::indoc;
 use itertools::Itertools;
@@ -3541,20 +3541,7 @@
         template <class _Ty>
         using _Remove_cvref_t = remove_cv_t<remove_reference_t<_Ty>>;
     "};
-
-    let rs = quote! {};
-
-    run_test_ex(
-        "",
-        hdr,
-        rs,
-        quote! {
-            generate_all!()
-        },
-        None,
-        None,
-        None,
-    );
+    run_generate_all_test(hdr);
 }
 
 #[test]
@@ -5044,15 +5031,7 @@
         inline void a() {}
     };
     "};
-    run_test_ex(
-        "",
-        hdr,
-        quote! {},
-        quote! { generate_all!() },
-        None,
-        None,
-        None,
-    );
+    run_generate_all_test(hdr);
 }
 
 #[test]
@@ -5941,6 +5920,7 @@
         None,
         None,
         None,
+        "unsafe_ffi",
     ) {
         Err(TestError::CppBuild(_)) => {} // be sure this fails due to a static_assert
         // rather than some runtime problem
@@ -6232,20 +6212,7 @@
 
         typedef bitset<1> mybitset;
     "};
-
-    let rs = quote! {};
-
-    run_test_ex(
-        "",
-        hdr,
-        rs,
-        quote! {
-            generate_all!()
-        },
-        None,
-        None,
-        None,
-    );
+    run_generate_all_test(hdr);
 }
 
 #[test]
@@ -6433,18 +6400,7 @@
             typedef std::a<b<d>::c, int, int> e;
         };
     "};
-    let rs = quote! {};
-    run_test_ex(
-        "",
-        hdr,
-        rs,
-        quote! {
-            generate_all!()
-        },
-        None,
-        None,
-        None,
-    );
+    run_generate_all_test(hdr);
 }
 
 #[test]
@@ -6505,18 +6461,7 @@
         }
         typedef char daft;
     "};
-    let rs = quote! {};
-    run_test_ex(
-        "",
-        hdr,
-        rs,
-        quote! {
-            generate_all!()
-        },
-        None,
-        None,
-        None,
-    );
+    run_generate_all_test(hdr);
 }
 
 #[test]
@@ -6738,6 +6683,50 @@
 }
 
 #[test]
+/// Tests extern_cpp_type with a type inside a namespace.
+fn test_extern_cpp_type_namespace() {
+    let hdr = indoc! {"
+        #include <cstdint>
+        namespace b {
+        struct B {
+            B() {}
+        };
+        }  // namespace b
+        struct A {
+            A() {}
+            b::B make_b() { return b::B(); }
+        };
+    "};
+    let hexathorpe = Token![#](Span::call_site());
+    let rs = quote! {
+        pub mod b {
+            autocxx::include_cpp! {
+                #hexathorpe include "input.h"
+                safety!(unsafe_ffi)
+                name!(ffi_b)
+                generate_pod!("b::B")
+            }
+            pub use ffi_b::b::B;
+        }
+        pub mod a {
+            autocxx::include_cpp! {
+                #hexathorpe include "input.h"
+                safety!(unsafe_ffi)
+                name!(ffi_a)
+                generate_pod!("A")
+                extern_cpp_type!("b::B", crate::b::B)
+            }
+            pub use ffi_a::A;
+        }
+        fn main() {
+            use autocxx::prelude::*;
+            let _ = crate::a::A::new().within_unique_ptr().as_mut().unwrap().make_b();
+        }
+    };
+    do_run_test_manual("", hdr, rs, None, None).unwrap();
+}
+
+#[test]
 #[ignore] // because we currently require UniquePtrTarget which this can't implement
 fn test_extern_cpp_type_manual() {
     let hdr = indoc! {"
@@ -7600,6 +7589,49 @@
 }
 
 #[test]
+/// Tests the Rust code generated for subclasses when there's a `std` module in scope representing
+/// the C++ `std` namespace. This breaks if any of the generated Rust code fails to fully qualify
+/// its references to the Rust `std`.
+fn test_subclass_with_std() {
+    let hdr = indoc! {"
+    #include <cstdint>
+    #include <chrono>
+
+    class Observer {
+    public:
+        Observer() {}
+        virtual void foo() const {}
+        virtual ~Observer() {}
+
+        void unused(std::chrono::nanoseconds) {}
+    };
+    "};
+    run_test_ex(
+        "",
+        hdr,
+        quote! {
+            let obs = MyObserver::new_rust_owned(MyObserver { a: 3, cpp_peer: Default::default() });
+            obs.borrow().foo();
+        },
+        quote! {
+            subclass!("Observer",MyObserver)
+        },
+        None,
+        None,
+        Some(quote! {
+            use autocxx::subclass::CppSubclass;
+            use ffi::Observer_methods;
+            #[autocxx::subclass::subclass]
+            pub struct MyObserver {
+                a: u32
+            }
+            impl Observer_methods for MyObserver {
+            }
+        }),
+    );
+}
+
+#[test]
 fn test_two_subclasses() {
     let hdr = indoc! {"
     #include <cstdint>
@@ -7704,6 +7736,54 @@
 }
 
 #[test]
+fn test_subclass_no_safety() {
+    let hdr = indoc! {"
+    #include <cstdint>
+
+    class Observer {
+    public:
+        Observer() {}
+        virtual void foo() = 0;
+        virtual ~Observer() {}
+    };
+    "};
+    let hexathorpe = Token![#](Span::call_site());
+    let unexpanded_rust = quote! {
+        use autocxx::prelude::*;
+
+        include_cpp!(
+            #hexathorpe include "input.h"
+            subclass!("Observer",MyObserver)
+        );
+
+        use ffi::Observer_methods;
+        #hexathorpe [autocxx::subclass::subclass]
+        pub struct MyObserver;
+        impl Observer_methods for MyObserver {
+            unsafe fn foo(&mut self) {}
+        }
+
+        use autocxx::subclass::{CppSubclass, CppPeerConstructor, CppSubclassRustPeerHolder};
+        use cxx::UniquePtr;
+        impl CppPeerConstructor<ffi::MyObserverCpp> for MyObserver {
+            fn make_peer(
+                &mut self,
+                peer_holder: CppSubclassRustPeerHolder<Self>,
+            ) -> UniquePtr<ffi::MyObserverCpp> {
+                UniquePtr::emplace(unsafe { ffi::MyObserverCpp::new(peer_holder) })
+            }
+        }
+
+        fn main() {
+            let obs = MyObserver::new_rust_owned(MyObserver { cpp_peer: Default::default() });
+            unsafe { obs.borrow_mut().foo() };
+        }
+    };
+
+    do_run_test_manual("", hdr, unexpanded_rust, None, None).unwrap()
+}
+
+#[test]
 fn test_pv_protected_constructor() {
     let hdr = indoc! {"
     #include <cstdint>
@@ -8291,6 +8371,74 @@
 }
 
 #[test]
+fn test_pv_subclass_as_superclass() {
+    let hdr = indoc! {"
+    #include <cstdint>
+    #include <memory>
+
+    class TestObserver {
+    public:
+        TestObserver() {}
+        virtual void a() const = 0;
+        virtual ~TestObserver() {}
+    };
+
+    inline void call_observer(std::unique_ptr<TestObserver> obs) { obs->a(); }
+    "};
+    run_test_ex(
+        "",
+        hdr,
+        quote! {
+            use autocxx::subclass::CppSubclass;
+            let obs = MyTestObserver::new_cpp_owned(
+                MyTestObserver::default()
+            );
+            let obs = MyTestObserver::as_TestObserver_unique_ptr(obs);
+            assert!(!Lazy::force(&STATUS).lock().unwrap().dropped);
+            ffi::call_observer(obs);
+            assert!(Lazy::force(&STATUS).lock().unwrap().sub_a_called);
+            assert!(Lazy::force(&STATUS).lock().unwrap().dropped);
+            *Lazy::force(&STATUS).lock().unwrap() = Default::default();
+        },
+        quote! {
+            generate!("call_observer")
+            subclass!("TestObserver",MyTestObserver)
+        },
+        None,
+        None,
+        Some(quote! {
+            use once_cell::sync::Lazy;
+            use std::sync::Mutex;
+
+            use ffi::TestObserver_methods;
+            #[autocxx::subclass::subclass]
+            #[derive(Default)]
+            pub struct MyTestObserver {
+            }
+            impl TestObserver_methods for MyTestObserver {
+                fn a(&self) {
+                    assert!(!Lazy::force(&STATUS).lock().unwrap().dropped);
+                    Lazy::force(&STATUS).lock().unwrap().sub_a_called = true;
+                }
+            }
+            impl Drop for MyTestObserver {
+                fn drop(&mut self) {
+                    Lazy::force(&STATUS).lock().unwrap().dropped = true;
+                }
+            }
+
+            #[derive(Default)]
+            struct Status {
+                sub_a_called: bool,
+                dropped: bool,
+            }
+
+            static STATUS: Lazy<Mutex<Status>> = Lazy::new(|| Mutex::new(Status::default()));
+        }),
+    );
+}
+
+#[test]
 fn test_cycle_nonpod_simple() {
     let hdr = indoc! {"
     #include <string>
@@ -10925,15 +11073,7 @@
           d e();
         };
     "};
-    run_test_ex(
-        "",
-        hdr,
-        quote! {},
-        quote! { generate_all!() },
-        None,
-        None,
-        None,
-    );
+    run_generate_all_test(hdr);
 }
 
 #[test]
@@ -10948,15 +11088,7 @@
         };
         } // namespace
     "};
-    run_test_ex(
-        "",
-        hdr,
-        quote! {},
-        quote! { generate_all!() },
-        None,
-        None,
-        None,
-    );
+    run_generate_all_test(hdr);
 }
 
 #[test]
@@ -11015,15 +11147,7 @@
         };
         } // namespace a
     "};
-    run_test_ex(
-        "",
-        hdr,
-        quote! {},
-        quote! { generate_all!() },
-        None,
-        None,
-        None,
-    );
+    run_generate_all_test(hdr);
 }
 
 #[test]
@@ -11044,15 +11168,7 @@
           _CharT b;
         };
     "};
-    run_test_ex(
-        "",
-        hdr,
-        quote! {},
-        quote! { generate_all!() },
-        None,
-        None,
-        None,
-    );
+    run_generate_all_test(hdr);
 }
 
 #[test]
@@ -11063,15 +11179,7 @@
           b c;
         };
     "};
-    run_test_ex(
-        "",
-        hdr,
-        quote! {},
-        quote! { generate_all!() },
-        None,
-        None,
-        None,
-    );
+    run_generate_all_test(hdr);
 }
 
 #[test]
@@ -11086,15 +11194,7 @@
         }
         }
     "};
-    run_test_ex(
-        "",
-        hdr,
-        quote! {},
-        quote! { generate_all!() },
-        None,
-        None,
-        None,
-    );
+    run_generate_all_test(hdr);
 }
 
 #[test]
@@ -11116,15 +11216,171 @@
         } // namespace
         } // namespace a
     "};
-    run_test_ex(
-        "",
-        hdr,
-        quote! {},
-        quote! { generate_all!() },
-        None,
-        None,
-        None,
-    );
+    run_generate_all_test(hdr);
+}
+
+/// The problem here is that 'g' doesn't get annotated with
+/// the unused_template semantic attribute.
+/// This seems to be because both g and f have template
+/// parameters, so they're all "used", but effectively cancel
+/// out and thus bindgen generates
+///   pub type g = root::b::f;
+/// So, what we should do here is spot any typedef depending
+/// on a template which takes template args, and reject that too.
+/// Probably.
+#[test]
+#[ignore] // https://github.com/google/autocxx/pull/1094
+fn test_issue_1094() {
+    let hdr = indoc! {"
+        namespace {
+        typedef int a;
+        }
+        namespace b {
+        template <typename> struct c;
+        template <typename d, d e> using f = __make_integer_seq<c, d, e>;
+        template <a e> using g = f<a, e>;
+        } // namespace b
+    "};
+    run_generate_all_test(hdr);
+}
+
+#[test]
+fn test_issue_1096a() {
+    let hdr = indoc! {"
+        namespace a {
+        class b {
+          class c;
+        };
+        } // namespace a
+    "};
+    run_generate_all_test(hdr);
+}
+
+#[test]
+fn test_issue_1096b() {
+    let hdr = indoc! {"
+        namespace a {
+        class b {
+        public:
+          class c;
+        };
+        } // namespace a
+    "};
+    run_generate_all_test(hdr);
+}
+
+#[test]
+fn test_issue_1096c() {
+    let hdr = indoc! {"
+        namespace a {
+        class b {
+        public:
+          class c {
+          public:
+            int d;
+          };
+        };
+        } // namespace a
+    "};
+    run_generate_all_test(hdr);
+}
+
+#[test]
+fn test_issue_1096d() {
+    let hdr = indoc! {"
+        namespace a {
+        class b {
+        private:
+          class c {
+          public:
+            int d;
+          };
+        };
+        } // namespace a
+    "};
+    run_generate_all_test(hdr);
+}
+
+#[test]
+fn test_issue_1096e() {
+    let hdr = indoc! {"
+        namespace a {
+        class b {
+        private:
+          enum c {
+              D,
+          };
+        };
+        } // namespace a
+    "};
+    run_generate_all_test(hdr);
+}
+
+/// Unclear why minimization resulted in this particular test case.
+#[test]
+#[ignore] // https://github.com/google/autocxx/pull/1097
+fn test_issue_1097() {
+    let hdr = indoc! {"
+        namespace rust {
+        inline namespace a {
+        class Str {
+        public:
+          ~Str();
+        };
+        } // namespace a
+        } // namespace rust
+    "};
+    run_generate_all_test(hdr);
+}
+
+#[test]
+fn test_issue_1098a() {
+    let hdr = indoc! {"
+        namespace {
+        namespace {
+        template <typename _CharT> class a {
+          typedef _CharT b;
+          b c;
+        };
+        template <typename _CharT> class d : a<_CharT> {};
+        } // namespace
+        } // namespace
+    "};
+    run_generate_all_test(hdr);
+}
+
+/// Need to spot structs like this:
+/// pub struct d<_CharT> {
+///  _base: root::a<_CharT>,
+/// }
+/// and not create concrete types where the inner type is something from
+/// the outer context.
+#[test]
+fn test_issue_1098b() {
+    let hdr = indoc! {"
+        template <typename _CharT> class a {
+          typedef _CharT b;
+          b c;
+        };
+        template <typename _CharT> class d : a<_CharT> {};
+    "};
+    run_generate_all_test(hdr);
+}
+
+#[test]
+fn test_issue_1098c() {
+    let hdr = indoc! {"
+        namespace {
+        namespace {
+        struct A {
+            int a;
+        };
+        typedef A B;
+        } // namespace
+        } // namespace
+        inline void take_b(const B&) {}
+    "};
+    run_generate_all_test(hdr);
 }
 
 #[test]
@@ -11185,6 +11441,58 @@
     run_test("", hdr, rs, &["RenderFrameHost"], &[]);
 }
 
+#[test]
+fn test_issue_1081() {
+    let hdr = indoc! {"
+        namespace libtorrent {
+        char version;
+        }
+        namespace libtorrent {
+        struct session;
+        }
+    "};
+    let rs = quote! {};
+    run_test("", hdr, rs, &["libtorrent::session"], &[]);
+}
+
+#[test]
+#[ignore] // This test passes under all normal builds. However
+          // it triggers a stack use-after-return in older versions of
+          // libclang which is only detected under ASAN (obviously it
+          // sometimes causes crashes the rest of the time).
+          // This UaR does not occur when the same code is processed
+          // with a HEAD version of clang itself as of June 2022. This
+          // may mean that the UaR has been fixed in later versions of
+          // the clang code, or that it only occurs when the code is used
+          // in a libclang context (not a plain clang compilation context).
+          // If the problem recurs, we should work out which of these is
+          // the case.
+fn test_issue_1125() {
+    let hdr = indoc! {"
+        namespace {
+        namespace {
+        template <class a> class b {
+          typedef a c;
+          struct {
+            c : sizeof(c);
+          };
+        };
+        } // namespace
+        } // namespace
+    "};
+    run_test_ex(
+        "",
+        hdr,
+        quote! {},
+        quote! {
+            generate_all!()
+        },
+        make_cpp17_adder(),
+        None,
+        None,
+    );
+}
+
 // Yet to test:
 // - Ifdef
 // - Out param pointers
diff --git a/third_party/autocxx/integration-tests/tests/lib.rs b/third_party/autocxx/integration-tests/tests/lib.rs
index 17f076a..8d9eba9 100644
--- a/third_party/autocxx/integration-tests/tests/lib.rs
+++ b/third_party/autocxx/integration-tests/tests/lib.rs
@@ -8,4 +8,5 @@
 
 mod builder_modifiers;
 mod code_checkers;
+mod cpprefs_test;
 mod integration_test;
diff --git a/third_party/autocxx/macro/BUILD b/third_party/autocxx/macro/BUILD
new file mode 100644
index 0000000..a924a69
--- /dev/null
+++ b/third_party/autocxx/macro/BUILD
@@ -0,0 +1,30 @@
+load("@rules_rust//rust:defs.bzl", "rust_proc_macro")
+
+package(default_visibility = ["//visibility:public"])
+
+licenses([
+    "notice",  # MIT from expression "MIT OR Apache-2.0"
+])
+
+rust_proc_macro(
+    name = "autocxx_macro",
+    srcs = glob(["**/*.rs"]),
+    crate_root = "src/lib.rs",
+    edition = "2021",
+    rustc_flags = [
+        "--cap-lints=allow",
+    ],
+    tags = [
+        "cargo-raze",
+        "crate-name=autocxx-macro",
+        "manual",
+    ],
+    version = "0.16.0",
+    deps = [
+        "//third_party/autocxx/parser:autocxx_parser",
+        "@//third_party/cargo:proc_macro2",
+        "@//third_party/cargo:proc_macro_error",
+        "@//third_party/cargo:quote",
+        "@//third_party/cargo:syn",
+    ],
+)
diff --git a/third_party/autocxx/macro/Cargo.toml b/third_party/autocxx/macro/Cargo.toml
index d1d633a..1f81ab1 100644
--- a/third_party/autocxx/macro/Cargo.toml
+++ b/third_party/autocxx/macro/Cargo.toml
@@ -8,7 +8,7 @@
 
 [package]
 name = "autocxx-macro"
-version = "0.22.0"
+version = "0.22.3"
 authors = ["Adrian Taylor <adetaylor@chromium.org>"]
 license = "MIT OR Apache-2.0"
 description = "Safe autogenerated interop between Rust and C++"
@@ -21,7 +21,7 @@
 proc-macro = true
 
 [dependencies]
-autocxx-parser = { path="../parser", version="=0.22.0" }
+autocxx-parser = { path="../parser", version="=0.22.3" }
 proc-macro-error = "1.0"
 proc-macro2 = "1.0.11"
 quote = "1.0"
diff --git a/third_party/autocxx/parser/BUILD b/third_party/autocxx/parser/BUILD
new file mode 100644
index 0000000..c91ea85
--- /dev/null
+++ b/third_party/autocxx/parser/BUILD
@@ -0,0 +1,38 @@
+load("@rules_rust//rust:defs.bzl", "rust_library")
+
+package(default_visibility = ["//visibility:public"])
+
+licenses([
+    "notice",  # MIT from expression "MIT OR Apache-2.0"
+])
+
+rust_library(
+    name = "autocxx_parser",
+    srcs = glob(["**/*.rs"]),
+    crate_features = [
+        "reproduction_case",
+    ],
+    crate_root = "src/lib.rs",
+    edition = "2021",
+    rustc_flags = [
+        "--cap-lints=allow",
+    ],
+    tags = [
+        "cargo-raze",
+        "crate-name=autocxx-parser",
+        "manual",
+    ],
+    version = "0.16.0",
+    deps = [
+        "@//third_party/cargo:indexmap",
+        "@//third_party/cargo:itertools",
+        "@//third_party/cargo:log",
+        "@//third_party/cargo:once_cell",
+        "@//third_party/cargo:proc_macro2",
+        "@//third_party/cargo:quote",
+        "@//third_party/cargo:serde",
+        "@//third_party/cargo:serde_json",
+        "@//third_party/cargo:syn",
+        "@//third_party/cargo:thiserror",
+    ],
+)
diff --git a/third_party/autocxx/parser/Cargo.toml b/third_party/autocxx/parser/Cargo.toml
index 481f21d..3668e65 100644
--- a/third_party/autocxx/parser/Cargo.toml
+++ b/third_party/autocxx/parser/Cargo.toml
@@ -8,7 +8,7 @@
 
 [package]
 name = "autocxx-parser"
-version = "0.22.0"
+version = "0.22.3"
 authors = ["Adrian Taylor <adetaylor@chromium.org>"]
 license = "MIT OR Apache-2.0"
 description = "Safe autogenerated interop between Rust and C++"
diff --git a/third_party/autocxx/parser/src/config.rs b/third_party/autocxx/parser/src/config.rs
index 4204acf..8d30249 100644
--- a/third_party/autocxx/parser/src/config.rs
+++ b/third_party/autocxx/parser/src/config.rs
@@ -33,6 +33,7 @@
 pub enum UnsafePolicy {
     AllFunctionsSafe,
     AllFunctionsUnsafe,
+    ReferencesWrappedAllFunctionsSafe,
 }
 
 impl Default for UnsafePolicy {
@@ -50,8 +51,13 @@
             Some(id) => {
                 if id == "unsafe_ffi" {
                     Ok(UnsafePolicy::AllFunctionsSafe)
+                } else if id == "unsafe_references_wrapped" {
+                    Ok(UnsafePolicy::ReferencesWrappedAllFunctionsSafe)
                 } else {
-                    Err(syn::Error::new(id.span(), "expected unsafe_ffi"))
+                    Err(syn::Error::new(
+                        id.span(),
+                        "expected unsafe_ffi or unsafe_references_wrapped",
+                    ))
                 }
             }
             None => Ok(UnsafePolicy::AllFunctionsUnsafe),
@@ -70,10 +76,20 @@
     fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) {
         if *self == UnsafePolicy::AllFunctionsSafe {
             tokens.extend(quote! { unsafe })
+        } else if *self == UnsafePolicy::ReferencesWrappedAllFunctionsSafe {
+            tokens.extend(quote! { unsafe_references_wrapped })
         }
     }
 }
 
+impl UnsafePolicy {
+    /// Whether we are treating C++ references as a different thing from Rust
+    /// references and therefore have to generate lots of code for a CppRef type
+    pub fn requires_cpprefs(&self) -> bool {
+        matches!(self, Self::ReferencesWrappedAllFunctionsSafe)
+    }
+}
+
 /// An entry in the allowlist.
 #[derive(Hash, Debug)]
 pub enum AllowlistEntry {
diff --git a/third_party/autocxx/parser/src/directives.rs b/third_party/autocxx/parser/src/directives.rs
index 160829e..70c88fc 100644
--- a/third_party/autocxx/parser/src/directives.rs
+++ b/third_party/autocxx/parser/src/directives.rs
@@ -268,10 +268,8 @@
     ) -> Box<dyn Iterator<Item = TokenStream> + 'a> {
         let policy = &config.unsafe_policy;
         match config.unsafe_policy {
-            crate::UnsafePolicy::AllFunctionsSafe => {
-                Box::new(std::iter::once(policy.to_token_stream()))
-            }
             crate::UnsafePolicy::AllFunctionsUnsafe => Box::new(std::iter::empty()),
+            _ => Box::new(std::iter::once(policy.to_token_stream())),
         }
     }
 }
diff --git a/third_party/autocxx/src/lib.rs b/third_party/autocxx/src/lib.rs
index 6592011..4b9f26b 100644
--- a/third_party/autocxx/src/lib.rs
+++ b/third_party/autocxx/src/lib.rs
@@ -14,10 +14,13 @@
 // do anything - all the magic is handled entirely by
 // autocxx_macro::include_cpp_impl.
 
+mod reference_wrapper;
 mod rvalue_param;
 pub mod subclass;
 mod value_param;
 
+pub use reference_wrapper::{CppMutRef, CppPin, CppRef};
+
 #[cfg_attr(doc, aquamarine::aquamarine)]
 /// Include some C++ headers in your Rust project.
 ///
@@ -257,6 +260,14 @@
 ///
 /// Generated C++ APIs which use raw pointers remain `unsafe`
 /// no matter what policy you choose.
+///
+/// There's an additional possible experimental safety
+/// policy available here:
+/// `safety!(unsafe_references_wrapped)`
+/// This policy treats C++ references as scary and requires
+/// them to be wrapped in a `CppRef` type. This `CppRef`
+/// type is implemented within the generated bindings but
+/// follows the contract of [`CppRef`].
 #[macro_export]
 macro_rules! safety {
     ($($tt:tt)*) => { $crate::usage!{$($tt)*} };
@@ -613,6 +624,9 @@
     pub use crate::c_void;
     pub use crate::cpp_semantics;
     pub use crate::include_cpp;
+    pub use crate::CppMutRef;
+    pub use crate::CppPin;
+    pub use crate::CppRef;
     pub use crate::PinMut;
     pub use crate::RValueParam;
     pub use crate::ValueParam;
diff --git a/third_party/autocxx/src/reference_wrapper.rs b/third_party/autocxx/src/reference_wrapper.rs
new file mode 100644
index 0000000..ed943d3
--- /dev/null
+++ b/third_party/autocxx/src/reference_wrapper.rs
@@ -0,0 +1,109 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+/// A C++ const reference. These are different from Rust's `&T` in that
+/// these may exist even while the object is mutated elsewhere.
+///
+/// This is a trait not a struct due to the nuances of Rust's orphan rule
+/// - implementations of this trait are found in each set of generated bindings
+/// but they are essentially the same.
+pub trait CppRef<'a, T> {
+    /// Retrieve the underlying C++ pointer.
+    fn as_ptr(&self) -> *const T;
+
+    /// Get a regular Rust reference out of this C++ reference.
+    ///
+    /// # Safety
+    ///
+    /// Callers must guarantee that the referent is not modified by any other
+    /// C++ or Rust code while the returned reference exists. Callers must
+    /// also guarantee that no mutable Rust reference is created to the
+    /// referent while the returned reference exists.
+    unsafe fn as_ref(&self) -> &T {
+        &*self.as_ptr()
+    }
+}
+
+/// A C++ non-const reference. These are different from Rust's `&mut T` in that
+/// several C++ references can exist to the same underlying data ("aliasing")
+/// and that's not permitted in Rust.
+///
+/// This is a trait not a struct due to the nuances of Rust's orphan rule
+/// - implementations of this trait are found in each set of generated bindings
+/// but they are essentially the same.
+pub trait CppMutRef<'a, T>: CppRef<'a, T> {
+    /// Retrieve the underlying C++ pointer.
+    fn as_mut_ptr(&self) -> *mut T;
+
+    /// Get a regular Rust mutable reference out of this C++ reference.
+    ///
+    /// # Safety
+    ///
+    /// Callers must guarantee that the referent is not modified by any other
+    /// C++ or Rust code while the returned reference exists. Callers must
+    /// also guarantee that no other Rust reference is created to the referent
+    /// while the returned reference exists.
+    unsafe fn as_mut(&mut self) -> &mut T {
+        &mut *self.as_mut_ptr()
+    }
+}
+
+/// Any newtype wrapper which causes the contained object to obey C++ reference
+/// semantics rather than Rust reference semantics.
+///
+/// The complex generics here are working around the orphan rule - the only
+/// important generic is `T` which is the underlying stored type.
+///
+/// C++ references are permitted to alias one another, and commonly do.
+/// Rust references must alias according only to the narrow rules of the
+/// borrow checker.
+///
+/// If you need C++ to access your Rust object, first imprison it in one of these
+/// objects, then use [`Self::as_cpp_ref`] to obtain C++ references to it.
+pub trait CppPin<'a, T: 'a> {
+    /// The type of C++ reference created to the contained object.
+    type CppRef: CppRef<'a, T>;
+
+    /// The type of C++ mutable reference created to the contained object.
+    type CppMutRef: CppMutRef<'a, T>;
+
+    /// Get an immutable pointer to the underlying object.
+    fn as_ptr(&self) -> *const T;
+
+    /// Get a mutable pointer to the underlying object.
+    fn as_mut_ptr(&mut self) -> *mut T;
+
+    /// Returns a reference which obeys C++ reference semantics
+    fn as_cpp_ref(&self) -> Self::CppRef;
+
+    /// Returns a mutable reference which obeys C++ reference semantics.
+    ///
+    /// Note that this requires unique ownership of `self`, but this is
+    /// advisory since the resulting reference can be cloned.
+    fn as_cpp_mut_ref(&mut self) -> Self::CppMutRef;
+
+    /// Get a normal Rust reference to the underlying object. This is unsafe.
+    ///
+    /// # Safety
+    ///
+    /// You must guarantee that C++ will not mutate the object while the
+    /// reference exists.
+    unsafe fn as_ref(&self) -> &T {
+        &*self.as_ptr()
+    }
+
+    /// Get a normal Rust mutable reference to the underlying object. This is unsafe.
+    ///
+    /// # Safety
+    ///
+    /// You must guarantee that C++ will not mutate the object while the
+    /// reference exists.
+    unsafe fn as_mut(&mut self) -> &mut T {
+        &mut *self.as_mut_ptr()
+    }
+}
diff --git a/third_party/autocxx/src/subclass.rs b/third_party/autocxx/src/subclass.rs
index d2827ca..6c6ee31 100644
--- a/third_party/autocxx/src/subclass.rs
+++ b/third_party/autocxx/src/subclass.rs
@@ -207,8 +207,9 @@
 /// * You _may_ need to implement [`CppPeerConstructor`] for your subclass,
 ///   but only if autocxx determines that there are multiple possible superclass
 ///   constructors so you need to call one explicitly (or if there's a single
-///   non-trivial superclass constructor.) autocxx will implemente this trait
-///   for you if there's no ambiguity.
+///   non-trivial superclass constructor.) autocxx will implement this trait
+///   for you if there's no ambiguity and FFI functions are safe to call due to
+///   `autocxx::safety!` being used.
 ///
 /// # How to access your Rust structure from outside
 ///
diff --git a/third_party/autocxx/tools/mdbook-preprocessor/Cargo.toml b/third_party/autocxx/tools/mdbook-preprocessor/Cargo.toml
index 19d775d..f88e833 100644
--- a/third_party/autocxx/tools/mdbook-preprocessor/Cargo.toml
+++ b/third_party/autocxx/tools/mdbook-preprocessor/Cargo.toml
@@ -8,7 +8,7 @@
 
 [package]
 name = "autocxx-mdbook-preprocessor"
-version = "0.22.0"
+version = "0.22.3"
 authors = ["adetaylor <adetaylor@chromium.org>"]
 edition = "2021"
 
@@ -18,7 +18,7 @@
 itertools = "0.10"
 anyhow = "1"
 regex = "1"
-autocxx-integration-tests = { path = "../../integration-tests", version="=0.22.0"}
+autocxx-integration-tests = { path = "../../integration-tests", version="=0.22.3"}
 rayon = "1.5"
 gag = "1.0"
 env_logger = "0.9.0"
diff --git a/third_party/autocxx/tools/reduce/Cargo.toml b/third_party/autocxx/tools/reduce/Cargo.toml
index 092ccac..83e3916 100644
--- a/third_party/autocxx/tools/reduce/Cargo.toml
+++ b/third_party/autocxx/tools/reduce/Cargo.toml
@@ -8,15 +8,15 @@
 
 [package]
 name = "autocxx-reduce"
-version = "0.22.0"
+version = "0.22.3"
 authors = ["adetaylor <adetaylor@chromium.org>"]
 edition = "2021"
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-autocxx-engine = { version = "=0.22.0", path="../../engine" }
-autocxx-parser = { version = "=0.22.0", path="../../parser", features = [ "reproduction_case" ] }
+autocxx-engine = { version = "=0.22.3", path="../../engine" }
+autocxx-parser = { version = "=0.22.3", path="../../parser", features = [ "reproduction_case" ] }
 clap = { version = "3.1.2", features = ["cargo"] }
 tempfile = "3.1"
 indoc = "1.0"
@@ -26,12 +26,12 @@
 serde_derive = "1.0"
 syn = "1.0.39"
 quote = "1.0"
-cxx-gen = "0.7.54"
+cxx-gen = "0.7.68"
 regex = "1.5"
 indexmap = "1.8"
 
 [dev-dependencies]
 assert_cmd = "1.0.3"
-tempdir = "0.3.7"
+tempfile = "3.1"
 indoc = "1.0"
 proc-macro2 = "1.0"
\ No newline at end of file
diff --git a/third_party/autocxx/tools/reduce/src/main.rs b/third_party/autocxx/tools/reduce/src/main.rs
index 17c5bf0..06110cd 100644
--- a/third_party/autocxx/tools/reduce/src/main.rs
+++ b/third_party/autocxx/tools/reduce/src/main.rs
@@ -482,7 +482,7 @@
         mv concat.h concat-body.h
         echo Codegen
         (echo \"#ifndef __CONCAT_H__\"; echo \"#define __CONCAT_H__\"; echo '#include \"concat-body.h\"'; echo \"#endif\") > concat.h
-        ({} {} 2>&1 && cat autocxx-ffi-default-gen.rs && cat autocxxgen*.h && {} 2>&1 ) {}
+        (trap \"if [[ \\$? -eq 139 ]]; then echo Segfault; fi\" CHLD; {} {} 2>&1 && cat autocxx-ffi-default-gen.rs && cat autocxxgen*.h && {} 2>&1 ) {}
         echo Remove
         rm concat.h
         echo Swap back
diff --git a/third_party/autocxx/tools/reduce/tests/reduce_test.rs b/third_party/autocxx/tools/reduce/tests/reduce_test.rs
index ceac5f4..b277585 100644
--- a/third_party/autocxx/tools/reduce/tests/reduce_test.rs
+++ b/third_party/autocxx/tools/reduce/tests/reduce_test.rs
@@ -16,7 +16,7 @@
     path::{Path, PathBuf},
 };
 use syn::Token;
-use tempdir::TempDir;
+use tempfile::tempdir;
 
 static INPUT_H: &str = indoc::indoc! {"
     inline int DoMath(int a) {
@@ -169,7 +169,7 @@
     if creduce_is_broken() {
         return Ok(());
     }
-    let tmp_dir = TempDir::new("example")?;
+    let tmp_dir = tempdir()?;
     let demo_code_dir = tmp_dir.path().join("demo");
     std::fs::create_dir(&demo_code_dir).unwrap();
     let input_header = if include_cxx_h {
diff --git a/third_party/autocxx/tools/stress-test/Cargo.toml b/third_party/autocxx/tools/stress-test/Cargo.toml
index 6f15e42..a651f60 100644
--- a/third_party/autocxx/tools/stress-test/Cargo.toml
+++ b/third_party/autocxx/tools/stress-test/Cargo.toml
@@ -8,14 +8,14 @@
 
 [package]
 name = "autocxx-stress-test"
-version = "0.22.0"
+version = "0.22.3"
 authors = ["Adrian Taylor <adetaylor@chromium.org>"]
 edition = "2021"
 
 [dependencies]
-cxx = "1.0.54"
-autocxx = { path = "../..", version="0.22.0" }
+cxx = "1.0.68"
+autocxx = { path = "../..", version="0.22.3" }
 
 [build-dependencies]
-autocxx-build = { path = "../../gen/build", version="0.22.0" }
+autocxx-build = { path = "../../gen/build", version="0.22.3" }
 miette = { version="4.3", features=["fancy"]}
diff --git a/third_party/cargo/BUILD.bazel b/third_party/cargo/BUILD.bazel
index 1ab9728..4bf6ce8 100644
--- a/third_party/cargo/BUILD.bazel
+++ b/third_party/cargo/BUILD.bazel
@@ -14,7 +14,7 @@
 # Aliased targets
 alias(
     name = "anyhow",
-    actual = "@raze__anyhow__1_0_57//:anyhow",
+    actual = "@raze__anyhow__1_0_58//:anyhow",
     tags = [
         "cargo-raze",
         "manual",
@@ -41,7 +41,7 @@
 
 alias(
     name = "autocxx_bindgen",
-    actual = "@raze__autocxx_bindgen__0_59_16//:autocxx_bindgen",
+    actual = "@raze__autocxx_bindgen__0_59_17//:autocxx_bindgen",
     tags = [
         "cargo-raze",
         "manual",
@@ -86,7 +86,7 @@
 
 alias(
     name = "clap",
-    actual = "@raze__clap__3_1_18//:clap",
+    actual = "@raze__clap__3_2_12//:clap",
     tags = [
         "cargo-raze",
         "manual",
@@ -95,7 +95,7 @@
 
 alias(
     name = "cxx",
-    actual = "@raze__cxx__1_0_68//:cxx",
+    actual = "@raze__cxx__1_0_71//:cxx",
     tags = [
         "cargo-raze",
         "manual",
@@ -104,7 +104,7 @@
 
 alias(
     name = "cxx_cc",
-    actual = "@raze__cxx__1_0_68//:cxx_cc",
+    actual = "@raze__cxx__1_0_71//:cxx_cc",
     tags = [
         "cargo-raze",
         "manual",
@@ -113,7 +113,7 @@
 
 alias(
     name = "cxx_gen",
-    actual = "@raze__cxx_gen__0_7_68//:cxx_gen",
+    actual = "@raze__cxx_gen__0_7_71//:cxx_gen",
     tags = [
         "cargo-raze",
         "manual",
@@ -122,7 +122,7 @@
 
 alias(
     name = "cargo_bin_cxxbridge",
-    actual = "@raze__cxxbridge_cmd__1_0_68//:cargo_bin_cxxbridge",
+    actual = "@raze__cxxbridge_cmd__1_0_71//:cargo_bin_cxxbridge",
     tags = [
         "cargo-raze",
         "manual",
@@ -131,7 +131,7 @@
 
 alias(
     name = "cxxbridge_cmd",
-    actual = "@raze__cxxbridge_cmd__1_0_68//:cxxbridge_cmd",
+    actual = "@raze__cxxbridge_cmd__1_0_71//:cxxbridge_cmd",
     tags = [
         "cargo-raze",
         "manual",
@@ -140,7 +140,7 @@
 
 alias(
     name = "cxxbridge_macro",
-    actual = "@raze__cxxbridge_macro__1_0_68//:cxxbridge_macro",
+    actual = "@raze__cxxbridge_macro__1_0_71//:cxxbridge_macro",
     tags = [
         "cargo-raze",
         "manual",
@@ -157,8 +157,17 @@
 )
 
 alias(
+    name = "futures",
+    actual = "@raze__futures__0_3_21//:futures",
+    tags = [
+        "cargo-raze",
+        "manual",
+    ],
+)
+
+alias(
     name = "indexmap",
-    actual = "@raze__indexmap__1_8_1//:indexmap",
+    actual = "@raze__indexmap__1_9_1//:indexmap",
     tags = [
         "cargo-raze",
         "manual",
@@ -230,7 +239,7 @@
 
 alias(
     name = "once_cell",
-    actual = "@raze__once_cell__1_10_0//:once_cell",
+    actual = "@raze__once_cell__1_13_0//:once_cell",
     tags = [
         "cargo-raze",
         "manual",
@@ -248,7 +257,7 @@
 
 alias(
     name = "proc_macro2",
-    actual = "@raze__proc_macro2__1_0_39//:proc_macro2",
+    actual = "@raze__proc_macro2__1_0_40//:proc_macro2",
     tags = [
         "cargo-raze",
         "manual",
@@ -266,7 +275,7 @@
 
 alias(
     name = "quote",
-    actual = "@raze__quote__1_0_18//:quote",
+    actual = "@raze__quote__1_0_20//:quote",
     tags = [
         "cargo-raze",
         "manual",
@@ -275,7 +284,7 @@
 
 alias(
     name = "regex",
-    actual = "@raze__regex__1_5_5//:regex",
+    actual = "@raze__regex__1_6_0//:regex",
     tags = [
         "cargo-raze",
         "manual",
@@ -293,7 +302,7 @@
 
 alias(
     name = "serde",
-    actual = "@raze__serde__1_0_137//:serde",
+    actual = "@raze__serde__1_0_139//:serde",
     tags = [
         "cargo-raze",
         "manual",
@@ -302,7 +311,7 @@
 
 alias(
     name = "serde_json",
-    actual = "@raze__serde_json__1_0_81//:serde_json",
+    actual = "@raze__serde_json__1_0_82//:serde_json",
     tags = [
         "cargo-raze",
         "manual",
@@ -311,7 +320,7 @@
 
 alias(
     name = "smallvec",
-    actual = "@raze__smallvec__1_8_0//:smallvec",
+    actual = "@raze__smallvec__1_9_0//:smallvec",
     tags = [
         "cargo-raze",
         "manual",
@@ -320,7 +329,7 @@
 
 alias(
     name = "strum_macros",
-    actual = "@raze__strum_macros__0_24_0//:strum_macros",
+    actual = "@raze__strum_macros__0_24_2//:strum_macros",
     tags = [
         "cargo-raze",
         "manual",
@@ -329,16 +338,7 @@
 
 alias(
     name = "syn",
-    actual = "@raze__syn__1_0_95//:syn",
-    tags = [
-        "cargo-raze",
-        "manual",
-    ],
-)
-
-alias(
-    name = "tempdir",
-    actual = "@raze__tempdir__0_3_7//:tempdir",
+    actual = "@raze__syn__1_0_98//:syn",
     tags = [
         "cargo-raze",
         "manual",
@@ -383,7 +383,7 @@
 
 alias(
     name = "trybuild",
-    actual = "@raze__trybuild__1_0_61//:trybuild",
+    actual = "@raze__trybuild__1_0_63//:trybuild",
     tags = [
         "cargo-raze",
         "manual",
@@ -392,7 +392,7 @@
 
 alias(
     name = "uuid",
-    actual = "@raze__uuid__1_0_0//:uuid",
+    actual = "@raze__uuid__1_1_2//:uuid",
     tags = [
         "cargo-raze",
         "manual",
diff --git a/third_party/cargo/Cargo.raze.lock b/third_party/cargo/Cargo.raze.lock
index 9b8251b..bc0a380 100644
--- a/third_party/cargo/Cargo.raze.lock
+++ b/third_party/cargo/Cargo.raze.lock
@@ -35,9 +35,9 @@
 
 [[package]]
 name = "anyhow"
-version = "1.0.57"
+version = "1.0.58"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "08f9b8508dccb7687a1d6c4ce66b2b0ecef467c94667de27d8d7fe1f8d2a9cdc"
+checksum = "bb07d2053ccdbe10e2af2995a2f116c1330396493dc1269f6a91d0ae82e19704"
 
 [[package]]
 name = "aquamarine"
@@ -85,7 +85,7 @@
 
 [[package]]
 name = "autocxx"
-version = "0.22.0"
+version = "0.22.3"
 dependencies = [
  "aquamarine",
  "autocxx-macro",
@@ -95,9 +95,9 @@
 
 [[package]]
 name = "autocxx-bindgen"
-version = "0.59.16"
+version = "0.59.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "435723e14bf88f198322f8555a4fdb108363021d97a47bb6492891ca86055e79"
+checksum = "f9a9a26dd38d385d23b1bf61bd231b77f690c4368aef4c77cee1b7a6da2e2042"
 dependencies = [
  "bitflags",
  "cexpr 0.6.0",
@@ -119,7 +119,7 @@
 
 [[package]]
 name = "autocxx-engine"
-version = "0.22.0"
+version = "0.22.3"
 dependencies = [
  "aquamarine",
  "autocxx-bindgen",
@@ -145,13 +145,13 @@
 
 [[package]]
 name = "autocxx-gen"
-version = "0.22.0"
+version = "0.22.3"
 dependencies = [
  "assert_cmd",
  "autocxx",
  "autocxx-engine",
  "autocxx-integration-tests",
- "clap 3.1.18",
+ "clap 3.2.12",
  "cxx",
  "env_logger 0.9.0",
  "indexmap",
@@ -159,12 +159,12 @@
  "miette",
  "pathdiff",
  "proc-macro2",
- "tempdir",
+ "tempfile",
 ]
 
 [[package]]
 name = "autocxx-integration-tests"
-version = "0.22.0"
+version = "0.22.3"
 dependencies = [
  "autocxx",
  "autocxx-engine",
@@ -187,7 +187,7 @@
 
 [[package]]
 name = "autocxx-macro"
-version = "0.22.0"
+version = "0.22.3"
 dependencies = [
  "autocxx-parser",
  "proc-macro-error",
@@ -198,7 +198,7 @@
 
 [[package]]
 name = "autocxx-parser"
-version = "0.22.0"
+version = "0.22.3"
 dependencies = [
  "indexmap",
  "itertools 0.10.3",
@@ -214,9 +214,9 @@
 
 [[package]]
 name = "backtrace"
-version = "0.3.65"
+version = "0.3.66"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "11a17d453482a265fd5f8479f2a3f405566e6ca627837aaddb85af8b1ab8ef61"
+checksum = "cab84319d616cfb654d03394f38ab7e6f0919e181b1b57e1fd15e7fb4077d9a7"
 dependencies = [
  "addr2line",
  "cc",
@@ -305,9 +305,9 @@
 
 [[package]]
 name = "clang-sys"
-version = "1.3.2"
+version = "1.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bf6b561dcf059c85bbe388e0a7b0a1469acb3934cc0cfa148613a830629e3049"
+checksum = "5a050e2153c5be08febd6734e29298e844fdb0fa21aeddd63b4eb7baa106c69b"
 dependencies = [
  "glob",
  "libc",
@@ -331,15 +331,15 @@
 
 [[package]]
 name = "clap"
-version = "3.1.18"
+version = "3.2.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d2dbdf4bdacb33466e854ce889eee8dfd5729abf7ccd7664d0a2d60cd384440b"
+checksum = "ab8b79fe3946ceb4a0b1c080b4018992b8d27e9ff363644c1c9b6387c854614d"
 dependencies = [
  "atty",
  "bitflags",
  "clap_lex",
  "indexmap",
- "lazy_static",
+ "once_cell",
  "strsim 0.10.0",
  "termcolor",
  "textwrap 0.15.0",
@@ -347,9 +347,9 @@
 
 [[package]]
 name = "clap_lex"
-version = "0.2.0"
+version = "0.2.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a37c35f1112dad5e6e0b1adaff798507497a18fceeb30cceb3bae7d1427b9213"
+checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5"
 dependencies = [
  "os_str_bytes",
 ]
@@ -373,8 +373,11 @@
  "cxx",
  "cxxbridge-cmd",
  "cxxbridge-macro",
+ "futures",
  "libloading 0.6.3",
  "link-cplusplus",
+ "once_cell",
+ "thiserror",
  "toml",
  "uuid",
 ]
@@ -390,9 +393,9 @@
 
 [[package]]
 name = "cxx"
-version = "1.0.68"
+version = "1.0.71"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7e599641dff337570f6aa9c304ecca92341d30bf72e1c50287869ed6a36615a6"
+checksum = "5469a6f42296f4fd40789b397383718f9a0bd75d2f9b7cedbb249996811fba27"
 dependencies = [
  "cc",
  "cxxbridge-flags",
@@ -402,9 +405,9 @@
 
 [[package]]
 name = "cxx-gen"
-version = "0.7.68"
+version = "0.7.71"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e2c726d93799c3129c65224ab09eae1a31276bc593d4f7344be1c592c16a1ec"
+checksum = "c606d018d5f8298464809ab01eb1aaf3efc2d6a984f527477cabda650f9f8688"
 dependencies = [
  "codespan-reporting",
  "proc-macro2",
@@ -414,11 +417,11 @@
 
 [[package]]
 name = "cxxbridge-cmd"
-version = "1.0.68"
+version = "1.0.71"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "08bf87cef93c3987aab9316b83fbf041a9a6fd19d0e08b0e9deb79321a58f766"
+checksum = "384d7699599cc149694e38151d20820e8ab5550037526870bee8a27b069ed922"
 dependencies = [
- "clap 3.1.18",
+ "clap 3.2.12",
  "codespan-reporting",
  "proc-macro2",
  "quote",
@@ -427,15 +430,15 @@
 
 [[package]]
 name = "cxxbridge-flags"
-version = "1.0.68"
+version = "1.0.71"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3894ad0c6d517cb5a4ce8ec20b37cd0ea31b480fe582a104c5db67ae21270853"
+checksum = "0fef2b4ffdc935c973bc7817d541fc936fdc8a85194cfdd9c761aca8387edd48"
 
 [[package]]
 name = "cxxbridge-macro"
-version = "1.0.68"
+version = "1.0.71"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "34fa7e395dc1c001083c7eed28c8f0f0b5a225610f3b6284675f444af6fab86b"
+checksum = "9d3a240a54f5526967ffae81fdcda1fc80564964220d90816960b2eae2eab7f4"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -456,9 +459,9 @@
 
 [[package]]
 name = "either"
-version = "1.6.1"
+version = "1.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457"
+checksum = "3f107b87b6afc2a64fd13cac55fe06d6c8859f12d4b14cbcdd2c67d0976781be"
 
 [[package]]
 name = "env_logger"
@@ -508,10 +511,93 @@
 ]
 
 [[package]]
-name = "fuchsia-cprng"
-version = "0.1.1"
+name = "futures"
+version = "0.3.21"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"
+checksum = "f73fe65f54d1e12b726f517d3e2135ca3125a437b6d998caf1962961f7172d9e"
+dependencies = [
+ "futures-channel",
+ "futures-core",
+ "futures-executor",
+ "futures-io",
+ "futures-sink",
+ "futures-task",
+ "futures-util",
+]
+
+[[package]]
+name = "futures-channel"
+version = "0.3.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010"
+dependencies = [
+ "futures-core",
+ "futures-sink",
+]
+
+[[package]]
+name = "futures-core"
+version = "0.3.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3"
+
+[[package]]
+name = "futures-executor"
+version = "0.3.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9420b90cfa29e327d0429f19be13e7ddb68fa1cccb09d65e5706b8c7a749b8a6"
+dependencies = [
+ "futures-core",
+ "futures-task",
+ "futures-util",
+]
+
+[[package]]
+name = "futures-io"
+version = "0.3.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b"
+
+[[package]]
+name = "futures-macro"
+version = "0.3.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "futures-sink"
+version = "0.3.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868"
+
+[[package]]
+name = "futures-task"
+version = "0.3.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a"
+
+[[package]]
+name = "futures-util"
+version = "0.3.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a"
+dependencies = [
+ "futures-channel",
+ "futures-core",
+ "futures-io",
+ "futures-macro",
+ "futures-sink",
+ "futures-task",
+ "memchr",
+ "pin-project-lite",
+ "pin-utils",
+ "slab",
+]
 
 [[package]]
 name = "gimli"
@@ -527,9 +613,9 @@
 
 [[package]]
 name = "hashbrown"
-version = "0.11.2"
+version = "0.12.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e"
+checksum = "607c8a29735385251a339424dd462993c0fed8fa09d378f259377df08c126022"
 
 [[package]]
 name = "heck"
@@ -554,9 +640,9 @@
 
 [[package]]
 name = "indexmap"
-version = "1.8.1"
+version = "1.9.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0f647032dfaa1f8b6dc29bd3edb7bbef4861b8b8007ebb118d6db284fd59f6ee"
+checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e"
 dependencies = [
  "autocfg",
  "hashbrown",
@@ -709,9 +795,9 @@
 
 [[package]]
 name = "miniz_oxide"
-version = "0.5.1"
+version = "0.5.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d2b29bd4bc3f33391105ebee3589c19197c4271e3e5a9ec9bfe8127eeff8f082"
+checksum = "6f5c75688da582b8ffc1f1799e9db273f32133c49e048f614d22ec3256773ccc"
 dependencies = [
  "adler",
 ]
@@ -747,24 +833,24 @@
 
 [[package]]
 name = "object"
-version = "0.28.4"
+version = "0.29.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e42c982f2d955fac81dd7e1d0e1426a7d702acd9c98d19ab01083a6a0328c424"
+checksum = "21158b2c33aa6d4561f1c0a6ea283ca92bc54802a93b263e910746d679a7eb53"
 dependencies = [
  "memchr",
 ]
 
 [[package]]
 name = "once_cell"
-version = "1.10.0"
+version = "1.13.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "87f3e037eac156d1775da914196f0f37741a274155e34a0b7e427c35d2a2ecb9"
+checksum = "18a6dbe30758c9f83eb00cbea4ac95966305f5a7772f3f42ebfc7fc7eddbd8e1"
 
 [[package]]
 name = "os_str_bytes"
-version = "6.0.1"
+version = "6.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "029d8d0b2f198229de29dca79676f2738ff952edf3fde542eb8bf94d8c21b435"
+checksum = "648001efe5d5c0102d8cea768e348da85d90af8ba91f0bea908f157951493cd4"
 
 [[package]]
 name = "owo-colors"
@@ -785,6 +871,18 @@
 checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099"
 
 [[package]]
+name = "pin-project-lite"
+version = "0.2.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116"
+
+[[package]]
+name = "pin-utils"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
+
+[[package]]
 name = "predicates"
 version = "2.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -837,60 +935,23 @@
 
 [[package]]
 name = "proc-macro2"
-version = "1.0.39"
+version = "1.0.40"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c54b25569025b7fc9651de43004ae593a75ad88543b17178aa5e1b9c4f15f56f"
+checksum = "dd96a1e8ed2596c337f8eae5f24924ec83f5ad5ab21ea8e455d3566c69fbcaf7"
 dependencies = [
  "unicode-ident",
 ]
 
 [[package]]
 name = "quote"
-version = "1.0.18"
+version = "1.0.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1"
+checksum = "3bcdf212e9776fbcb2d23ab029360416bb1706b1aea2d1a5ba002727cbcab804"
 dependencies = [
  "proc-macro2",
 ]
 
 [[package]]
-name = "rand"
-version = "0.4.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293"
-dependencies = [
- "fuchsia-cprng",
- "libc",
- "rand_core 0.3.1",
- "rdrand",
- "winapi",
-]
-
-[[package]]
-name = "rand_core"
-version = "0.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b"
-dependencies = [
- "rand_core 0.4.2",
-]
-
-[[package]]
-name = "rand_core"
-version = "0.4.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc"
-
-[[package]]
-name = "rdrand"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2"
-dependencies = [
- "rand_core 0.3.1",
-]
-
-[[package]]
 name = "redox_syscall"
 version = "0.2.13"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -901,9 +962,9 @@
 
 [[package]]
 name = "regex"
-version = "1.5.5"
+version = "1.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1a11647b6b25ff05a515cb92c365cec08801e83423a235b51e231e1808747286"
+checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b"
 dependencies = [
  "aho-corasick",
  "memchr",
@@ -918,9 +979,9 @@
 
 [[package]]
 name = "regex-syntax"
-version = "0.6.25"
+version = "0.6.27"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b"
+checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244"
 
 [[package]]
 name = "remove_dir_all"
@@ -951,9 +1012,9 @@
 
 [[package]]
 name = "rustversion"
-version = "1.0.6"
+version = "1.0.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f"
+checksum = "24c8ad4f0c00e1eb5bc7614d236a7f1300e3dbd76b68cac8e06fb00b015ad8d8"
 
 [[package]]
 name = "ryu"
@@ -963,18 +1024,18 @@
 
 [[package]]
 name = "serde"
-version = "1.0.137"
+version = "1.0.139"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "61ea8d54c77f8315140a05f4c7237403bf38b72704d031543aa1d16abbf517d1"
+checksum = "0171ebb889e45aa68b44aee0859b3eede84c6f5f5c228e6f140c0b2a0a46cad6"
 dependencies = [
  "serde_derive",
 ]
 
 [[package]]
 name = "serde_derive"
-version = "1.0.137"
+version = "1.0.139"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1f26faba0c3959972377d3b2d306ee9f71faee9714294e41bb777f83f88578be"
+checksum = "dc1d3230c1de7932af58ad8ffbe1d784bd55efd5a9d84ac24f69c72d83543dfb"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -983,9 +1044,9 @@
 
 [[package]]
 name = "serde_json"
-version = "1.0.81"
+version = "1.0.82"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9b7ce2b32a1aed03c558dc61a5cd328f15aff2dbc17daad8fb8af04d2100e15c"
+checksum = "82c2c1fdcd807d1098552c5b9a36e425e42e9fbd7c6a37a8425f390f781f7fa7"
 dependencies = [
  "itoa",
  "ryu",
@@ -999,10 +1060,16 @@
 checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3"
 
 [[package]]
-name = "smallvec"
-version = "1.8.0"
+name = "slab"
+version = "0.4.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83"
+checksum = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32"
+
+[[package]]
+name = "smallvec"
+version = "1.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2fd0db749597d91ff862fd1d55ea87f7855a744a8425a64695b6fca237d1dad1"
 
 [[package]]
 name = "smawk"
@@ -1024,9 +1091,9 @@
 
 [[package]]
 name = "strum_macros"
-version = "0.24.0"
+version = "0.24.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6878079b17446e4d3eba6192bb0a2950d5b14f0ed8424b852310e5a94345d0ef"
+checksum = "4faebde00e8ff94316c01800f9054fd2ba77d30d9e922541913051d1d978918b"
 dependencies = [
  "heck",
  "proc-macro2",
@@ -1065,9 +1132,9 @@
 
 [[package]]
 name = "syn"
-version = "1.0.95"
+version = "1.0.98"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fbaf6116ab8924f39d52792136fb74fd60a80194cf1b1c6ffa6453eef1c3f942"
+checksum = "c50aef8a904de4c23c788f104b7dddc7d6f79c647c7c8ce4cc8f73eb0ca773dd"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -1075,16 +1142,6 @@
 ]
 
 [[package]]
-name = "tempdir"
-version = "0.3.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "15f2b5fb00ccdf689e0149d1b1b3c03fead81c2b37735d812fa8bddbbf41b6d8"
-dependencies = [
- "rand",
- "remove_dir_all",
-]
-
-[[package]]
 name = "tempfile"
 version = "3.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1206,9 +1263,9 @@
 
 [[package]]
 name = "trybuild"
-version = "1.0.61"
+version = "1.0.63"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7fc92f558afb6d1d7c6f175eb8d615b8ef49c227543e68e19c123d4ee43d8a7d"
+checksum = "764b9e244b482a9b81bde596aa37aa6f1347bf8007adab25e59f901b32b4e0a0"
 dependencies = [
  "glob",
  "once_cell",
@@ -1221,9 +1278,9 @@
 
 [[package]]
 name = "unicode-ident"
-version = "1.0.0"
+version = "1.0.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d22af068fba1eb5edcb4aea19d382b2a3deb4c8f9d475c589b6ada9e0fd493ee"
+checksum = "15c61ba63f9235225a22310255a29b806b907c9b8c964bcbd0a2c70f3f2deea7"
 
 [[package]]
 name = "unicode-linebreak"
@@ -1242,9 +1299,9 @@
 
 [[package]]
 name = "uuid"
-version = "1.0.0"
+version = "1.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8cfcd319456c4d6ea10087ed423473267e1a071f3bc0aa89f80d60997843c6f0"
+checksum = "dd6469f4314d5f1ffec476e05f17cc9a78bc7a27a6a857842170bdf8d6f98d2f"
 
 [[package]]
 name = "vec_map"
diff --git a/third_party/cargo/crates.bzl b/third_party/cargo/crates.bzl
index 8549a4d..51d9a05 100644
--- a/third_party/cargo/crates.bzl
+++ b/third_party/cargo/crates.bzl
@@ -53,12 +53,12 @@
 
     maybe(
         http_archive,
-        name = "raze__anyhow__1_0_57",
-        url = "https://crates.io/api/v1/crates/anyhow/1.0.57/download",
+        name = "raze__anyhow__1_0_58",
+        url = "https://crates.io/api/v1/crates/anyhow/1.0.58/download",
         type = "tar.gz",
-        sha256 = "08f9b8508dccb7687a1d6c4ce66b2b0ecef467c94667de27d8d7fe1f8d2a9cdc",
-        strip_prefix = "anyhow-1.0.57",
-        build_file = Label("//third_party/cargo/remote:BUILD.anyhow-1.0.57.bazel"),
+        sha256 = "bb07d2053ccdbe10e2af2995a2f116c1330396493dc1269f6a91d0ae82e19704",
+        strip_prefix = "anyhow-1.0.58",
+        build_file = Label("//third_party/cargo/remote:BUILD.anyhow-1.0.58.bazel"),
     )
 
     maybe(
@@ -103,22 +103,22 @@
 
     maybe(
         http_archive,
-        name = "raze__autocxx_bindgen__0_59_16",
-        url = "https://crates.io/api/v1/crates/autocxx-bindgen/0.59.16/download",
+        name = "raze__autocxx_bindgen__0_59_17",
+        url = "https://crates.io/api/v1/crates/autocxx-bindgen/0.59.17/download",
         type = "tar.gz",
-        sha256 = "435723e14bf88f198322f8555a4fdb108363021d97a47bb6492891ca86055e79",
-        strip_prefix = "autocxx-bindgen-0.59.16",
-        build_file = Label("//third_party/cargo/remote:BUILD.autocxx-bindgen-0.59.16.bazel"),
+        sha256 = "f9a9a26dd38d385d23b1bf61bd231b77f690c4368aef4c77cee1b7a6da2e2042",
+        strip_prefix = "autocxx-bindgen-0.59.17",
+        build_file = Label("//third_party/cargo/remote:BUILD.autocxx-bindgen-0.59.17.bazel"),
     )
 
     maybe(
         http_archive,
-        name = "raze__backtrace__0_3_65",
-        url = "https://crates.io/api/v1/crates/backtrace/0.3.65/download",
+        name = "raze__backtrace__0_3_66",
+        url = "https://crates.io/api/v1/crates/backtrace/0.3.66/download",
         type = "tar.gz",
-        sha256 = "11a17d453482a265fd5f8479f2a3f405566e6ca627837aaddb85af8b1ab8ef61",
-        strip_prefix = "backtrace-0.3.65",
-        build_file = Label("//third_party/cargo/remote:BUILD.backtrace-0.3.65.bazel"),
+        sha256 = "cab84319d616cfb654d03394f38ab7e6f0919e181b1b57e1fd15e7fb4077d9a7",
+        strip_prefix = "backtrace-0.3.66",
+        build_file = Label("//third_party/cargo/remote:BUILD.backtrace-0.3.66.bazel"),
     )
 
     maybe(
@@ -203,12 +203,12 @@
 
     maybe(
         http_archive,
-        name = "raze__clang_sys__1_3_2",
-        url = "https://crates.io/api/v1/crates/clang-sys/1.3.2/download",
+        name = "raze__clang_sys__1_3_3",
+        url = "https://crates.io/api/v1/crates/clang-sys/1.3.3/download",
         type = "tar.gz",
-        sha256 = "bf6b561dcf059c85bbe388e0a7b0a1469acb3934cc0cfa148613a830629e3049",
-        strip_prefix = "clang-sys-1.3.2",
-        build_file = Label("//third_party/cargo/remote:BUILD.clang-sys-1.3.2.bazel"),
+        sha256 = "5a050e2153c5be08febd6734e29298e844fdb0fa21aeddd63b4eb7baa106c69b",
+        strip_prefix = "clang-sys-1.3.3",
+        build_file = Label("//third_party/cargo/remote:BUILD.clang-sys-1.3.3.bazel"),
     )
 
     maybe(
@@ -223,22 +223,22 @@
 
     maybe(
         http_archive,
-        name = "raze__clap__3_1_18",
-        url = "https://crates.io/api/v1/crates/clap/3.1.18/download",
+        name = "raze__clap__3_2_12",
+        url = "https://crates.io/api/v1/crates/clap/3.2.12/download",
         type = "tar.gz",
-        sha256 = "d2dbdf4bdacb33466e854ce889eee8dfd5729abf7ccd7664d0a2d60cd384440b",
-        strip_prefix = "clap-3.1.18",
-        build_file = Label("//third_party/cargo/remote:BUILD.clap-3.1.18.bazel"),
+        sha256 = "ab8b79fe3946ceb4a0b1c080b4018992b8d27e9ff363644c1c9b6387c854614d",
+        strip_prefix = "clap-3.2.12",
+        build_file = Label("//third_party/cargo/remote:BUILD.clap-3.2.12.bazel"),
     )
 
     maybe(
         http_archive,
-        name = "raze__clap_lex__0_2_0",
-        url = "https://crates.io/api/v1/crates/clap_lex/0.2.0/download",
+        name = "raze__clap_lex__0_2_4",
+        url = "https://crates.io/api/v1/crates/clap_lex/0.2.4/download",
         type = "tar.gz",
-        sha256 = "a37c35f1112dad5e6e0b1adaff798507497a18fceeb30cceb3bae7d1427b9213",
-        strip_prefix = "clap_lex-0.2.0",
-        build_file = Label("//third_party/cargo/remote:BUILD.clap_lex-0.2.0.bazel"),
+        sha256 = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5",
+        strip_prefix = "clap_lex-0.2.4",
+        build_file = Label("//third_party/cargo/remote:BUILD.clap_lex-0.2.4.bazel"),
     )
 
     maybe(
@@ -253,52 +253,52 @@
 
     maybe(
         http_archive,
-        name = "raze__cxx__1_0_68",
-        url = "https://crates.io/api/v1/crates/cxx/1.0.68/download",
+        name = "raze__cxx__1_0_71",
+        url = "https://crates.io/api/v1/crates/cxx/1.0.71/download",
         type = "tar.gz",
-        sha256 = "7e599641dff337570f6aa9c304ecca92341d30bf72e1c50287869ed6a36615a6",
-        strip_prefix = "cxx-1.0.68",
-        build_file = Label("//third_party/cargo/remote:BUILD.cxx-1.0.68.bazel"),
+        sha256 = "5469a6f42296f4fd40789b397383718f9a0bd75d2f9b7cedbb249996811fba27",
+        strip_prefix = "cxx-1.0.71",
+        build_file = Label("//third_party/cargo/remote:BUILD.cxx-1.0.71.bazel"),
     )
 
     maybe(
         http_archive,
-        name = "raze__cxx_gen__0_7_68",
-        url = "https://crates.io/api/v1/crates/cxx-gen/0.7.68/download",
+        name = "raze__cxx_gen__0_7_71",
+        url = "https://crates.io/api/v1/crates/cxx-gen/0.7.71/download",
         type = "tar.gz",
-        sha256 = "1e2c726d93799c3129c65224ab09eae1a31276bc593d4f7344be1c592c16a1ec",
-        strip_prefix = "cxx-gen-0.7.68",
-        build_file = Label("//third_party/cargo/remote:BUILD.cxx-gen-0.7.68.bazel"),
+        sha256 = "c606d018d5f8298464809ab01eb1aaf3efc2d6a984f527477cabda650f9f8688",
+        strip_prefix = "cxx-gen-0.7.71",
+        build_file = Label("//third_party/cargo/remote:BUILD.cxx-gen-0.7.71.bazel"),
     )
 
     maybe(
         http_archive,
-        name = "raze__cxxbridge_cmd__1_0_68",
-        url = "https://crates.io/api/v1/crates/cxxbridge-cmd/1.0.68/download",
+        name = "raze__cxxbridge_cmd__1_0_71",
+        url = "https://crates.io/api/v1/crates/cxxbridge-cmd/1.0.71/download",
         type = "tar.gz",
-        sha256 = "08bf87cef93c3987aab9316b83fbf041a9a6fd19d0e08b0e9deb79321a58f766",
-        strip_prefix = "cxxbridge-cmd-1.0.68",
-        build_file = Label("//third_party/cargo/remote:BUILD.cxxbridge-cmd-1.0.68.bazel"),
+        sha256 = "384d7699599cc149694e38151d20820e8ab5550037526870bee8a27b069ed922",
+        strip_prefix = "cxxbridge-cmd-1.0.71",
+        build_file = Label("//third_party/cargo/remote:BUILD.cxxbridge-cmd-1.0.71.bazel"),
     )
 
     maybe(
         http_archive,
-        name = "raze__cxxbridge_flags__1_0_68",
-        url = "https://crates.io/api/v1/crates/cxxbridge-flags/1.0.68/download",
+        name = "raze__cxxbridge_flags__1_0_71",
+        url = "https://crates.io/api/v1/crates/cxxbridge-flags/1.0.71/download",
         type = "tar.gz",
-        sha256 = "3894ad0c6d517cb5a4ce8ec20b37cd0ea31b480fe582a104c5db67ae21270853",
-        strip_prefix = "cxxbridge-flags-1.0.68",
-        build_file = Label("//third_party/cargo/remote:BUILD.cxxbridge-flags-1.0.68.bazel"),
+        sha256 = "0fef2b4ffdc935c973bc7817d541fc936fdc8a85194cfdd9c761aca8387edd48",
+        strip_prefix = "cxxbridge-flags-1.0.71",
+        build_file = Label("//third_party/cargo/remote:BUILD.cxxbridge-flags-1.0.71.bazel"),
     )
 
     maybe(
         http_archive,
-        name = "raze__cxxbridge_macro__1_0_68",
-        url = "https://crates.io/api/v1/crates/cxxbridge-macro/1.0.68/download",
+        name = "raze__cxxbridge_macro__1_0_71",
+        url = "https://crates.io/api/v1/crates/cxxbridge-macro/1.0.71/download",
         type = "tar.gz",
-        sha256 = "34fa7e395dc1c001083c7eed28c8f0f0b5a225610f3b6284675f444af6fab86b",
-        strip_prefix = "cxxbridge-macro-1.0.68",
-        build_file = Label("//third_party/cargo/remote:BUILD.cxxbridge-macro-1.0.68.bazel"),
+        sha256 = "9d3a240a54f5526967ffae81fdcda1fc80564964220d90816960b2eae2eab7f4",
+        strip_prefix = "cxxbridge-macro-1.0.71",
+        build_file = Label("//third_party/cargo/remote:BUILD.cxxbridge-macro-1.0.71.bazel"),
     )
 
     maybe(
@@ -323,12 +323,12 @@
 
     maybe(
         http_archive,
-        name = "raze__either__1_6_1",
-        url = "https://crates.io/api/v1/crates/either/1.6.1/download",
+        name = "raze__either__1_7_0",
+        url = "https://crates.io/api/v1/crates/either/1.7.0/download",
         type = "tar.gz",
-        sha256 = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457",
-        strip_prefix = "either-1.6.1",
-        build_file = Label("//third_party/cargo/remote:BUILD.either-1.6.1.bazel"),
+        sha256 = "3f107b87b6afc2a64fd13cac55fe06d6c8859f12d4b14cbcdd2c67d0976781be",
+        strip_prefix = "either-1.7.0",
+        build_file = Label("//third_party/cargo/remote:BUILD.either-1.7.0.bazel"),
     )
 
     maybe(
@@ -363,12 +363,92 @@
 
     maybe(
         http_archive,
-        name = "raze__fuchsia_cprng__0_1_1",
-        url = "https://crates.io/api/v1/crates/fuchsia-cprng/0.1.1/download",
+        name = "raze__futures__0_3_21",
+        url = "https://crates.io/api/v1/crates/futures/0.3.21/download",
         type = "tar.gz",
-        sha256 = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba",
-        strip_prefix = "fuchsia-cprng-0.1.1",
-        build_file = Label("//third_party/cargo/remote:BUILD.fuchsia-cprng-0.1.1.bazel"),
+        sha256 = "f73fe65f54d1e12b726f517d3e2135ca3125a437b6d998caf1962961f7172d9e",
+        strip_prefix = "futures-0.3.21",
+        build_file = Label("//third_party/cargo/remote:BUILD.futures-0.3.21.bazel"),
+    )
+
+    maybe(
+        http_archive,
+        name = "raze__futures_channel__0_3_21",
+        url = "https://crates.io/api/v1/crates/futures-channel/0.3.21/download",
+        type = "tar.gz",
+        sha256 = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010",
+        strip_prefix = "futures-channel-0.3.21",
+        build_file = Label("//third_party/cargo/remote:BUILD.futures-channel-0.3.21.bazel"),
+    )
+
+    maybe(
+        http_archive,
+        name = "raze__futures_core__0_3_21",
+        url = "https://crates.io/api/v1/crates/futures-core/0.3.21/download",
+        type = "tar.gz",
+        sha256 = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3",
+        strip_prefix = "futures-core-0.3.21",
+        build_file = Label("//third_party/cargo/remote:BUILD.futures-core-0.3.21.bazel"),
+    )
+
+    maybe(
+        http_archive,
+        name = "raze__futures_executor__0_3_21",
+        url = "https://crates.io/api/v1/crates/futures-executor/0.3.21/download",
+        type = "tar.gz",
+        sha256 = "9420b90cfa29e327d0429f19be13e7ddb68fa1cccb09d65e5706b8c7a749b8a6",
+        strip_prefix = "futures-executor-0.3.21",
+        build_file = Label("//third_party/cargo/remote:BUILD.futures-executor-0.3.21.bazel"),
+    )
+
+    maybe(
+        http_archive,
+        name = "raze__futures_io__0_3_21",
+        url = "https://crates.io/api/v1/crates/futures-io/0.3.21/download",
+        type = "tar.gz",
+        sha256 = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b",
+        strip_prefix = "futures-io-0.3.21",
+        build_file = Label("//third_party/cargo/remote:BUILD.futures-io-0.3.21.bazel"),
+    )
+
+    maybe(
+        http_archive,
+        name = "raze__futures_macro__0_3_21",
+        url = "https://crates.io/api/v1/crates/futures-macro/0.3.21/download",
+        type = "tar.gz",
+        sha256 = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512",
+        strip_prefix = "futures-macro-0.3.21",
+        build_file = Label("//third_party/cargo/remote:BUILD.futures-macro-0.3.21.bazel"),
+    )
+
+    maybe(
+        http_archive,
+        name = "raze__futures_sink__0_3_21",
+        url = "https://crates.io/api/v1/crates/futures-sink/0.3.21/download",
+        type = "tar.gz",
+        sha256 = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868",
+        strip_prefix = "futures-sink-0.3.21",
+        build_file = Label("//third_party/cargo/remote:BUILD.futures-sink-0.3.21.bazel"),
+    )
+
+    maybe(
+        http_archive,
+        name = "raze__futures_task__0_3_21",
+        url = "https://crates.io/api/v1/crates/futures-task/0.3.21/download",
+        type = "tar.gz",
+        sha256 = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a",
+        strip_prefix = "futures-task-0.3.21",
+        build_file = Label("//third_party/cargo/remote:BUILD.futures-task-0.3.21.bazel"),
+    )
+
+    maybe(
+        http_archive,
+        name = "raze__futures_util__0_3_21",
+        url = "https://crates.io/api/v1/crates/futures-util/0.3.21/download",
+        type = "tar.gz",
+        sha256 = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a",
+        strip_prefix = "futures-util-0.3.21",
+        build_file = Label("//third_party/cargo/remote:BUILD.futures-util-0.3.21.bazel"),
     )
 
     maybe(
@@ -393,12 +473,12 @@
 
     maybe(
         http_archive,
-        name = "raze__hashbrown__0_11_2",
-        url = "https://crates.io/api/v1/crates/hashbrown/0.11.2/download",
+        name = "raze__hashbrown__0_12_2",
+        url = "https://crates.io/api/v1/crates/hashbrown/0.12.2/download",
         type = "tar.gz",
-        sha256 = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e",
-        strip_prefix = "hashbrown-0.11.2",
-        build_file = Label("//third_party/cargo/remote:BUILD.hashbrown-0.11.2.bazel"),
+        sha256 = "607c8a29735385251a339424dd462993c0fed8fa09d378f259377df08c126022",
+        strip_prefix = "hashbrown-0.12.2",
+        build_file = Label("//third_party/cargo/remote:BUILD.hashbrown-0.12.2.bazel"),
     )
 
     maybe(
@@ -433,12 +513,12 @@
 
     maybe(
         http_archive,
-        name = "raze__indexmap__1_8_1",
-        url = "https://crates.io/api/v1/crates/indexmap/1.8.1/download",
+        name = "raze__indexmap__1_9_1",
+        url = "https://crates.io/api/v1/crates/indexmap/1.9.1/download",
         type = "tar.gz",
-        sha256 = "0f647032dfaa1f8b6dc29bd3edb7bbef4861b8b8007ebb118d6db284fd59f6ee",
-        strip_prefix = "indexmap-1.8.1",
-        build_file = Label("//third_party/cargo/remote:BUILD.indexmap-1.8.1.bazel"),
+        sha256 = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e",
+        strip_prefix = "indexmap-1.9.1",
+        build_file = Label("//third_party/cargo/remote:BUILD.indexmap-1.9.1.bazel"),
     )
 
     maybe(
@@ -613,12 +693,12 @@
 
     maybe(
         http_archive,
-        name = "raze__miniz_oxide__0_5_1",
-        url = "https://crates.io/api/v1/crates/miniz_oxide/0.5.1/download",
+        name = "raze__miniz_oxide__0_5_3",
+        url = "https://crates.io/api/v1/crates/miniz_oxide/0.5.3/download",
         type = "tar.gz",
-        sha256 = "d2b29bd4bc3f33391105ebee3589c19197c4271e3e5a9ec9bfe8127eeff8f082",
-        strip_prefix = "miniz_oxide-0.5.1",
-        build_file = Label("//third_party/cargo/remote:BUILD.miniz_oxide-0.5.1.bazel"),
+        sha256 = "6f5c75688da582b8ffc1f1799e9db273f32133c49e048f614d22ec3256773ccc",
+        strip_prefix = "miniz_oxide-0.5.3",
+        build_file = Label("//third_party/cargo/remote:BUILD.miniz_oxide-0.5.3.bazel"),
     )
 
     maybe(
@@ -653,32 +733,32 @@
 
     maybe(
         http_archive,
-        name = "raze__object__0_28_4",
-        url = "https://crates.io/api/v1/crates/object/0.28.4/download",
+        name = "raze__object__0_29_0",
+        url = "https://crates.io/api/v1/crates/object/0.29.0/download",
         type = "tar.gz",
-        sha256 = "e42c982f2d955fac81dd7e1d0e1426a7d702acd9c98d19ab01083a6a0328c424",
-        strip_prefix = "object-0.28.4",
-        build_file = Label("//third_party/cargo/remote:BUILD.object-0.28.4.bazel"),
+        sha256 = "21158b2c33aa6d4561f1c0a6ea283ca92bc54802a93b263e910746d679a7eb53",
+        strip_prefix = "object-0.29.0",
+        build_file = Label("//third_party/cargo/remote:BUILD.object-0.29.0.bazel"),
     )
 
     maybe(
         http_archive,
-        name = "raze__once_cell__1_10_0",
-        url = "https://crates.io/api/v1/crates/once_cell/1.10.0/download",
+        name = "raze__once_cell__1_13_0",
+        url = "https://crates.io/api/v1/crates/once_cell/1.13.0/download",
         type = "tar.gz",
-        sha256 = "87f3e037eac156d1775da914196f0f37741a274155e34a0b7e427c35d2a2ecb9",
-        strip_prefix = "once_cell-1.10.0",
-        build_file = Label("//third_party/cargo/remote:BUILD.once_cell-1.10.0.bazel"),
+        sha256 = "18a6dbe30758c9f83eb00cbea4ac95966305f5a7772f3f42ebfc7fc7eddbd8e1",
+        strip_prefix = "once_cell-1.13.0",
+        build_file = Label("//third_party/cargo/remote:BUILD.once_cell-1.13.0.bazel"),
     )
 
     maybe(
         http_archive,
-        name = "raze__os_str_bytes__6_0_1",
-        url = "https://crates.io/api/v1/crates/os_str_bytes/6.0.1/download",
+        name = "raze__os_str_bytes__6_2_0",
+        url = "https://crates.io/api/v1/crates/os_str_bytes/6.2.0/download",
         type = "tar.gz",
-        sha256 = "029d8d0b2f198229de29dca79676f2738ff952edf3fde542eb8bf94d8c21b435",
-        strip_prefix = "os_str_bytes-6.0.1",
-        build_file = Label("//third_party/cargo/remote:BUILD.os_str_bytes-6.0.1.bazel"),
+        sha256 = "648001efe5d5c0102d8cea768e348da85d90af8ba91f0bea908f157951493cd4",
+        strip_prefix = "os_str_bytes-6.2.0",
+        build_file = Label("//third_party/cargo/remote:BUILD.os_str_bytes-6.2.0.bazel"),
     )
 
     maybe(
@@ -713,6 +793,26 @@
 
     maybe(
         http_archive,
+        name = "raze__pin_project_lite__0_2_9",
+        url = "https://crates.io/api/v1/crates/pin-project-lite/0.2.9/download",
+        type = "tar.gz",
+        sha256 = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116",
+        strip_prefix = "pin-project-lite-0.2.9",
+        build_file = Label("//third_party/cargo/remote:BUILD.pin-project-lite-0.2.9.bazel"),
+    )
+
+    maybe(
+        http_archive,
+        name = "raze__pin_utils__0_1_0",
+        url = "https://crates.io/api/v1/crates/pin-utils/0.1.0/download",
+        type = "tar.gz",
+        sha256 = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184",
+        strip_prefix = "pin-utils-0.1.0",
+        build_file = Label("//third_party/cargo/remote:BUILD.pin-utils-0.1.0.bazel"),
+    )
+
+    maybe(
+        http_archive,
         name = "raze__predicates__2_1_1",
         url = "https://crates.io/api/v1/crates/predicates/2.1.1/download",
         type = "tar.gz",
@@ -763,62 +863,22 @@
 
     maybe(
         http_archive,
-        name = "raze__proc_macro2__1_0_39",
-        url = "https://crates.io/api/v1/crates/proc-macro2/1.0.39/download",
+        name = "raze__proc_macro2__1_0_40",
+        url = "https://crates.io/api/v1/crates/proc-macro2/1.0.40/download",
         type = "tar.gz",
-        sha256 = "c54b25569025b7fc9651de43004ae593a75ad88543b17178aa5e1b9c4f15f56f",
-        strip_prefix = "proc-macro2-1.0.39",
-        build_file = Label("//third_party/cargo/remote:BUILD.proc-macro2-1.0.39.bazel"),
+        sha256 = "dd96a1e8ed2596c337f8eae5f24924ec83f5ad5ab21ea8e455d3566c69fbcaf7",
+        strip_prefix = "proc-macro2-1.0.40",
+        build_file = Label("//third_party/cargo/remote:BUILD.proc-macro2-1.0.40.bazel"),
     )
 
     maybe(
         http_archive,
-        name = "raze__quote__1_0_18",
-        url = "https://crates.io/api/v1/crates/quote/1.0.18/download",
+        name = "raze__quote__1_0_20",
+        url = "https://crates.io/api/v1/crates/quote/1.0.20/download",
         type = "tar.gz",
-        sha256 = "a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1",
-        strip_prefix = "quote-1.0.18",
-        build_file = Label("//third_party/cargo/remote:BUILD.quote-1.0.18.bazel"),
-    )
-
-    maybe(
-        http_archive,
-        name = "raze__rand__0_4_6",
-        url = "https://crates.io/api/v1/crates/rand/0.4.6/download",
-        type = "tar.gz",
-        sha256 = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293",
-        strip_prefix = "rand-0.4.6",
-        build_file = Label("//third_party/cargo/remote:BUILD.rand-0.4.6.bazel"),
-    )
-
-    maybe(
-        http_archive,
-        name = "raze__rand_core__0_3_1",
-        url = "https://crates.io/api/v1/crates/rand_core/0.3.1/download",
-        type = "tar.gz",
-        sha256 = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b",
-        strip_prefix = "rand_core-0.3.1",
-        build_file = Label("//third_party/cargo/remote:BUILD.rand_core-0.3.1.bazel"),
-    )
-
-    maybe(
-        http_archive,
-        name = "raze__rand_core__0_4_2",
-        url = "https://crates.io/api/v1/crates/rand_core/0.4.2/download",
-        type = "tar.gz",
-        sha256 = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc",
-        strip_prefix = "rand_core-0.4.2",
-        build_file = Label("//third_party/cargo/remote:BUILD.rand_core-0.4.2.bazel"),
-    )
-
-    maybe(
-        http_archive,
-        name = "raze__rdrand__0_4_0",
-        url = "https://crates.io/api/v1/crates/rdrand/0.4.0/download",
-        type = "tar.gz",
-        sha256 = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2",
-        strip_prefix = "rdrand-0.4.0",
-        build_file = Label("//third_party/cargo/remote:BUILD.rdrand-0.4.0.bazel"),
+        sha256 = "3bcdf212e9776fbcb2d23ab029360416bb1706b1aea2d1a5ba002727cbcab804",
+        strip_prefix = "quote-1.0.20",
+        build_file = Label("//third_party/cargo/remote:BUILD.quote-1.0.20.bazel"),
     )
 
     maybe(
@@ -833,12 +893,12 @@
 
     maybe(
         http_archive,
-        name = "raze__regex__1_5_5",
-        url = "https://crates.io/api/v1/crates/regex/1.5.5/download",
+        name = "raze__regex__1_6_0",
+        url = "https://crates.io/api/v1/crates/regex/1.6.0/download",
         type = "tar.gz",
-        sha256 = "1a11647b6b25ff05a515cb92c365cec08801e83423a235b51e231e1808747286",
-        strip_prefix = "regex-1.5.5",
-        build_file = Label("//third_party/cargo/remote:BUILD.regex-1.5.5.bazel"),
+        sha256 = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b",
+        strip_prefix = "regex-1.6.0",
+        build_file = Label("//third_party/cargo/remote:BUILD.regex-1.6.0.bazel"),
     )
 
     maybe(
@@ -853,12 +913,12 @@
 
     maybe(
         http_archive,
-        name = "raze__regex_syntax__0_6_25",
-        url = "https://crates.io/api/v1/crates/regex-syntax/0.6.25/download",
+        name = "raze__regex_syntax__0_6_27",
+        url = "https://crates.io/api/v1/crates/regex-syntax/0.6.27/download",
         type = "tar.gz",
-        sha256 = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b",
-        strip_prefix = "regex-syntax-0.6.25",
-        build_file = Label("//third_party/cargo/remote:BUILD.regex-syntax-0.6.25.bazel"),
+        sha256 = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244",
+        strip_prefix = "regex-syntax-0.6.27",
+        build_file = Label("//third_party/cargo/remote:BUILD.regex-syntax-0.6.27.bazel"),
     )
 
     maybe(
@@ -903,12 +963,12 @@
 
     maybe(
         http_archive,
-        name = "raze__rustversion__1_0_6",
-        url = "https://crates.io/api/v1/crates/rustversion/1.0.6/download",
+        name = "raze__rustversion__1_0_8",
+        url = "https://crates.io/api/v1/crates/rustversion/1.0.8/download",
         type = "tar.gz",
-        sha256 = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f",
-        strip_prefix = "rustversion-1.0.6",
-        build_file = Label("//third_party/cargo/remote:BUILD.rustversion-1.0.6.bazel"),
+        sha256 = "24c8ad4f0c00e1eb5bc7614d236a7f1300e3dbd76b68cac8e06fb00b015ad8d8",
+        strip_prefix = "rustversion-1.0.8",
+        build_file = Label("//third_party/cargo/remote:BUILD.rustversion-1.0.8.bazel"),
     )
 
     maybe(
@@ -923,32 +983,32 @@
 
     maybe(
         http_archive,
-        name = "raze__serde__1_0_137",
-        url = "https://crates.io/api/v1/crates/serde/1.0.137/download",
+        name = "raze__serde__1_0_139",
+        url = "https://crates.io/api/v1/crates/serde/1.0.139/download",
         type = "tar.gz",
-        sha256 = "61ea8d54c77f8315140a05f4c7237403bf38b72704d031543aa1d16abbf517d1",
-        strip_prefix = "serde-1.0.137",
-        build_file = Label("//third_party/cargo/remote:BUILD.serde-1.0.137.bazel"),
+        sha256 = "0171ebb889e45aa68b44aee0859b3eede84c6f5f5c228e6f140c0b2a0a46cad6",
+        strip_prefix = "serde-1.0.139",
+        build_file = Label("//third_party/cargo/remote:BUILD.serde-1.0.139.bazel"),
     )
 
     maybe(
         http_archive,
-        name = "raze__serde_derive__1_0_137",
-        url = "https://crates.io/api/v1/crates/serde_derive/1.0.137/download",
+        name = "raze__serde_derive__1_0_139",
+        url = "https://crates.io/api/v1/crates/serde_derive/1.0.139/download",
         type = "tar.gz",
-        sha256 = "1f26faba0c3959972377d3b2d306ee9f71faee9714294e41bb777f83f88578be",
-        strip_prefix = "serde_derive-1.0.137",
-        build_file = Label("//third_party/cargo/remote:BUILD.serde_derive-1.0.137.bazel"),
+        sha256 = "dc1d3230c1de7932af58ad8ffbe1d784bd55efd5a9d84ac24f69c72d83543dfb",
+        strip_prefix = "serde_derive-1.0.139",
+        build_file = Label("//third_party/cargo/remote:BUILD.serde_derive-1.0.139.bazel"),
     )
 
     maybe(
         http_archive,
-        name = "raze__serde_json__1_0_81",
-        url = "https://crates.io/api/v1/crates/serde_json/1.0.81/download",
+        name = "raze__serde_json__1_0_82",
+        url = "https://crates.io/api/v1/crates/serde_json/1.0.82/download",
         type = "tar.gz",
-        sha256 = "9b7ce2b32a1aed03c558dc61a5cd328f15aff2dbc17daad8fb8af04d2100e15c",
-        strip_prefix = "serde_json-1.0.81",
-        build_file = Label("//third_party/cargo/remote:BUILD.serde_json-1.0.81.bazel"),
+        sha256 = "82c2c1fdcd807d1098552c5b9a36e425e42e9fbd7c6a37a8425f390f781f7fa7",
+        strip_prefix = "serde_json-1.0.82",
+        build_file = Label("//third_party/cargo/remote:BUILD.serde_json-1.0.82.bazel"),
     )
 
     maybe(
@@ -963,12 +1023,22 @@
 
     maybe(
         http_archive,
-        name = "raze__smallvec__1_8_0",
-        url = "https://crates.io/api/v1/crates/smallvec/1.8.0/download",
+        name = "raze__slab__0_4_6",
+        url = "https://crates.io/api/v1/crates/slab/0.4.6/download",
         type = "tar.gz",
-        sha256 = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83",
-        strip_prefix = "smallvec-1.8.0",
-        build_file = Label("//third_party/cargo/remote:BUILD.smallvec-1.8.0.bazel"),
+        sha256 = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32",
+        strip_prefix = "slab-0.4.6",
+        build_file = Label("//third_party/cargo/remote:BUILD.slab-0.4.6.bazel"),
+    )
+
+    maybe(
+        http_archive,
+        name = "raze__smallvec__1_9_0",
+        url = "https://crates.io/api/v1/crates/smallvec/1.9.0/download",
+        type = "tar.gz",
+        sha256 = "2fd0db749597d91ff862fd1d55ea87f7855a744a8425a64695b6fca237d1dad1",
+        strip_prefix = "smallvec-1.9.0",
+        build_file = Label("//third_party/cargo/remote:BUILD.smallvec-1.9.0.bazel"),
     )
 
     maybe(
@@ -1003,12 +1073,12 @@
 
     maybe(
         http_archive,
-        name = "raze__strum_macros__0_24_0",
-        url = "https://crates.io/api/v1/crates/strum_macros/0.24.0/download",
+        name = "raze__strum_macros__0_24_2",
+        url = "https://crates.io/api/v1/crates/strum_macros/0.24.2/download",
         type = "tar.gz",
-        sha256 = "6878079b17446e4d3eba6192bb0a2950d5b14f0ed8424b852310e5a94345d0ef",
-        strip_prefix = "strum_macros-0.24.0",
-        build_file = Label("//third_party/cargo/remote:BUILD.strum_macros-0.24.0.bazel"),
+        sha256 = "4faebde00e8ff94316c01800f9054fd2ba77d30d9e922541913051d1d978918b",
+        strip_prefix = "strum_macros-0.24.2",
+        build_file = Label("//third_party/cargo/remote:BUILD.strum_macros-0.24.2.bazel"),
     )
 
     maybe(
@@ -1043,22 +1113,12 @@
 
     maybe(
         http_archive,
-        name = "raze__syn__1_0_95",
-        url = "https://crates.io/api/v1/crates/syn/1.0.95/download",
+        name = "raze__syn__1_0_98",
+        url = "https://crates.io/api/v1/crates/syn/1.0.98/download",
         type = "tar.gz",
-        sha256 = "fbaf6116ab8924f39d52792136fb74fd60a80194cf1b1c6ffa6453eef1c3f942",
-        strip_prefix = "syn-1.0.95",
-        build_file = Label("//third_party/cargo/remote:BUILD.syn-1.0.95.bazel"),
-    )
-
-    maybe(
-        http_archive,
-        name = "raze__tempdir__0_3_7",
-        url = "https://crates.io/api/v1/crates/tempdir/0.3.7/download",
-        type = "tar.gz",
-        sha256 = "15f2b5fb00ccdf689e0149d1b1b3c03fead81c2b37735d812fa8bddbbf41b6d8",
-        strip_prefix = "tempdir-0.3.7",
-        build_file = Label("//third_party/cargo/remote:BUILD.tempdir-0.3.7.bazel"),
+        sha256 = "c50aef8a904de4c23c788f104b7dddc7d6f79c647c7c8ce4cc8f73eb0ca773dd",
+        strip_prefix = "syn-1.0.98",
+        build_file = Label("//third_party/cargo/remote:BUILD.syn-1.0.98.bazel"),
     )
 
     maybe(
@@ -1163,22 +1223,22 @@
 
     maybe(
         http_archive,
-        name = "raze__trybuild__1_0_61",
-        url = "https://crates.io/api/v1/crates/trybuild/1.0.61/download",
+        name = "raze__trybuild__1_0_63",
+        url = "https://crates.io/api/v1/crates/trybuild/1.0.63/download",
         type = "tar.gz",
-        sha256 = "7fc92f558afb6d1d7c6f175eb8d615b8ef49c227543e68e19c123d4ee43d8a7d",
-        strip_prefix = "trybuild-1.0.61",
-        build_file = Label("//third_party/cargo/remote:BUILD.trybuild-1.0.61.bazel"),
+        sha256 = "764b9e244b482a9b81bde596aa37aa6f1347bf8007adab25e59f901b32b4e0a0",
+        strip_prefix = "trybuild-1.0.63",
+        build_file = Label("//third_party/cargo/remote:BUILD.trybuild-1.0.63.bazel"),
     )
 
     maybe(
         http_archive,
-        name = "raze__unicode_ident__1_0_0",
-        url = "https://crates.io/api/v1/crates/unicode-ident/1.0.0/download",
+        name = "raze__unicode_ident__1_0_2",
+        url = "https://crates.io/api/v1/crates/unicode-ident/1.0.2/download",
         type = "tar.gz",
-        sha256 = "d22af068fba1eb5edcb4aea19d382b2a3deb4c8f9d475c589b6ada9e0fd493ee",
-        strip_prefix = "unicode-ident-1.0.0",
-        build_file = Label("//third_party/cargo/remote:BUILD.unicode-ident-1.0.0.bazel"),
+        sha256 = "15c61ba63f9235225a22310255a29b806b907c9b8c964bcbd0a2c70f3f2deea7",
+        strip_prefix = "unicode-ident-1.0.2",
+        build_file = Label("//third_party/cargo/remote:BUILD.unicode-ident-1.0.2.bazel"),
     )
 
     maybe(
@@ -1203,12 +1263,12 @@
 
     maybe(
         http_archive,
-        name = "raze__uuid__1_0_0",
-        url = "https://crates.io/api/v1/crates/uuid/1.0.0/download",
+        name = "raze__uuid__1_1_2",
+        url = "https://crates.io/api/v1/crates/uuid/1.1.2/download",
         type = "tar.gz",
-        sha256 = "8cfcd319456c4d6ea10087ed423473267e1a071f3bc0aa89f80d60997843c6f0",
-        strip_prefix = "uuid-1.0.0",
-        build_file = Label("//third_party/cargo/remote:BUILD.uuid-1.0.0.bazel"),
+        sha256 = "dd6469f4314d5f1ffec476e05f17cc9a78bc7a27a6a857842170bdf8d6f98d2f",
+        strip_prefix = "uuid-1.1.2",
+        build_file = Label("//third_party/cargo/remote:BUILD.uuid-1.1.2.bazel"),
     )
 
     maybe(
diff --git a/third_party/cargo/cxx/include.BUILD.bazel b/third_party/cargo/cxx/include.BUILD.bazel
index f6b9f5a..31afbfb 100644
--- a/third_party/cargo/cxx/include.BUILD.bazel
+++ b/third_party/cargo/cxx/include.BUILD.bazel
@@ -6,4 +6,5 @@
     hdrs = ["include/cxx.h"],
     srcs = ["src/cxx.cc"],
     includes = ["include"],
+    target_compatible_with = ["@//tools/platforms/rust:has_support"],
 )
diff --git a/third_party/cargo/remote/BUILD.anyhow-1.0.57.bazel b/third_party/cargo/remote/BUILD.anyhow-1.0.58.bazel
similarity index 97%
rename from third_party/cargo/remote/BUILD.anyhow-1.0.57.bazel
rename to third_party/cargo/remote/BUILD.anyhow-1.0.58.bazel
index 82466d9..f7d5b14 100644
--- a/third_party/cargo/remote/BUILD.anyhow-1.0.57.bazel
+++ b/third_party/cargo/remote/BUILD.anyhow-1.0.58.bazel
@@ -56,7 +56,7 @@
         "cargo-raze",
         "manual",
     ],
-    version = "1.0.57",
+    version = "1.0.58",
     visibility = ["//visibility:private"],
     deps = [
     ],
@@ -80,7 +80,7 @@
         "crate-name=anyhow",
         "manual",
     ],
-    version = "1.0.57",
+    version = "1.0.58",
     # buildifier: leave-alone
     deps = [
         ":anyhow_build_script",
diff --git a/third_party/cargo/remote/BUILD.aquamarine-0.1.11.bazel b/third_party/cargo/remote/BUILD.aquamarine-0.1.11.bazel
index 87dc642..f24a8c1 100644
--- a/third_party/cargo/remote/BUILD.aquamarine-0.1.11.bazel
+++ b/third_party/cargo/remote/BUILD.aquamarine-0.1.11.bazel
@@ -51,9 +51,9 @@
     # buildifier: leave-alone
     deps = [
         "@raze__itertools__0_9_0//:itertools",
-        "@raze__proc_macro2__1_0_39//:proc_macro2",
+        "@raze__proc_macro2__1_0_40//:proc_macro2",
         "@raze__proc_macro_error__1_0_4//:proc_macro_error",
-        "@raze__quote__1_0_18//:quote",
-        "@raze__syn__1_0_95//:syn",
+        "@raze__quote__1_0_20//:quote",
+        "@raze__syn__1_0_98//:syn",
     ],
 )
diff --git a/third_party/cargo/remote/BUILD.autocxx-bindgen-0.59.16.bazel b/third_party/cargo/remote/BUILD.autocxx-bindgen-0.59.17.bazel
similarity index 88%
rename from third_party/cargo/remote/BUILD.autocxx-bindgen-0.59.16.bazel
rename to third_party/cargo/remote/BUILD.autocxx-bindgen-0.59.17.bazel
index 34dc60a..20770f5 100644
--- a/third_party/cargo/remote/BUILD.autocxx-bindgen-0.59.16.bazel
+++ b/third_party/cargo/remote/BUILD.autocxx-bindgen-0.59.17.bazel
@@ -62,10 +62,10 @@
         "cargo-raze",
         "manual",
     ],
-    version = "0.59.16",
+    version = "0.59.17",
     visibility = ["//visibility:private"],
     deps = [
-        "@raze__clang_sys__1_3_2//:clang_sys",
+        "@raze__clang_sys__1_3_3//:clang_sys",
     ],
 )
 
@@ -95,14 +95,14 @@
         "crate-name=autocxx-bindgen",
         "manual",
     ],
-    version = "0.59.16",
+    version = "0.59.17",
     # buildifier: leave-alone
     deps = [
         ":autocxx_bindgen",
         ":autocxx_bindgen_build_script",
         "@raze__bitflags__1_3_2//:bitflags",
         "@raze__cexpr__0_6_0//:cexpr",
-        "@raze__clang_sys__1_3_2//:clang_sys",
+        "@raze__clang_sys__1_3_3//:clang_sys",
         "@raze__clap__2_34_0//:clap",
         "@raze__env_logger__0_9_0//:env_logger",
         "@raze__itertools__0_10_3//:itertools",
@@ -110,9 +110,9 @@
         "@raze__lazycell__1_3_0//:lazycell",
         "@raze__log__0_4_17//:log",
         "@raze__peeking_take_while__0_1_2//:peeking_take_while",
-        "@raze__proc_macro2__1_0_39//:proc_macro2",
-        "@raze__quote__1_0_18//:quote",
-        "@raze__regex__1_5_5//:regex",
+        "@raze__proc_macro2__1_0_40//:proc_macro2",
+        "@raze__quote__1_0_20//:quote",
+        "@raze__regex__1_6_0//:regex",
         "@raze__rustc_hash__1_1_0//:rustc_hash",
         "@raze__shlex__1_1_0//:shlex",
         "@raze__which__4_2_5//:which",
@@ -143,13 +143,13 @@
         "crate-name=autocxx-bindgen",
         "manual",
     ],
-    version = "0.59.16",
+    version = "0.59.17",
     # buildifier: leave-alone
     deps = [
         ":autocxx_bindgen_build_script",
         "@raze__bitflags__1_3_2//:bitflags",
         "@raze__cexpr__0_6_0//:cexpr",
-        "@raze__clang_sys__1_3_2//:clang_sys",
+        "@raze__clang_sys__1_3_3//:clang_sys",
         "@raze__clap__2_34_0//:clap",
         "@raze__env_logger__0_9_0//:env_logger",
         "@raze__itertools__0_10_3//:itertools",
@@ -157,9 +157,9 @@
         "@raze__lazycell__1_3_0//:lazycell",
         "@raze__log__0_4_17//:log",
         "@raze__peeking_take_while__0_1_2//:peeking_take_while",
-        "@raze__proc_macro2__1_0_39//:proc_macro2",
-        "@raze__quote__1_0_18//:quote",
-        "@raze__regex__1_5_5//:regex",
+        "@raze__proc_macro2__1_0_40//:proc_macro2",
+        "@raze__quote__1_0_20//:quote",
+        "@raze__regex__1_6_0//:regex",
         "@raze__rustc_hash__1_1_0//:rustc_hash",
         "@raze__shlex__1_1_0//:shlex",
         "@raze__which__4_2_5//:which",
diff --git a/third_party/cargo/remote/BUILD.backtrace-0.3.65.bazel b/third_party/cargo/remote/BUILD.backtrace-0.3.66.bazel
similarity index 94%
rename from third_party/cargo/remote/BUILD.backtrace-0.3.65.bazel
rename to third_party/cargo/remote/BUILD.backtrace-0.3.66.bazel
index 4e77d85..21cd9bf 100644
--- a/third_party/cargo/remote/BUILD.backtrace-0.3.65.bazel
+++ b/third_party/cargo/remote/BUILD.backtrace-0.3.66.bazel
@@ -56,7 +56,7 @@
         "cargo-raze",
         "manual",
     ],
-    version = "0.3.65",
+    version = "0.3.66",
     visibility = ["//visibility:private"],
     deps = [
         "@raze__cc__1_0_73//:cc",
@@ -87,15 +87,15 @@
         "crate-name=backtrace",
         "manual",
     ],
-    version = "0.3.65",
+    version = "0.3.66",
     # buildifier: leave-alone
     deps = [
         ":backtrace_build_script",
         "@raze__addr2line__0_17_0//:addr2line",
         "@raze__cfg_if__1_0_0//:cfg_if",
         "@raze__libc__0_2_126//:libc",
-        "@raze__miniz_oxide__0_5_1//:miniz_oxide",
-        "@raze__object__0_28_4//:object",
+        "@raze__miniz_oxide__0_5_3//:miniz_oxide",
+        "@raze__object__0_29_0//:object",
         "@raze__rustc_demangle__0_1_21//:rustc_demangle",
     ],
 )
diff --git a/third_party/cargo/remote/BUILD.bindgen-0.58.1.bazel b/third_party/cargo/remote/BUILD.bindgen-0.58.1.bazel
index afa92f1..c79629a 100644
--- a/third_party/cargo/remote/BUILD.bindgen-0.58.1.bazel
+++ b/third_party/cargo/remote/BUILD.bindgen-0.58.1.bazel
@@ -65,7 +65,7 @@
     version = "0.58.1",
     visibility = ["//visibility:private"],
     deps = [
-        "@raze__clang_sys__1_3_2//:clang_sys",
+        "@raze__clang_sys__1_3_3//:clang_sys",
     ],
 )
 
@@ -102,16 +102,16 @@
         ":bindgen_build_script",
         "@raze__bitflags__1_3_2//:bitflags",
         "@raze__cexpr__0_4_0//:cexpr",
-        "@raze__clang_sys__1_3_2//:clang_sys",
+        "@raze__clang_sys__1_3_3//:clang_sys",
         "@raze__clap__2_34_0//:clap",
         "@raze__env_logger__0_8_4//:env_logger",
         "@raze__lazy_static__1_4_0//:lazy_static",
         "@raze__lazycell__1_3_0//:lazycell",
         "@raze__log__0_4_17//:log",
         "@raze__peeking_take_while__0_1_2//:peeking_take_while",
-        "@raze__proc_macro2__1_0_39//:proc_macro2",
-        "@raze__quote__1_0_18//:quote",
-        "@raze__regex__1_5_5//:regex",
+        "@raze__proc_macro2__1_0_40//:proc_macro2",
+        "@raze__quote__1_0_20//:quote",
+        "@raze__regex__1_6_0//:regex",
         "@raze__rustc_hash__1_1_0//:rustc_hash",
         "@raze__shlex__1_1_0//:shlex",
         "@raze__which__3_1_1//:which",
@@ -148,16 +148,16 @@
         ":bindgen_build_script",
         "@raze__bitflags__1_3_2//:bitflags",
         "@raze__cexpr__0_4_0//:cexpr",
-        "@raze__clang_sys__1_3_2//:clang_sys",
+        "@raze__clang_sys__1_3_3//:clang_sys",
         "@raze__clap__2_34_0//:clap",
         "@raze__env_logger__0_8_4//:env_logger",
         "@raze__lazy_static__1_4_0//:lazy_static",
         "@raze__lazycell__1_3_0//:lazycell",
         "@raze__log__0_4_17//:log",
         "@raze__peeking_take_while__0_1_2//:peeking_take_while",
-        "@raze__proc_macro2__1_0_39//:proc_macro2",
-        "@raze__quote__1_0_18//:quote",
-        "@raze__regex__1_5_5//:regex",
+        "@raze__proc_macro2__1_0_40//:proc_macro2",
+        "@raze__quote__1_0_20//:quote",
+        "@raze__regex__1_6_0//:regex",
         "@raze__rustc_hash__1_1_0//:rustc_hash",
         "@raze__shlex__1_1_0//:shlex",
         "@raze__which__3_1_1//:which",
diff --git a/third_party/cargo/remote/BUILD.clang-sys-1.3.2.bazel b/third_party/cargo/remote/BUILD.clang-sys-1.3.3.bazel
similarity index 97%
rename from third_party/cargo/remote/BUILD.clang-sys-1.3.2.bazel
rename to third_party/cargo/remote/BUILD.clang-sys-1.3.3.bazel
index 049c6dd..19592f5 100644
--- a/third_party/cargo/remote/BUILD.clang-sys-1.3.2.bazel
+++ b/third_party/cargo/remote/BUILD.clang-sys-1.3.3.bazel
@@ -65,7 +65,7 @@
         "cargo-raze",
         "manual",
     ],
-    version = "1.3.2",
+    version = "1.3.3",
     visibility = ["//visibility:private"],
     deps = [
         "@raze__glob__0_3_0//:glob",
@@ -98,7 +98,7 @@
         "crate-name=clang-sys",
         "manual",
     ],
-    version = "1.3.2",
+    version = "1.3.3",
     # buildifier: leave-alone
     deps = [
         ":clang_sys_build_script",
diff --git a/third_party/cargo/remote/BUILD.clap-3.1.18.bazel b/third_party/cargo/remote/BUILD.clap-3.2.12.bazel
similarity index 87%
rename from third_party/cargo/remote/BUILD.clap-3.1.18.bazel
rename to third_party/cargo/remote/BUILD.clap-3.2.12.bazel
index 872a85e..1fa3fef 100644
--- a/third_party/cargo/remote/BUILD.clap-3.1.18.bazel
+++ b/third_party/cargo/remote/BUILD.clap-3.2.12.bazel
@@ -31,18 +31,6 @@
 
 # Generated Targets
 
-# Unsupported target "01_default" with type "bench" omitted
-
-# Unsupported target "02_simple" with type "bench" omitted
-
-# Unsupported target "03_complex" with type "bench" omitted
-
-# Unsupported target "04_new_help" with type "bench" omitted
-
-# Unsupported target "05_ripgrep" with type "bench" omitted
-
-# Unsupported target "06_rustup" with type "bench" omitted
-
 rust_binary(
     # Prefix bin name to disambiguate from (probable) collision with lib name
     # N.B.: The exact form of this is subject to change.
@@ -53,7 +41,7 @@
         "cargo",
         "color",
         "default",
-        "lazy_static",
+        "once_cell",
         "std",
         "strsim",
         "suggestions",
@@ -61,7 +49,7 @@
     ],
     crate_root = "src/bin/stdio-fixture.rs",
     data = [],
-    edition = "2018",
+    edition = "2021",
     rustc_flags = [
         "--cap-lints=allow",
     ],
@@ -70,15 +58,15 @@
         "crate-name=stdio-fixture",
         "manual",
     ],
-    version = "3.1.18",
+    version = "3.2.12",
     # buildifier: leave-alone
     deps = [
         ":clap",
         "@raze__atty__0_2_14//:atty",
         "@raze__bitflags__1_3_2//:bitflags",
-        "@raze__clap_lex__0_2_0//:clap_lex",
-        "@raze__indexmap__1_8_1//:indexmap",
-        "@raze__lazy_static__1_4_0//:lazy_static",
+        "@raze__clap_lex__0_2_4//:clap_lex",
+        "@raze__indexmap__1_9_1//:indexmap",
+        "@raze__once_cell__1_13_0//:once_cell",
         "@raze__strsim__0_10_0//:strsim",
         "@raze__termcolor__1_1_3//:termcolor",
         "@raze__textwrap__0_15_0//:textwrap",
@@ -195,7 +183,7 @@
         "cargo",
         "color",
         "default",
-        "lazy_static",
+        "once_cell",
         "std",
         "strsim",
         "suggestions",
@@ -203,7 +191,7 @@
     ],
     crate_root = "src/lib.rs",
     data = [],
-    edition = "2018",
+    edition = "2021",
     rustc_flags = [
         "--cap-lints=allow",
     ],
@@ -212,14 +200,14 @@
         "crate-name=clap",
         "manual",
     ],
-    version = "3.1.18",
+    version = "3.2.12",
     # buildifier: leave-alone
     deps = [
         "@raze__atty__0_2_14//:atty",
         "@raze__bitflags__1_3_2//:bitflags",
-        "@raze__clap_lex__0_2_0//:clap_lex",
-        "@raze__indexmap__1_8_1//:indexmap",
-        "@raze__lazy_static__1_4_0//:lazy_static",
+        "@raze__clap_lex__0_2_4//:clap_lex",
+        "@raze__indexmap__1_9_1//:indexmap",
+        "@raze__once_cell__1_13_0//:once_cell",
         "@raze__strsim__0_10_0//:strsim",
         "@raze__termcolor__1_1_3//:termcolor",
         "@raze__textwrap__0_15_0//:textwrap",
diff --git a/third_party/cargo/remote/BUILD.clap_lex-0.2.0.bazel b/third_party/cargo/remote/BUILD.clap_lex-0.2.4.bazel
similarity index 90%
rename from third_party/cargo/remote/BUILD.clap_lex-0.2.0.bazel
rename to third_party/cargo/remote/BUILD.clap_lex-0.2.4.bazel
index 3627766..6566a06 100644
--- a/third_party/cargo/remote/BUILD.clap_lex-0.2.0.bazel
+++ b/third_party/cargo/remote/BUILD.clap_lex-0.2.4.bazel
@@ -38,7 +38,7 @@
     ],
     crate_root = "src/lib.rs",
     data = [],
-    edition = "2018",
+    edition = "2021",
     rustc_flags = [
         "--cap-lints=allow",
     ],
@@ -47,9 +47,9 @@
         "crate-name=clap_lex",
         "manual",
     ],
-    version = "0.2.0",
+    version = "0.2.4",
     # buildifier: leave-alone
     deps = [
-        "@raze__os_str_bytes__6_0_1//:os_str_bytes",
+        "@raze__os_str_bytes__6_2_0//:os_str_bytes",
     ],
 )
diff --git a/third_party/cargo/remote/BUILD.cxx-1.0.68.bazel b/third_party/cargo/remote/BUILD.cxx-1.0.71.bazel
similarity index 92%
rename from third_party/cargo/remote/BUILD.cxx-1.0.68.bazel
rename to third_party/cargo/remote/BUILD.cxx-1.0.71.bazel
index 363747f..1ede3fc 100644
--- a/third_party/cargo/remote/BUILD.cxx-1.0.68.bazel
+++ b/third_party/cargo/remote/BUILD.cxx-1.0.71.bazel
@@ -45,7 +45,7 @@
     data = [],
     edition = "2018",
     proc_macro_deps = [
-        "@raze__cxxbridge_macro__1_0_68//:cxxbridge_macro",
+        "@raze__cxxbridge_macro__1_0_71//:cxxbridge_macro",
     ],
     rustc_flags = [
         "--cap-lints=allow",
@@ -55,7 +55,7 @@
         "crate-name=cxx",
         "manual",
     ],
-    version = "1.0.68",
+    version = "1.0.71",
     # buildifier: leave-alone
     deps = [
         "@raze__link_cplusplus__1_0_6//:link_cplusplus",
@@ -81,4 +81,5 @@
     hdrs = ["include/cxx.h"],
     srcs = ["src/cxx.cc"],
     includes = ["include"],
+    target_compatible_with = ["@//tools/platforms/rust:has_support"],
 )
diff --git a/third_party/cargo/remote/BUILD.cxx-gen-0.7.68.bazel b/third_party/cargo/remote/BUILD.cxx-gen-0.7.71.bazel
similarity index 88%
rename from third_party/cargo/remote/BUILD.cxx-gen-0.7.68.bazel
rename to third_party/cargo/remote/BUILD.cxx-gen-0.7.71.bazel
index 2b29949..011b877 100644
--- a/third_party/cargo/remote/BUILD.cxx-gen-0.7.68.bazel
+++ b/third_party/cargo/remote/BUILD.cxx-gen-0.7.71.bazel
@@ -48,13 +48,13 @@
         "crate-name=cxx-gen",
         "manual",
     ],
-    version = "0.7.68",
+    version = "0.7.71",
     # buildifier: leave-alone
     deps = [
         "@raze__codespan_reporting__0_11_1//:codespan_reporting",
-        "@raze__proc_macro2__1_0_39//:proc_macro2",
-        "@raze__quote__1_0_18//:quote",
-        "@raze__syn__1_0_95//:syn",
+        "@raze__proc_macro2__1_0_40//:proc_macro2",
+        "@raze__quote__1_0_20//:quote",
+        "@raze__syn__1_0_98//:syn",
     ],
 )
 
diff --git a/third_party/cargo/remote/BUILD.cxxbridge-cmd-1.0.68.bazel b/third_party/cargo/remote/BUILD.cxxbridge-cmd-1.0.71.bazel
similarity index 82%
rename from third_party/cargo/remote/BUILD.cxxbridge-cmd-1.0.68.bazel
rename to third_party/cargo/remote/BUILD.cxxbridge-cmd-1.0.71.bazel
index 399187a..ec67c21 100644
--- a/third_party/cargo/remote/BUILD.cxxbridge-cmd-1.0.68.bazel
+++ b/third_party/cargo/remote/BUILD.cxxbridge-cmd-1.0.71.bazel
@@ -50,15 +50,15 @@
         "crate-name=cxxbridge",
         "manual",
     ],
-    version = "1.0.68",
+    version = "1.0.71",
     # buildifier: leave-alone
     deps = [
         ":cxxbridge_cmd",
-        "@raze__clap__3_1_18//:clap",
+        "@raze__clap__3_2_12//:clap",
         "@raze__codespan_reporting__0_11_1//:codespan_reporting",
-        "@raze__proc_macro2__1_0_39//:proc_macro2",
-        "@raze__quote__1_0_18//:quote",
-        "@raze__syn__1_0_95//:syn",
+        "@raze__proc_macro2__1_0_40//:proc_macro2",
+        "@raze__quote__1_0_20//:quote",
+        "@raze__syn__1_0_98//:syn",
     ],
 )
 
@@ -79,13 +79,13 @@
         "crate-name=cxxbridge-cmd",
         "manual",
     ],
-    version = "1.0.68",
+    version = "1.0.71",
     # buildifier: leave-alone
     deps = [
-        "@raze__clap__3_1_18//:clap",
+        "@raze__clap__3_2_12//:clap",
         "@raze__codespan_reporting__0_11_1//:codespan_reporting",
-        "@raze__proc_macro2__1_0_39//:proc_macro2",
-        "@raze__quote__1_0_18//:quote",
-        "@raze__syn__1_0_95//:syn",
+        "@raze__proc_macro2__1_0_40//:proc_macro2",
+        "@raze__quote__1_0_20//:quote",
+        "@raze__syn__1_0_98//:syn",
     ],
 )
diff --git a/third_party/cargo/remote/BUILD.cxxbridge-flags-1.0.68.bazel b/third_party/cargo/remote/BUILD.cxxbridge-flags-1.0.71.bazel
similarity index 97%
rename from third_party/cargo/remote/BUILD.cxxbridge-flags-1.0.68.bazel
rename to third_party/cargo/remote/BUILD.cxxbridge-flags-1.0.71.bazel
index 05f205c..dc02110 100644
--- a/third_party/cargo/remote/BUILD.cxxbridge-flags-1.0.68.bazel
+++ b/third_party/cargo/remote/BUILD.cxxbridge-flags-1.0.71.bazel
@@ -48,7 +48,7 @@
         "crate-name=cxxbridge-flags",
         "manual",
     ],
-    version = "1.0.68",
+    version = "1.0.71",
     # buildifier: leave-alone
     deps = [
     ],
diff --git a/third_party/cargo/remote/BUILD.cxxbridge-macro-1.0.68.bazel b/third_party/cargo/remote/BUILD.cxxbridge-macro-1.0.71.bazel
similarity index 87%
rename from third_party/cargo/remote/BUILD.cxxbridge-macro-1.0.68.bazel
rename to third_party/cargo/remote/BUILD.cxxbridge-macro-1.0.71.bazel
index 8e3bf0c..e41298e 100644
--- a/third_party/cargo/remote/BUILD.cxxbridge-macro-1.0.68.bazel
+++ b/third_party/cargo/remote/BUILD.cxxbridge-macro-1.0.71.bazel
@@ -47,11 +47,11 @@
         "crate-name=cxxbridge-macro",
         "manual",
     ],
-    version = "1.0.68",
+    version = "1.0.71",
     # buildifier: leave-alone
     deps = [
-        "@raze__proc_macro2__1_0_39//:proc_macro2",
-        "@raze__quote__1_0_18//:quote",
-        "@raze__syn__1_0_95//:syn",
+        "@raze__proc_macro2__1_0_40//:proc_macro2",
+        "@raze__quote__1_0_20//:quote",
+        "@raze__syn__1_0_98//:syn",
     ],
 )
diff --git a/third_party/cargo/remote/BUILD.either-1.6.1.bazel b/third_party/cargo/remote/BUILD.either-1.7.0.bazel
similarity index 95%
rename from third_party/cargo/remote/BUILD.either-1.6.1.bazel
rename to third_party/cargo/remote/BUILD.either-1.7.0.bazel
index 8807190..834d2c3 100644
--- a/third_party/cargo/remote/BUILD.either-1.6.1.bazel
+++ b/third_party/cargo/remote/BUILD.either-1.7.0.bazel
@@ -40,7 +40,7 @@
     ],
     crate_root = "src/lib.rs",
     data = [],
-    edition = "2015",
+    edition = "2018",
     rustc_flags = [
         "--cap-lints=allow",
     ],
@@ -49,7 +49,7 @@
         "crate-name=either",
         "manual",
     ],
-    version = "1.6.1",
+    version = "1.7.0",
     # buildifier: leave-alone
     deps = [
     ],
diff --git a/third_party/cargo/remote/BUILD.env_logger-0.8.4.bazel b/third_party/cargo/remote/BUILD.env_logger-0.8.4.bazel
index 14baebe..a9eb981 100644
--- a/third_party/cargo/remote/BUILD.env_logger-0.8.4.bazel
+++ b/third_party/cargo/remote/BUILD.env_logger-0.8.4.bazel
@@ -58,7 +58,7 @@
         "@raze__atty__0_2_14//:atty",
         "@raze__humantime__2_1_0//:humantime",
         "@raze__log__0_4_17//:log",
-        "@raze__regex__1_5_5//:regex",
+        "@raze__regex__1_6_0//:regex",
         "@raze__termcolor__1_1_3//:termcolor",
     ],
 )
diff --git a/third_party/cargo/remote/BUILD.env_logger-0.9.0.bazel b/third_party/cargo/remote/BUILD.env_logger-0.9.0.bazel
index d439a05..0f4e8d1 100644
--- a/third_party/cargo/remote/BUILD.env_logger-0.9.0.bazel
+++ b/third_party/cargo/remote/BUILD.env_logger-0.9.0.bazel
@@ -58,7 +58,7 @@
         "@raze__atty__0_2_14//:atty",
         "@raze__humantime__2_1_0//:humantime",
         "@raze__log__0_4_17//:log",
-        "@raze__regex__1_5_5//:regex",
+        "@raze__regex__1_6_0//:regex",
         "@raze__termcolor__1_1_3//:termcolor",
     ],
 )
diff --git a/third_party/cargo/remote/BUILD.fuchsia-cprng-0.1.1.bazel b/third_party/cargo/remote/BUILD.fuchsia-cprng-0.1.1.bazel
deleted file mode 100644
index 6dd24c5..0000000
--- a/third_party/cargo/remote/BUILD.fuchsia-cprng-0.1.1.bazel
+++ /dev/null
@@ -1,54 +0,0 @@
-"""
-@generated
-cargo-raze crate build file.
-
-DO NOT EDIT! Replaced on runs of cargo-raze
-"""
-
-# buildifier: disable=load
-load("@bazel_skylib//lib:selects.bzl", "selects")
-
-# buildifier: disable=load
-load(
-    "@rules_rust//rust:defs.bzl",
-    "rust_binary",
-    "rust_library",
-    "rust_proc_macro",
-    "rust_test",
-)
-
-package(default_visibility = [
-    # Public for visibility by "@raze__crate__version//" targets.
-    #
-    # Prefer access through "//third_party/cargo", which limits external
-    # visibility to explicit Cargo.toml dependencies.
-    "//visibility:public",
-])
-
-licenses([
-    "restricted",  # no license
-])
-
-# Generated Targets
-
-rust_library(
-    name = "fuchsia_cprng",
-    srcs = glob(["**/*.rs"]),
-    crate_features = [
-    ],
-    crate_root = "src/lib.rs",
-    data = [],
-    edition = "2018",
-    rustc_flags = [
-        "--cap-lints=allow",
-    ],
-    tags = [
-        "cargo-raze",
-        "crate-name=fuchsia-cprng",
-        "manual",
-    ],
-    version = "0.1.1",
-    # buildifier: leave-alone
-    deps = [
-    ],
-)
diff --git a/third_party/cargo/remote/BUILD.futures-0.3.21.bazel b/third_party/cargo/remote/BUILD.futures-0.3.21.bazel
new file mode 100644
index 0000000..118d747
--- /dev/null
+++ b/third_party/cargo/remote/BUILD.futures-0.3.21.bazel
@@ -0,0 +1,177 @@
+"""
+@generated
+cargo-raze crate build file.
+
+DO NOT EDIT! Replaced on runs of cargo-raze
+"""
+
+# buildifier: disable=load
+load("@bazel_skylib//lib:selects.bzl", "selects")
+
+# buildifier: disable=load
+load(
+    "@rules_rust//rust:defs.bzl",
+    "rust_binary",
+    "rust_library",
+    "rust_proc_macro",
+    "rust_test",
+)
+
+package(default_visibility = [
+    # Public for visibility by "@raze__crate__version//" targets.
+    #
+    # Prefer access through "//third_party/cargo", which limits external
+    # visibility to explicit Cargo.toml dependencies.
+    "//visibility:public",
+])
+
+licenses([
+    "notice",  # MIT from expression "MIT OR Apache-2.0"
+])
+
+# Generated Targets
+
+rust_library(
+    name = "futures",
+    srcs = glob(["**/*.rs"]),
+    crate_features = [
+        "alloc",
+        "async-await",
+        "default",
+        "executor",
+        "futures-executor",
+        "std",
+    ],
+    crate_root = "src/lib.rs",
+    data = [],
+    edition = "2018",
+    rustc_flags = [
+        "--cap-lints=allow",
+    ],
+    tags = [
+        "cargo-raze",
+        "crate-name=futures",
+        "manual",
+    ],
+    version = "0.3.21",
+    # buildifier: leave-alone
+    deps = [
+        "@raze__futures_channel__0_3_21//:futures_channel",
+        "@raze__futures_core__0_3_21//:futures_core",
+        "@raze__futures_executor__0_3_21//:futures_executor",
+        "@raze__futures_io__0_3_21//:futures_io",
+        "@raze__futures_sink__0_3_21//:futures_sink",
+        "@raze__futures_task__0_3_21//:futures_task",
+        "@raze__futures_util__0_3_21//:futures_util",
+    ],
+)
+
+# Unsupported target "_require_features" with type "test" omitted
+
+# Unsupported target "async_await_macros" with type "test" omitted
+
+# Unsupported target "auto_traits" with type "test" omitted
+
+# Unsupported target "compat" with type "test" omitted
+
+# Unsupported target "eager_drop" with type "test" omitted
+
+# Unsupported target "eventual" with type "test" omitted
+
+# Unsupported target "future_abortable" with type "test" omitted
+
+# Unsupported target "future_basic_combinators" with type "test" omitted
+
+# Unsupported target "future_fuse" with type "test" omitted
+
+# Unsupported target "future_inspect" with type "test" omitted
+
+# Unsupported target "future_join_all" with type "test" omitted
+
+# Unsupported target "future_obj" with type "test" omitted
+
+# Unsupported target "future_select_all" with type "test" omitted
+
+# Unsupported target "future_select_ok" with type "test" omitted
+
+# Unsupported target "future_shared" with type "test" omitted
+
+# Unsupported target "future_try_flatten_stream" with type "test" omitted
+
+# Unsupported target "future_try_join_all" with type "test" omitted
+
+# Unsupported target "io_buf_reader" with type "test" omitted
+
+# Unsupported target "io_buf_writer" with type "test" omitted
+
+# Unsupported target "io_cursor" with type "test" omitted
+
+# Unsupported target "io_line_writer" with type "test" omitted
+
+# Unsupported target "io_lines" with type "test" omitted
+
+# Unsupported target "io_read" with type "test" omitted
+
+# Unsupported target "io_read_exact" with type "test" omitted
+
+# Unsupported target "io_read_line" with type "test" omitted
+
+# Unsupported target "io_read_to_end" with type "test" omitted
+
+# Unsupported target "io_read_to_string" with type "test" omitted
+
+# Unsupported target "io_read_until" with type "test" omitted
+
+# Unsupported target "io_window" with type "test" omitted
+
+# Unsupported target "io_write" with type "test" omitted
+
+# Unsupported target "lock_mutex" with type "test" omitted
+
+# Unsupported target "macro_comma_support" with type "test" omitted
+
+# Unsupported target "object_safety" with type "test" omitted
+
+# Unsupported target "oneshot" with type "test" omitted
+
+# Unsupported target "ready_queue" with type "test" omitted
+
+# Unsupported target "recurse" with type "test" omitted
+
+# Unsupported target "sink" with type "test" omitted
+
+# Unsupported target "sink_fanout" with type "test" omitted
+
+# Unsupported target "stream" with type "test" omitted
+
+# Unsupported target "stream_abortable" with type "test" omitted
+
+# Unsupported target "stream_buffer_unordered" with type "test" omitted
+
+# Unsupported target "stream_catch_unwind" with type "test" omitted
+
+# Unsupported target "stream_futures_ordered" with type "test" omitted
+
+# Unsupported target "stream_futures_unordered" with type "test" omitted
+
+# Unsupported target "stream_into_async_read" with type "test" omitted
+
+# Unsupported target "stream_peekable" with type "test" omitted
+
+# Unsupported target "stream_select_all" with type "test" omitted
+
+# Unsupported target "stream_select_next_some" with type "test" omitted
+
+# Unsupported target "stream_split" with type "test" omitted
+
+# Unsupported target "stream_try_stream" with type "test" omitted
+
+# Unsupported target "stream_unfold" with type "test" omitted
+
+# Unsupported target "task_arc_wake" with type "test" omitted
+
+# Unsupported target "task_atomic_waker" with type "test" omitted
+
+# Unsupported target "test_macro" with type "test" omitted
+
+# Unsupported target "try_join" with type "test" omitted
diff --git a/third_party/cargo/remote/BUILD.serde_json-1.0.81.bazel b/third_party/cargo/remote/BUILD.futures-channel-0.3.21.bazel
similarity index 68%
copy from third_party/cargo/remote/BUILD.serde_json-1.0.81.bazel
copy to third_party/cargo/remote/BUILD.futures-channel-0.3.21.bazel
index 1869171..931d9a8 100644
--- a/third_party/cargo/remote/BUILD.serde_json-1.0.81.bazel
+++ b/third_party/cargo/remote/BUILD.futures-channel-0.3.21.bazel
@@ -38,12 +38,14 @@
 )
 
 cargo_build_script(
-    name = "serde_json_build_script",
+    name = "futures_channel_build_script",
     srcs = glob(["**/*.rs"]),
     build_script_env = {
     },
     crate_features = [
-        "default",
+        "alloc",
+        "futures-sink",
+        "sink",
         "std",
     ],
     crate_root = "build.rs",
@@ -56,17 +58,21 @@
         "cargo-raze",
         "manual",
     ],
-    version = "1.0.81",
+    version = "0.3.21",
     visibility = ["//visibility:private"],
     deps = [
     ],
 )
 
+# Unsupported target "sync_mpsc" with type "bench" omitted
+
 rust_library(
-    name = "serde_json",
+    name = "futures_channel",
     srcs = glob(["**/*.rs"]),
     crate_features = [
-        "default",
+        "alloc",
+        "futures-sink",
+        "sink",
         "std",
     ],
     crate_root = "src/lib.rs",
@@ -77,29 +83,22 @@
     ],
     tags = [
         "cargo-raze",
-        "crate-name=serde_json",
+        "crate-name=futures-channel",
         "manual",
     ],
-    version = "1.0.81",
+    version = "0.3.21",
     # buildifier: leave-alone
     deps = [
-        ":serde_json_build_script",
-        "@raze__itoa__1_0_2//:itoa",
-        "@raze__ryu__1_0_10//:ryu",
-        "@raze__serde__1_0_137//:serde",
+        ":futures_channel_build_script",
+        "@raze__futures_core__0_3_21//:futures_core",
+        "@raze__futures_sink__0_3_21//:futures_sink",
     ],
 )
 
-# Unsupported target "compiletest" with type "test" omitted
+# Unsupported target "channel" with type "test" omitted
 
-# Unsupported target "debug" with type "test" omitted
+# Unsupported target "mpsc" with type "test" omitted
 
-# Unsupported target "lexical" with type "test" omitted
+# Unsupported target "mpsc-close" with type "test" omitted
 
-# Unsupported target "map" with type "test" omitted
-
-# Unsupported target "regression" with type "test" omitted
-
-# Unsupported target "stream" with type "test" omitted
-
-# Unsupported target "test" with type "test" omitted
+# Unsupported target "oneshot" with type "test" omitted
diff --git a/third_party/cargo/remote/BUILD.serde-1.0.137.bazel b/third_party/cargo/remote/BUILD.futures-core-0.3.21.bazel
similarity index 78%
copy from third_party/cargo/remote/BUILD.serde-1.0.137.bazel
copy to third_party/cargo/remote/BUILD.futures-core-0.3.21.bazel
index f171109..ee392e2 100644
--- a/third_party/cargo/remote/BUILD.serde-1.0.137.bazel
+++ b/third_party/cargo/remote/BUILD.futures-core-0.3.21.bazel
@@ -38,19 +38,17 @@
 )
 
 cargo_build_script(
-    name = "serde_build_script",
+    name = "futures_core_build_script",
     srcs = glob(["**/*.rs"]),
     build_script_env = {
     },
     crate_features = [
-        "default",
-        "derive",
-        "serde_derive",
+        "alloc",
         "std",
     ],
     crate_root = "build.rs",
     data = glob(["**"]),
-    edition = "2015",
+    edition = "2018",
     rustc_flags = [
         "--cap-lints=allow",
     ],
@@ -58,38 +56,33 @@
         "cargo-raze",
         "manual",
     ],
-    version = "1.0.137",
+    version = "0.3.21",
     visibility = ["//visibility:private"],
     deps = [
     ],
 )
 
 rust_library(
-    name = "serde",
+    name = "futures_core",
     srcs = glob(["**/*.rs"]),
     crate_features = [
-        "default",
-        "derive",
-        "serde_derive",
+        "alloc",
         "std",
     ],
     crate_root = "src/lib.rs",
     data = [],
-    edition = "2015",
-    proc_macro_deps = [
-        "@raze__serde_derive__1_0_137//:serde_derive",
-    ],
+    edition = "2018",
     rustc_flags = [
         "--cap-lints=allow",
     ],
     tags = [
         "cargo-raze",
-        "crate-name=serde",
+        "crate-name=futures-core",
         "manual",
     ],
-    version = "1.0.137",
+    version = "0.3.21",
     # buildifier: leave-alone
     deps = [
-        ":serde_build_script",
+        ":futures_core_build_script",
     ],
 )
diff --git a/third_party/cargo/remote/BUILD.clap_lex-0.2.0.bazel b/third_party/cargo/remote/BUILD.futures-executor-0.3.21.bazel
similarity index 71%
copy from third_party/cargo/remote/BUILD.clap_lex-0.2.0.bazel
copy to third_party/cargo/remote/BUILD.futures-executor-0.3.21.bazel
index 3627766..236d50b 100644
--- a/third_party/cargo/remote/BUILD.clap_lex-0.2.0.bazel
+++ b/third_party/cargo/remote/BUILD.futures-executor-0.3.21.bazel
@@ -31,10 +31,13 @@
 
 # Generated Targets
 
+# Unsupported target "thread_notify" with type "bench" omitted
+
 rust_library(
-    name = "clap_lex",
+    name = "futures_executor",
     srcs = glob(["**/*.rs"]),
     crate_features = [
+        "std",
     ],
     crate_root = "src/lib.rs",
     data = [],
@@ -44,12 +47,16 @@
     ],
     tags = [
         "cargo-raze",
-        "crate-name=clap_lex",
+        "crate-name=futures-executor",
         "manual",
     ],
-    version = "0.2.0",
+    version = "0.3.21",
     # buildifier: leave-alone
     deps = [
-        "@raze__os_str_bytes__6_0_1//:os_str_bytes",
+        "@raze__futures_core__0_3_21//:futures_core",
+        "@raze__futures_task__0_3_21//:futures_task",
+        "@raze__futures_util__0_3_21//:futures_util",
     ],
 )
+
+# Unsupported target "local_pool" with type "test" omitted
diff --git a/third_party/cargo/remote/BUILD.os_str_bytes-6.0.1.bazel b/third_party/cargo/remote/BUILD.futures-io-0.3.21.bazel
similarity index 89%
copy from third_party/cargo/remote/BUILD.os_str_bytes-6.0.1.bazel
copy to third_party/cargo/remote/BUILD.futures-io-0.3.21.bazel
index ac08769..595d1fc 100644
--- a/third_party/cargo/remote/BUILD.os_str_bytes-6.0.1.bazel
+++ b/third_party/cargo/remote/BUILD.futures-io-0.3.21.bazel
@@ -32,10 +32,10 @@
 # Generated Targets
 
 rust_library(
-    name = "os_str_bytes",
+    name = "futures_io",
     srcs = glob(["**/*.rs"]),
     crate_features = [
-        "raw_os_str",
+        "std",
     ],
     crate_root = "src/lib.rs",
     data = [],
@@ -45,10 +45,10 @@
     ],
     tags = [
         "cargo-raze",
-        "crate-name=os_str_bytes",
+        "crate-name=futures-io",
         "manual",
     ],
-    version = "6.0.1",
+    version = "0.3.21",
     # buildifier: leave-alone
     deps = [
     ],
diff --git a/third_party/cargo/remote/BUILD.cxxbridge-macro-1.0.68.bazel b/third_party/cargo/remote/BUILD.futures-macro-0.3.21.bazel
similarity index 81%
copy from third_party/cargo/remote/BUILD.cxxbridge-macro-1.0.68.bazel
copy to third_party/cargo/remote/BUILD.futures-macro-0.3.21.bazel
index 8e3bf0c..62ab29e 100644
--- a/third_party/cargo/remote/BUILD.cxxbridge-macro-1.0.68.bazel
+++ b/third_party/cargo/remote/BUILD.futures-macro-0.3.21.bazel
@@ -32,7 +32,7 @@
 # Generated Targets
 
 rust_proc_macro(
-    name = "cxxbridge_macro",
+    name = "futures_macro",
     srcs = glob(["**/*.rs"]),
     crate_features = [
     ],
@@ -44,14 +44,14 @@
     ],
     tags = [
         "cargo-raze",
-        "crate-name=cxxbridge-macro",
+        "crate-name=futures-macro",
         "manual",
     ],
-    version = "1.0.68",
+    version = "0.3.21",
     # buildifier: leave-alone
     deps = [
-        "@raze__proc_macro2__1_0_39//:proc_macro2",
-        "@raze__quote__1_0_18//:quote",
-        "@raze__syn__1_0_95//:syn",
+        "@raze__proc_macro2__1_0_40//:proc_macro2",
+        "@raze__quote__1_0_20//:quote",
+        "@raze__syn__1_0_98//:syn",
     ],
 )
diff --git a/third_party/cargo/remote/BUILD.os_str_bytes-6.0.1.bazel b/third_party/cargo/remote/BUILD.futures-sink-0.3.21.bazel
similarity index 89%
copy from third_party/cargo/remote/BUILD.os_str_bytes-6.0.1.bazel
copy to third_party/cargo/remote/BUILD.futures-sink-0.3.21.bazel
index ac08769..0353eba 100644
--- a/third_party/cargo/remote/BUILD.os_str_bytes-6.0.1.bazel
+++ b/third_party/cargo/remote/BUILD.futures-sink-0.3.21.bazel
@@ -32,10 +32,11 @@
 # Generated Targets
 
 rust_library(
-    name = "os_str_bytes",
+    name = "futures_sink",
     srcs = glob(["**/*.rs"]),
     crate_features = [
-        "raw_os_str",
+        "alloc",
+        "std",
     ],
     crate_root = "src/lib.rs",
     data = [],
@@ -45,10 +46,10 @@
     ],
     tags = [
         "cargo-raze",
-        "crate-name=os_str_bytes",
+        "crate-name=futures-sink",
         "manual",
     ],
-    version = "6.0.1",
+    version = "0.3.21",
     # buildifier: leave-alone
     deps = [
     ],
diff --git a/third_party/cargo/remote/BUILD.serde-1.0.137.bazel b/third_party/cargo/remote/BUILD.futures-task-0.3.21.bazel
similarity index 78%
copy from third_party/cargo/remote/BUILD.serde-1.0.137.bazel
copy to third_party/cargo/remote/BUILD.futures-task-0.3.21.bazel
index f171109..ccb5218 100644
--- a/third_party/cargo/remote/BUILD.serde-1.0.137.bazel
+++ b/third_party/cargo/remote/BUILD.futures-task-0.3.21.bazel
@@ -38,19 +38,17 @@
 )
 
 cargo_build_script(
-    name = "serde_build_script",
+    name = "futures_task_build_script",
     srcs = glob(["**/*.rs"]),
     build_script_env = {
     },
     crate_features = [
-        "default",
-        "derive",
-        "serde_derive",
+        "alloc",
         "std",
     ],
     crate_root = "build.rs",
     data = glob(["**"]),
-    edition = "2015",
+    edition = "2018",
     rustc_flags = [
         "--cap-lints=allow",
     ],
@@ -58,38 +56,33 @@
         "cargo-raze",
         "manual",
     ],
-    version = "1.0.137",
+    version = "0.3.21",
     visibility = ["//visibility:private"],
     deps = [
     ],
 )
 
 rust_library(
-    name = "serde",
+    name = "futures_task",
     srcs = glob(["**/*.rs"]),
     crate_features = [
-        "default",
-        "derive",
-        "serde_derive",
+        "alloc",
         "std",
     ],
     crate_root = "src/lib.rs",
     data = [],
-    edition = "2015",
-    proc_macro_deps = [
-        "@raze__serde_derive__1_0_137//:serde_derive",
-    ],
+    edition = "2018",
     rustc_flags = [
         "--cap-lints=allow",
     ],
     tags = [
         "cargo-raze",
-        "crate-name=serde",
+        "crate-name=futures-task",
         "manual",
     ],
-    version = "1.0.137",
+    version = "0.3.21",
     # buildifier: leave-alone
     deps = [
-        ":serde_build_script",
+        ":futures_task_build_script",
     ],
 )
diff --git a/third_party/cargo/remote/BUILD.futures-util-0.3.21.bazel b/third_party/cargo/remote/BUILD.futures-util-0.3.21.bazel
new file mode 100644
index 0000000..5ea950a
--- /dev/null
+++ b/third_party/cargo/remote/BUILD.futures-util-0.3.21.bazel
@@ -0,0 +1,126 @@
+"""
+@generated
+cargo-raze crate build file.
+
+DO NOT EDIT! Replaced on runs of cargo-raze
+"""
+
+# buildifier: disable=load
+load("@bazel_skylib//lib:selects.bzl", "selects")
+
+# buildifier: disable=load
+load(
+    "@rules_rust//rust:defs.bzl",
+    "rust_binary",
+    "rust_library",
+    "rust_proc_macro",
+    "rust_test",
+)
+
+package(default_visibility = [
+    # Public for visibility by "@raze__crate__version//" targets.
+    #
+    # Prefer access through "//third_party/cargo", which limits external
+    # visibility to explicit Cargo.toml dependencies.
+    "//visibility:public",
+])
+
+licenses([
+    "notice",  # MIT from expression "MIT OR Apache-2.0"
+])
+
+# Generated Targets
+# buildifier: disable=out-of-order-load
+# buildifier: disable=load-on-top
+load(
+    "@rules_rust//cargo:cargo_build_script.bzl",
+    "cargo_build_script",
+)
+
+cargo_build_script(
+    name = "futures_util_build_script",
+    srcs = glob(["**/*.rs"]),
+    build_script_env = {
+    },
+    crate_features = [
+        "alloc",
+        "async-await",
+        "async-await-macro",
+        "channel",
+        "futures-channel",
+        "futures-io",
+        "futures-macro",
+        "futures-sink",
+        "io",
+        "memchr",
+        "sink",
+        "slab",
+        "std",
+    ],
+    crate_root = "build.rs",
+    data = glob(["**"]),
+    edition = "2018",
+    rustc_flags = [
+        "--cap-lints=allow",
+    ],
+    tags = [
+        "cargo-raze",
+        "manual",
+    ],
+    version = "0.3.21",
+    visibility = ["//visibility:private"],
+    deps = [
+    ],
+)
+
+# Unsupported target "flatten_unordered" with type "bench" omitted
+
+# Unsupported target "futures_unordered" with type "bench" omitted
+
+rust_library(
+    name = "futures_util",
+    srcs = glob(["**/*.rs"]),
+    crate_features = [
+        "alloc",
+        "async-await",
+        "async-await-macro",
+        "channel",
+        "futures-channel",
+        "futures-io",
+        "futures-macro",
+        "futures-sink",
+        "io",
+        "memchr",
+        "sink",
+        "slab",
+        "std",
+    ],
+    crate_root = "src/lib.rs",
+    data = [],
+    edition = "2018",
+    proc_macro_deps = [
+        "@raze__futures_macro__0_3_21//:futures_macro",
+    ],
+    rustc_flags = [
+        "--cap-lints=allow",
+    ],
+    tags = [
+        "cargo-raze",
+        "crate-name=futures-util",
+        "manual",
+    ],
+    version = "0.3.21",
+    # buildifier: leave-alone
+    deps = [
+        ":futures_util_build_script",
+        "@raze__futures_channel__0_3_21//:futures_channel",
+        "@raze__futures_core__0_3_21//:futures_core",
+        "@raze__futures_io__0_3_21//:futures_io",
+        "@raze__futures_sink__0_3_21//:futures_sink",
+        "@raze__futures_task__0_3_21//:futures_task",
+        "@raze__memchr__2_5_0//:memchr",
+        "@raze__pin_project_lite__0_2_9//:pin_project_lite",
+        "@raze__pin_utils__0_1_0//:pin_utils",
+        "@raze__slab__0_4_6//:slab",
+    ],
+)
diff --git a/third_party/cargo/remote/BUILD.hashbrown-0.11.2.bazel b/third_party/cargo/remote/BUILD.hashbrown-0.12.2.bazel
similarity index 87%
rename from third_party/cargo/remote/BUILD.hashbrown-0.11.2.bazel
rename to third_party/cargo/remote/BUILD.hashbrown-0.12.2.bazel
index 6ed9c49..7d61548 100644
--- a/third_party/cargo/remote/BUILD.hashbrown-0.11.2.bazel
+++ b/third_party/cargo/remote/BUILD.hashbrown-0.12.2.bazel
@@ -26,13 +26,15 @@
 ])
 
 licenses([
-    "notice",  # Apache-2.0 from expression "Apache-2.0 OR MIT"
+    "notice",  # MIT from expression "MIT OR Apache-2.0"
 ])
 
 # Generated Targets
 
 # Unsupported target "bench" with type "bench" omitted
 
+# Unsupported target "insert_unique_unchecked" with type "bench" omitted
+
 rust_library(
     name = "hashbrown",
     srcs = glob(["**/*.rs"]),
@@ -41,7 +43,7 @@
     ],
     crate_root = "src/lib.rs",
     data = [],
-    edition = "2018",
+    edition = "2021",
     rustc_flags = [
         "--cap-lints=allow",
     ],
@@ -50,7 +52,7 @@
         "crate-name=hashbrown",
         "manual",
     ],
-    version = "0.11.2",
+    version = "0.12.2",
     # buildifier: leave-alone
     deps = [
     ],
diff --git a/third_party/cargo/remote/BUILD.indexmap-1.8.1.bazel b/third_party/cargo/remote/BUILD.indexmap-1.9.1.bazel
similarity index 91%
rename from third_party/cargo/remote/BUILD.indexmap-1.8.1.bazel
rename to third_party/cargo/remote/BUILD.indexmap-1.9.1.bazel
index f8a1ad2..baf2641 100644
--- a/third_party/cargo/remote/BUILD.indexmap-1.8.1.bazel
+++ b/third_party/cargo/remote/BUILD.indexmap-1.9.1.bazel
@@ -48,7 +48,7 @@
     ],
     crate_root = "build.rs",
     data = glob(["**"]),
-    edition = "2018",
+    edition = "2021",
     rustc_flags = [
         "--cap-lints=allow",
     ],
@@ -56,7 +56,7 @@
         "cargo-raze",
         "manual",
     ],
-    version = "1.8.1",
+    version = "1.9.1",
     visibility = ["//visibility:private"],
     deps = [
         "@raze__autocfg__1_1_0//:autocfg",
@@ -76,7 +76,7 @@
     ],
     crate_root = "src/lib.rs",
     data = [],
-    edition = "2018",
+    edition = "2021",
     rustc_flags = [
         "--cap-lints=allow",
     ],
@@ -85,12 +85,12 @@
         "crate-name=indexmap",
         "manual",
     ],
-    version = "1.8.1",
+    version = "1.9.1",
     # buildifier: leave-alone
     deps = [
         ":indexmap_build_script",
-        "@raze__hashbrown__0_11_2//:hashbrown",
-        "@raze__serde__1_0_137//:serde",
+        "@raze__hashbrown__0_12_2//:hashbrown",
+        "@raze__serde__1_0_139//:serde",
     ],
 )
 
diff --git a/third_party/cargo/remote/BUILD.itertools-0.10.3.bazel b/third_party/cargo/remote/BUILD.itertools-0.10.3.bazel
index 46ee0f9..d919774 100644
--- a/third_party/cargo/remote/BUILD.itertools-0.10.3.bazel
+++ b/third_party/cargo/remote/BUILD.itertools-0.10.3.bazel
@@ -71,7 +71,7 @@
     version = "0.10.3",
     # buildifier: leave-alone
     deps = [
-        "@raze__either__1_6_1//:either",
+        "@raze__either__1_7_0//:either",
     ],
 )
 
diff --git a/third_party/cargo/remote/BUILD.itertools-0.9.0.bazel b/third_party/cargo/remote/BUILD.itertools-0.9.0.bazel
index ddc54c6..6dc4e8e 100644
--- a/third_party/cargo/remote/BUILD.itertools-0.9.0.bazel
+++ b/third_party/cargo/remote/BUILD.itertools-0.9.0.bazel
@@ -66,7 +66,7 @@
     version = "0.9.0",
     # buildifier: leave-alone
     deps = [
-        "@raze__either__1_6_1//:either",
+        "@raze__either__1_7_0//:either",
     ],
 )
 
diff --git a/third_party/cargo/remote/BUILD.miette-4.7.1.bazel b/third_party/cargo/remote/BUILD.miette-4.7.1.bazel
index 0d176e6..1ce2130 100644
--- a/third_party/cargo/remote/BUILD.miette-4.7.1.bazel
+++ b/third_party/cargo/remote/BUILD.miette-4.7.1.bazel
@@ -65,8 +65,8 @@
     # buildifier: leave-alone
     deps = [
         "@raze__atty__0_2_14//:atty",
-        "@raze__backtrace__0_3_65//:backtrace",
-        "@raze__once_cell__1_10_0//:once_cell",
+        "@raze__backtrace__0_3_66//:backtrace",
+        "@raze__once_cell__1_13_0//:once_cell",
         "@raze__owo_colors__3_4_0//:owo_colors",
         "@raze__supports_color__1_3_0//:supports_color",
         "@raze__supports_hyperlinks__1_2_0//:supports_hyperlinks",
diff --git a/third_party/cargo/remote/BUILD.miette-derive-4.7.1.bazel b/third_party/cargo/remote/BUILD.miette-derive-4.7.1.bazel
index 6e2b8ee..1f97719 100644
--- a/third_party/cargo/remote/BUILD.miette-derive-4.7.1.bazel
+++ b/third_party/cargo/remote/BUILD.miette-derive-4.7.1.bazel
@@ -50,8 +50,8 @@
     version = "4.7.1",
     # buildifier: leave-alone
     deps = [
-        "@raze__proc_macro2__1_0_39//:proc_macro2",
-        "@raze__quote__1_0_18//:quote",
-        "@raze__syn__1_0_95//:syn",
+        "@raze__proc_macro2__1_0_40//:proc_macro2",
+        "@raze__quote__1_0_20//:quote",
+        "@raze__syn__1_0_98//:syn",
     ],
 )
diff --git a/third_party/cargo/remote/BUILD.miniz_oxide-0.5.1.bazel b/third_party/cargo/remote/BUILD.miniz_oxide-0.5.3.bazel
similarity index 97%
rename from third_party/cargo/remote/BUILD.miniz_oxide-0.5.1.bazel
rename to third_party/cargo/remote/BUILD.miniz_oxide-0.5.3.bazel
index 0d2eae2..70f3666 100644
--- a/third_party/cargo/remote/BUILD.miniz_oxide-0.5.1.bazel
+++ b/third_party/cargo/remote/BUILD.miniz_oxide-0.5.3.bazel
@@ -47,7 +47,7 @@
         "crate-name=miniz_oxide",
         "manual",
     ],
-    version = "0.5.1",
+    version = "0.5.3",
     # buildifier: leave-alone
     deps = [
         "@raze__adler__1_0_2//:adler",
diff --git a/third_party/cargo/remote/BUILD.moveit-0.5.0.bazel b/third_party/cargo/remote/BUILD.moveit-0.5.0.bazel
index c81ec4a..38f2da3 100644
--- a/third_party/cargo/remote/BUILD.moveit-0.5.0.bazel
+++ b/third_party/cargo/remote/BUILD.moveit-0.5.0.bazel
@@ -53,6 +53,6 @@
     version = "0.5.0",
     # buildifier: leave-alone
     deps = [
-        "@raze__cxx__1_0_68//:cxx",
+        "@raze__cxx__1_0_71//:cxx",
     ],
 )
diff --git a/third_party/cargo/remote/BUILD.object-0.28.4.bazel b/third_party/cargo/remote/BUILD.object-0.29.0.bazel
similarity index 97%
rename from third_party/cargo/remote/BUILD.object-0.28.4.bazel
rename to third_party/cargo/remote/BUILD.object-0.29.0.bazel
index e04c45c..273891f 100644
--- a/third_party/cargo/remote/BUILD.object-0.28.4.bazel
+++ b/third_party/cargo/remote/BUILD.object-0.29.0.bazel
@@ -54,7 +54,7 @@
         "crate-name=object",
         "manual",
     ],
-    version = "0.28.4",
+    version = "0.29.0",
     # buildifier: leave-alone
     deps = [
         "@raze__memchr__2_5_0//:memchr",
diff --git a/third_party/cargo/remote/BUILD.once_cell-1.10.0.bazel b/third_party/cargo/remote/BUILD.once_cell-1.13.0.bazel
similarity index 98%
rename from third_party/cargo/remote/BUILD.once_cell-1.10.0.bazel
rename to third_party/cargo/remote/BUILD.once_cell-1.13.0.bazel
index dc4d474..741f97e 100644
--- a/third_party/cargo/remote/BUILD.once_cell-1.10.0.bazel
+++ b/third_party/cargo/remote/BUILD.once_cell-1.13.0.bazel
@@ -65,7 +65,7 @@
         "crate-name=once_cell",
         "manual",
     ],
-    version = "1.10.0",
+    version = "1.13.0",
     # buildifier: leave-alone
     deps = [
     ],
diff --git a/third_party/cargo/remote/BUILD.os_str_bytes-6.0.1.bazel b/third_party/cargo/remote/BUILD.os_str_bytes-6.2.0.bazel
similarity index 95%
rename from third_party/cargo/remote/BUILD.os_str_bytes-6.0.1.bazel
rename to third_party/cargo/remote/BUILD.os_str_bytes-6.2.0.bazel
index ac08769..7238c9f 100644
--- a/third_party/cargo/remote/BUILD.os_str_bytes-6.0.1.bazel
+++ b/third_party/cargo/remote/BUILD.os_str_bytes-6.2.0.bazel
@@ -39,7 +39,7 @@
     ],
     crate_root = "src/lib.rs",
     data = [],
-    edition = "2018",
+    edition = "2021",
     rustc_flags = [
         "--cap-lints=allow",
     ],
@@ -48,7 +48,7 @@
         "crate-name=os_str_bytes",
         "manual",
     ],
-    version = "6.0.1",
+    version = "6.2.0",
     # buildifier: leave-alone
     deps = [
     ],
diff --git a/third_party/cargo/remote/BUILD.quote-1.0.18.bazel b/third_party/cargo/remote/BUILD.pin-project-lite-0.2.9.bazel
similarity index 72%
rename from third_party/cargo/remote/BUILD.quote-1.0.18.bazel
rename to third_party/cargo/remote/BUILD.pin-project-lite-0.2.9.bazel
index daa4fcb..4a952bc 100644
--- a/third_party/cargo/remote/BUILD.quote-1.0.18.bazel
+++ b/third_party/cargo/remote/BUILD.pin-project-lite-0.2.9.bazel
@@ -26,17 +26,15 @@
 ])
 
 licenses([
-    "notice",  # MIT from expression "MIT OR Apache-2.0"
+    "notice",  # Apache-2.0 from expression "Apache-2.0 OR MIT"
 ])
 
 # Generated Targets
 
 rust_library(
-    name = "quote",
+    name = "pin_project_lite",
     srcs = glob(["**/*.rs"]),
     crate_features = [
-        "default",
-        "proc-macro",
     ],
     crate_root = "src/lib.rs",
     data = [],
@@ -46,16 +44,23 @@
     ],
     tags = [
         "cargo-raze",
-        "crate-name=quote",
+        "crate-name=pin-project-lite",
         "manual",
     ],
-    version = "1.0.18",
+    version = "0.2.9",
     # buildifier: leave-alone
     deps = [
-        "@raze__proc_macro2__1_0_39//:proc_macro2",
     ],
 )
 
 # Unsupported target "compiletest" with type "test" omitted
 
+# Unsupported target "drop_order" with type "test" omitted
+
+# Unsupported target "expandtest" with type "test" omitted
+
+# Unsupported target "lint" with type "test" omitted
+
+# Unsupported target "proper_unpin" with type "test" omitted
+
 # Unsupported target "test" with type "test" omitted
diff --git a/third_party/cargo/remote/BUILD.clap_lex-0.2.0.bazel b/third_party/cargo/remote/BUILD.pin-utils-0.1.0.bazel
similarity index 83%
copy from third_party/cargo/remote/BUILD.clap_lex-0.2.0.bazel
copy to third_party/cargo/remote/BUILD.pin-utils-0.1.0.bazel
index 3627766..f3852e0 100644
--- a/third_party/cargo/remote/BUILD.clap_lex-0.2.0.bazel
+++ b/third_party/cargo/remote/BUILD.pin-utils-0.1.0.bazel
@@ -32,7 +32,7 @@
 # Generated Targets
 
 rust_library(
-    name = "clap_lex",
+    name = "pin_utils",
     srcs = glob(["**/*.rs"]),
     crate_features = [
     ],
@@ -44,12 +44,15 @@
     ],
     tags = [
         "cargo-raze",
-        "crate-name=clap_lex",
+        "crate-name=pin-utils",
         "manual",
     ],
-    version = "0.2.0",
+    version = "0.1.0",
     # buildifier: leave-alone
     deps = [
-        "@raze__os_str_bytes__6_0_1//:os_str_bytes",
     ],
 )
+
+# Unsupported target "projection" with type "test" omitted
+
+# Unsupported target "stack_pin" with type "test" omitted
diff --git a/third_party/cargo/remote/BUILD.proc-macro-error-1.0.4.bazel b/third_party/cargo/remote/BUILD.proc-macro-error-1.0.4.bazel
index d571524..66f5bfa 100644
--- a/third_party/cargo/remote/BUILD.proc-macro-error-1.0.4.bazel
+++ b/third_party/cargo/remote/BUILD.proc-macro-error-1.0.4.bazel
@@ -90,9 +90,9 @@
     # buildifier: leave-alone
     deps = [
         ":proc_macro_error_build_script",
-        "@raze__proc_macro2__1_0_39//:proc_macro2",
-        "@raze__quote__1_0_18//:quote",
-        "@raze__syn__1_0_95//:syn",
+        "@raze__proc_macro2__1_0_40//:proc_macro2",
+        "@raze__quote__1_0_20//:quote",
+        "@raze__syn__1_0_98//:syn",
     ],
 )
 
diff --git a/third_party/cargo/remote/BUILD.proc-macro-error-attr-1.0.4.bazel b/third_party/cargo/remote/BUILD.proc-macro-error-attr-1.0.4.bazel
index b737055..fdca1a4 100644
--- a/third_party/cargo/remote/BUILD.proc-macro-error-attr-1.0.4.bazel
+++ b/third_party/cargo/remote/BUILD.proc-macro-error-attr-1.0.4.bazel
@@ -81,7 +81,7 @@
     # buildifier: leave-alone
     deps = [
         ":proc_macro_error_attr_build_script",
-        "@raze__proc_macro2__1_0_39//:proc_macro2",
-        "@raze__quote__1_0_18//:quote",
+        "@raze__proc_macro2__1_0_40//:proc_macro2",
+        "@raze__quote__1_0_20//:quote",
     ],
 )
diff --git a/third_party/cargo/remote/BUILD.proc-macro2-1.0.39.bazel b/third_party/cargo/remote/BUILD.proc-macro2-1.0.40.bazel
similarity index 94%
rename from third_party/cargo/remote/BUILD.proc-macro2-1.0.39.bazel
rename to third_party/cargo/remote/BUILD.proc-macro2-1.0.40.bazel
index 82d43bd..56b71b4 100644
--- a/third_party/cargo/remote/BUILD.proc-macro2-1.0.39.bazel
+++ b/third_party/cargo/remote/BUILD.proc-macro2-1.0.40.bazel
@@ -57,7 +57,7 @@
         "cargo-raze",
         "manual",
     ],
-    version = "1.0.39",
+    version = "1.0.40",
     visibility = ["//visibility:private"],
     deps = [
     ],
@@ -82,11 +82,11 @@
         "crate-name=proc-macro2",
         "manual",
     ],
-    version = "1.0.39",
+    version = "1.0.40",
     # buildifier: leave-alone
     deps = [
         ":proc_macro2_build_script",
-        "@raze__unicode_ident__1_0_0//:unicode_ident",
+        "@raze__unicode_ident__1_0_2//:unicode_ident",
     ],
 )
 
diff --git a/third_party/cargo/remote/BUILD.serde-1.0.137.bazel b/third_party/cargo/remote/BUILD.quote-1.0.20.bazel
similarity index 78%
copy from third_party/cargo/remote/BUILD.serde-1.0.137.bazel
copy to third_party/cargo/remote/BUILD.quote-1.0.20.bazel
index f171109..d23d8a4 100644
--- a/third_party/cargo/remote/BUILD.serde-1.0.137.bazel
+++ b/third_party/cargo/remote/BUILD.quote-1.0.20.bazel
@@ -38,19 +38,17 @@
 )
 
 cargo_build_script(
-    name = "serde_build_script",
+    name = "quote_build_script",
     srcs = glob(["**/*.rs"]),
     build_script_env = {
     },
     crate_features = [
         "default",
-        "derive",
-        "serde_derive",
-        "std",
+        "proc-macro",
     ],
     crate_root = "build.rs",
     data = glob(["**"]),
-    edition = "2015",
+    edition = "2018",
     rustc_flags = [
         "--cap-lints=allow",
     ],
@@ -58,38 +56,38 @@
         "cargo-raze",
         "manual",
     ],
-    version = "1.0.137",
+    version = "1.0.20",
     visibility = ["//visibility:private"],
     deps = [
     ],
 )
 
 rust_library(
-    name = "serde",
+    name = "quote",
     srcs = glob(["**/*.rs"]),
     crate_features = [
         "default",
-        "derive",
-        "serde_derive",
-        "std",
+        "proc-macro",
     ],
     crate_root = "src/lib.rs",
     data = [],
-    edition = "2015",
-    proc_macro_deps = [
-        "@raze__serde_derive__1_0_137//:serde_derive",
-    ],
+    edition = "2018",
     rustc_flags = [
         "--cap-lints=allow",
     ],
     tags = [
         "cargo-raze",
-        "crate-name=serde",
+        "crate-name=quote",
         "manual",
     ],
-    version = "1.0.137",
+    version = "1.0.20",
     # buildifier: leave-alone
     deps = [
-        ":serde_build_script",
+        ":quote_build_script",
+        "@raze__proc_macro2__1_0_40//:proc_macro2",
     ],
 )
+
+# Unsupported target "compiletest" with type "test" omitted
+
+# Unsupported target "test" with type "test" omitted
diff --git a/third_party/cargo/remote/BUILD.rand-0.4.6.bazel b/third_party/cargo/remote/BUILD.rand-0.4.6.bazel
deleted file mode 100644
index aa44832..0000000
--- a/third_party/cargo/remote/BUILD.rand-0.4.6.bazel
+++ /dev/null
@@ -1,75 +0,0 @@
-"""
-@generated
-cargo-raze crate build file.
-
-DO NOT EDIT! Replaced on runs of cargo-raze
-"""
-
-# buildifier: disable=load
-load("@bazel_skylib//lib:selects.bzl", "selects")
-
-# buildifier: disable=load
-load(
-    "@rules_rust//rust:defs.bzl",
-    "rust_binary",
-    "rust_library",
-    "rust_proc_macro",
-    "rust_test",
-)
-
-package(default_visibility = [
-    # Public for visibility by "@raze__crate__version//" targets.
-    #
-    # Prefer access through "//third_party/cargo", which limits external
-    # visibility to explicit Cargo.toml dependencies.
-    "//visibility:public",
-])
-
-licenses([
-    "notice",  # MIT from expression "MIT OR Apache-2.0"
-])
-
-# Generated Targets
-
-# Unsupported target "bench" with type "bench" omitted
-
-# Unsupported target "generators" with type "bench" omitted
-
-# Unsupported target "misc" with type "bench" omitted
-
-rust_library(
-    name = "rand",
-    srcs = glob(["**/*.rs"]),
-    aliases = {
-    },
-    crate_features = [
-        "default",
-        "libc",
-        "std",
-    ],
-    crate_root = "src/lib.rs",
-    data = [],
-    edition = "2015",
-    rustc_flags = [
-        "--cap-lints=allow",
-    ],
-    tags = [
-        "cargo-raze",
-        "crate-name=rand",
-        "manual",
-    ],
-    version = "0.4.6",
-    # buildifier: leave-alone
-    deps = [
-    ] + selects.with_or({
-        # cfg(unix)
-        (
-            "@rules_rust//rust/platform:x86_64-unknown-linux-gnu",
-            "@rules_rust//rust/platform:arm-unknown-linux-gnueabi",
-            "@rules_rust//rust/platform:armv7-unknown-linux-gnueabihf",
-        ): [
-            "@raze__libc__0_2_126//:libc",
-        ],
-        "//conditions:default": [],
-    }),
-)
diff --git a/third_party/cargo/remote/BUILD.rand_core-0.3.1.bazel b/third_party/cargo/remote/BUILD.rand_core-0.3.1.bazel
deleted file mode 100644
index c2649f2..0000000
--- a/third_party/cargo/remote/BUILD.rand_core-0.3.1.bazel
+++ /dev/null
@@ -1,55 +0,0 @@
-"""
-@generated
-cargo-raze crate build file.
-
-DO NOT EDIT! Replaced on runs of cargo-raze
-"""
-
-# buildifier: disable=load
-load("@bazel_skylib//lib:selects.bzl", "selects")
-
-# buildifier: disable=load
-load(
-    "@rules_rust//rust:defs.bzl",
-    "rust_binary",
-    "rust_library",
-    "rust_proc_macro",
-    "rust_test",
-)
-
-package(default_visibility = [
-    # Public for visibility by "@raze__crate__version//" targets.
-    #
-    # Prefer access through "//third_party/cargo", which limits external
-    # visibility to explicit Cargo.toml dependencies.
-    "//visibility:public",
-])
-
-licenses([
-    "notice",  # MIT from expression "MIT OR Apache-2.0"
-])
-
-# Generated Targets
-
-rust_library(
-    name = "rand_core",
-    srcs = glob(["**/*.rs"]),
-    crate_features = [
-    ],
-    crate_root = "src/lib.rs",
-    data = [],
-    edition = "2015",
-    rustc_flags = [
-        "--cap-lints=allow",
-    ],
-    tags = [
-        "cargo-raze",
-        "crate-name=rand_core",
-        "manual",
-    ],
-    version = "0.3.1",
-    # buildifier: leave-alone
-    deps = [
-        "@raze__rand_core__0_4_2//:rand_core",
-    ],
-)
diff --git a/third_party/cargo/remote/BUILD.rand_core-0.4.2.bazel b/third_party/cargo/remote/BUILD.rand_core-0.4.2.bazel
deleted file mode 100644
index ea89b87..0000000
--- a/third_party/cargo/remote/BUILD.rand_core-0.4.2.bazel
+++ /dev/null
@@ -1,54 +0,0 @@
-"""
-@generated
-cargo-raze crate build file.
-
-DO NOT EDIT! Replaced on runs of cargo-raze
-"""
-
-# buildifier: disable=load
-load("@bazel_skylib//lib:selects.bzl", "selects")
-
-# buildifier: disable=load
-load(
-    "@rules_rust//rust:defs.bzl",
-    "rust_binary",
-    "rust_library",
-    "rust_proc_macro",
-    "rust_test",
-)
-
-package(default_visibility = [
-    # Public for visibility by "@raze__crate__version//" targets.
-    #
-    # Prefer access through "//third_party/cargo", which limits external
-    # visibility to explicit Cargo.toml dependencies.
-    "//visibility:public",
-])
-
-licenses([
-    "notice",  # MIT from expression "MIT OR Apache-2.0"
-])
-
-# Generated Targets
-
-rust_library(
-    name = "rand_core",
-    srcs = glob(["**/*.rs"]),
-    crate_features = [
-    ],
-    crate_root = "src/lib.rs",
-    data = [],
-    edition = "2015",
-    rustc_flags = [
-        "--cap-lints=allow",
-    ],
-    tags = [
-        "cargo-raze",
-        "crate-name=rand_core",
-        "manual",
-    ],
-    version = "0.4.2",
-    # buildifier: leave-alone
-    deps = [
-    ],
-)
diff --git a/third_party/cargo/remote/BUILD.rdrand-0.4.0.bazel b/third_party/cargo/remote/BUILD.rdrand-0.4.0.bazel
deleted file mode 100644
index 768dc0b..0000000
--- a/third_party/cargo/remote/BUILD.rdrand-0.4.0.bazel
+++ /dev/null
@@ -1,63 +0,0 @@
-"""
-@generated
-cargo-raze crate build file.
-
-DO NOT EDIT! Replaced on runs of cargo-raze
-"""
-
-# buildifier: disable=load
-load("@bazel_skylib//lib:selects.bzl", "selects")
-
-# buildifier: disable=load
-load(
-    "@rules_rust//rust:defs.bzl",
-    "rust_binary",
-    "rust_library",
-    "rust_proc_macro",
-    "rust_test",
-)
-
-package(default_visibility = [
-    # Public for visibility by "@raze__crate__version//" targets.
-    #
-    # Prefer access through "//third_party/cargo", which limits external
-    # visibility to explicit Cargo.toml dependencies.
-    "//visibility:public",
-])
-
-licenses([
-    "notice",  # ISC from expression "ISC"
-])
-
-# Generated Targets
-
-# Unsupported target "rdrand" with type "bench" omitted
-
-# Unsupported target "rdseed" with type "bench" omitted
-
-# Unsupported target "std" with type "bench" omitted
-
-rust_library(
-    name = "rdrand",
-    srcs = glob(["**/*.rs"]),
-    crate_features = [
-        "default",
-        "std",
-    ],
-    crate_root = "src/lib.rs",
-    data = [],
-    edition = "2015",
-    rustc_flags = [
-        "--cap-lints=allow",
-    ],
-    tags = [
-        "cargo-raze",
-        "crate-name=rdrand",
-        "manual",
-    ],
-    version = "0.4.0",
-    # buildifier: leave-alone
-    deps = [
-        "@raze__rand_core__0_3_1//:rand_core",
-    ],
-)
diff --git a/third_party/cargo/remote/BUILD.regex-1.5.5.bazel b/third_party/cargo/remote/BUILD.regex-1.6.0.bazel
similarity index 96%
rename from third_party/cargo/remote/BUILD.regex-1.5.5.bazel
rename to third_party/cargo/remote/BUILD.regex-1.6.0.bazel
index 6180973..af9b051 100644
--- a/third_party/cargo/remote/BUILD.regex-1.5.5.bazel
+++ b/third_party/cargo/remote/BUILD.regex-1.6.0.bazel
@@ -76,12 +76,12 @@
         "crate-name=regex",
         "manual",
     ],
-    version = "1.5.5",
+    version = "1.6.0",
     # buildifier: leave-alone
     deps = [
         "@raze__aho_corasick__0_7_18//:aho_corasick",
         "@raze__memchr__2_5_0//:memchr",
-        "@raze__regex_syntax__0_6_25//:regex_syntax",
+        "@raze__regex_syntax__0_6_27//:regex_syntax",
     ],
 )
 
diff --git a/third_party/cargo/remote/BUILD.regex-syntax-0.6.25.bazel b/third_party/cargo/remote/BUILD.regex-syntax-0.6.27.bazel
similarity index 97%
rename from third_party/cargo/remote/BUILD.regex-syntax-0.6.25.bazel
rename to third_party/cargo/remote/BUILD.regex-syntax-0.6.27.bazel
index 29becb6..7dba3ce 100644
--- a/third_party/cargo/remote/BUILD.regex-syntax-0.6.25.bazel
+++ b/third_party/cargo/remote/BUILD.regex-syntax-0.6.27.bazel
@@ -58,7 +58,7 @@
         "crate-name=regex-syntax",
         "manual",
     ],
-    version = "0.6.25",
+    version = "0.6.27",
     # buildifier: leave-alone
     deps = [
     ],
diff --git a/third_party/cargo/remote/BUILD.rustversion-1.0.6.bazel b/third_party/cargo/remote/BUILD.rustversion-1.0.8.bazel
similarity index 97%
rename from third_party/cargo/remote/BUILD.rustversion-1.0.6.bazel
rename to third_party/cargo/remote/BUILD.rustversion-1.0.8.bazel
index b106cae..2609fab 100644
--- a/third_party/cargo/remote/BUILD.rustversion-1.0.6.bazel
+++ b/third_party/cargo/remote/BUILD.rustversion-1.0.8.bazel
@@ -54,7 +54,7 @@
         "cargo-raze",
         "manual",
     ],
-    version = "1.0.6",
+    version = "1.0.8",
     visibility = ["//visibility:private"],
     deps = [
     ],
@@ -76,7 +76,7 @@
         "crate-name=rustversion",
         "manual",
     ],
-    version = "1.0.6",
+    version = "1.0.8",
     # buildifier: leave-alone
     deps = [
         ":rustversion_build_script",
diff --git a/third_party/cargo/remote/BUILD.serde-1.0.137.bazel b/third_party/cargo/remote/BUILD.serde-1.0.139.bazel
similarity index 94%
rename from third_party/cargo/remote/BUILD.serde-1.0.137.bazel
rename to third_party/cargo/remote/BUILD.serde-1.0.139.bazel
index f171109..ecc80bc 100644
--- a/third_party/cargo/remote/BUILD.serde-1.0.137.bazel
+++ b/third_party/cargo/remote/BUILD.serde-1.0.139.bazel
@@ -58,7 +58,7 @@
         "cargo-raze",
         "manual",
     ],
-    version = "1.0.137",
+    version = "1.0.139",
     visibility = ["//visibility:private"],
     deps = [
     ],
@@ -77,7 +77,7 @@
     data = [],
     edition = "2015",
     proc_macro_deps = [
-        "@raze__serde_derive__1_0_137//:serde_derive",
+        "@raze__serde_derive__1_0_139//:serde_derive",
     ],
     rustc_flags = [
         "--cap-lints=allow",
@@ -87,7 +87,7 @@
         "crate-name=serde",
         "manual",
     ],
-    version = "1.0.137",
+    version = "1.0.139",
     # buildifier: leave-alone
     deps = [
         ":serde_build_script",
diff --git a/third_party/cargo/remote/BUILD.serde_derive-1.0.137.bazel b/third_party/cargo/remote/BUILD.serde_derive-1.0.139.bazel
similarity index 90%
rename from third_party/cargo/remote/BUILD.serde_derive-1.0.137.bazel
rename to third_party/cargo/remote/BUILD.serde_derive-1.0.139.bazel
index e6b7a2b..29ce18e 100644
--- a/third_party/cargo/remote/BUILD.serde_derive-1.0.137.bazel
+++ b/third_party/cargo/remote/BUILD.serde_derive-1.0.139.bazel
@@ -55,7 +55,7 @@
         "cargo-raze",
         "manual",
     ],
-    version = "1.0.137",
+    version = "1.0.139",
     visibility = ["//visibility:private"],
     deps = [
     ],
@@ -78,12 +78,12 @@
         "crate-name=serde_derive",
         "manual",
     ],
-    version = "1.0.137",
+    version = "1.0.139",
     # buildifier: leave-alone
     deps = [
         ":serde_derive_build_script",
-        "@raze__proc_macro2__1_0_39//:proc_macro2",
-        "@raze__quote__1_0_18//:quote",
-        "@raze__syn__1_0_95//:syn",
+        "@raze__proc_macro2__1_0_40//:proc_macro2",
+        "@raze__quote__1_0_20//:quote",
+        "@raze__syn__1_0_98//:syn",
     ],
 )
diff --git a/third_party/cargo/remote/BUILD.serde_json-1.0.81.bazel b/third_party/cargo/remote/BUILD.serde_json-1.0.82.bazel
similarity index 95%
rename from third_party/cargo/remote/BUILD.serde_json-1.0.81.bazel
rename to third_party/cargo/remote/BUILD.serde_json-1.0.82.bazel
index 1869171..beb9b62 100644
--- a/third_party/cargo/remote/BUILD.serde_json-1.0.81.bazel
+++ b/third_party/cargo/remote/BUILD.serde_json-1.0.82.bazel
@@ -56,7 +56,7 @@
         "cargo-raze",
         "manual",
     ],
-    version = "1.0.81",
+    version = "1.0.82",
     visibility = ["//visibility:private"],
     deps = [
     ],
@@ -80,13 +80,13 @@
         "crate-name=serde_json",
         "manual",
     ],
-    version = "1.0.81",
+    version = "1.0.82",
     # buildifier: leave-alone
     deps = [
         ":serde_json_build_script",
         "@raze__itoa__1_0_2//:itoa",
         "@raze__ryu__1_0_10//:ryu",
-        "@raze__serde__1_0_137//:serde",
+        "@raze__serde__1_0_139//:serde",
     ],
 )
 
diff --git a/third_party/cargo/remote/BUILD.cxxbridge-flags-1.0.68.bazel b/third_party/cargo/remote/BUILD.slab-0.4.6.bazel
similarity index 79%
copy from third_party/cargo/remote/BUILD.cxxbridge-flags-1.0.68.bazel
copy to third_party/cargo/remote/BUILD.slab-0.4.6.bazel
index 05f205c..fa2b3e8 100644
--- a/third_party/cargo/remote/BUILD.cxxbridge-flags-1.0.68.bazel
+++ b/third_party/cargo/remote/BUILD.slab-0.4.6.bazel
@@ -26,16 +26,17 @@
 ])
 
 licenses([
-    "notice",  # MIT from expression "MIT OR Apache-2.0"
+    "notice",  # MIT from expression "MIT"
 ])
 
 # Generated Targets
 
 rust_library(
-    name = "cxxbridge_flags",
+    name = "slab",
     srcs = glob(["**/*.rs"]),
     crate_features = [
         "default",
+        "std",
     ],
     crate_root = "src/lib.rs",
     data = [],
@@ -45,11 +46,15 @@
     ],
     tags = [
         "cargo-raze",
-        "crate-name=cxxbridge-flags",
+        "crate-name=slab",
         "manual",
     ],
-    version = "1.0.68",
+    version = "0.4.6",
     # buildifier: leave-alone
     deps = [
     ],
 )
+
+# Unsupported target "serde" with type "test" omitted
+
+# Unsupported target "slab" with type "test" omitted
diff --git a/third_party/cargo/remote/BUILD.smallvec-1.8.0.bazel b/third_party/cargo/remote/BUILD.smallvec-1.9.0.bazel
similarity index 97%
rename from third_party/cargo/remote/BUILD.smallvec-1.8.0.bazel
rename to third_party/cargo/remote/BUILD.smallvec-1.9.0.bazel
index 06f9025..d6861e5 100644
--- a/third_party/cargo/remote/BUILD.smallvec-1.8.0.bazel
+++ b/third_party/cargo/remote/BUILD.smallvec-1.9.0.bazel
@@ -49,7 +49,7 @@
         "crate-name=smallvec",
         "manual",
     ],
-    version = "1.8.0",
+    version = "1.9.0",
     # buildifier: leave-alone
     deps = [
     ],
diff --git a/third_party/cargo/remote/BUILD.strum_macros-0.24.0.bazel b/third_party/cargo/remote/BUILD.strum_macros-0.24.2.bazel
similarity index 84%
rename from third_party/cargo/remote/BUILD.strum_macros-0.24.0.bazel
rename to third_party/cargo/remote/BUILD.strum_macros-0.24.2.bazel
index 6d97605..3378bff 100644
--- a/third_party/cargo/remote/BUILD.strum_macros-0.24.0.bazel
+++ b/third_party/cargo/remote/BUILD.strum_macros-0.24.2.bazel
@@ -40,7 +40,7 @@
     data = [],
     edition = "2018",
     proc_macro_deps = [
-        "@raze__rustversion__1_0_6//:rustversion",
+        "@raze__rustversion__1_0_8//:rustversion",
     ],
     rustc_flags = [
         "--cap-lints=allow",
@@ -50,12 +50,12 @@
         "crate-name=strum_macros",
         "manual",
     ],
-    version = "0.24.0",
+    version = "0.24.2",
     # buildifier: leave-alone
     deps = [
         "@raze__heck__0_4_0//:heck",
-        "@raze__proc_macro2__1_0_39//:proc_macro2",
-        "@raze__quote__1_0_18//:quote",
-        "@raze__syn__1_0_95//:syn",
+        "@raze__proc_macro2__1_0_40//:proc_macro2",
+        "@raze__quote__1_0_20//:quote",
+        "@raze__syn__1_0_98//:syn",
     ],
 )
diff --git a/third_party/cargo/remote/BUILD.syn-1.0.95.bazel b/third_party/cargo/remote/BUILD.syn-1.0.98.bazel
similarity index 94%
rename from third_party/cargo/remote/BUILD.syn-1.0.95.bazel
rename to third_party/cargo/remote/BUILD.syn-1.0.98.bazel
index 2e805e7..dde8690 100644
--- a/third_party/cargo/remote/BUILD.syn-1.0.95.bazel
+++ b/third_party/cargo/remote/BUILD.syn-1.0.98.bazel
@@ -63,7 +63,7 @@
         "cargo-raze",
         "manual",
     ],
-    version = "1.0.95",
+    version = "1.0.98",
     visibility = ["//visibility:private"],
     deps = [
     ],
@@ -98,13 +98,13 @@
         "crate-name=syn",
         "manual",
     ],
-    version = "1.0.95",
+    version = "1.0.98",
     # buildifier: leave-alone
     deps = [
         ":syn_build_script",
-        "@raze__proc_macro2__1_0_39//:proc_macro2",
-        "@raze__quote__1_0_18//:quote",
-        "@raze__unicode_ident__1_0_0//:unicode_ident",
+        "@raze__proc_macro2__1_0_40//:proc_macro2",
+        "@raze__quote__1_0_20//:quote",
+        "@raze__unicode_ident__1_0_2//:unicode_ident",
     ],
 )
 
diff --git a/third_party/cargo/remote/BUILD.tempdir-0.3.7.bazel b/third_party/cargo/remote/BUILD.tempdir-0.3.7.bazel
deleted file mode 100644
index a32d1f4..0000000
--- a/third_party/cargo/remote/BUILD.tempdir-0.3.7.bazel
+++ /dev/null
@@ -1,58 +0,0 @@
-"""
-@generated
-cargo-raze crate build file.
-
-DO NOT EDIT! Replaced on runs of cargo-raze
-"""
-
-# buildifier: disable=load
-load("@bazel_skylib//lib:selects.bzl", "selects")
-
-# buildifier: disable=load
-load(
-    "@rules_rust//rust:defs.bzl",
-    "rust_binary",
-    "rust_library",
-    "rust_proc_macro",
-    "rust_test",
-)
-
-package(default_visibility = [
-    # Public for visibility by "@raze__crate__version//" targets.
-    #
-    # Prefer access through "//third_party/cargo", which limits external
-    # visibility to explicit Cargo.toml dependencies.
-    "//visibility:public",
-])
-
-licenses([
-    "notice",  # MIT from expression "MIT OR Apache-2.0"
-])
-
-# Generated Targets
-
-rust_library(
-    name = "tempdir",
-    srcs = glob(["**/*.rs"]),
-    crate_features = [
-    ],
-    crate_root = "src/lib.rs",
-    data = [],
-    edition = "2015",
-    rustc_flags = [
-        "--cap-lints=allow",
-    ],
-    tags = [
-        "cargo-raze",
-        "crate-name=tempdir",
-        "manual",
-    ],
-    version = "0.3.7",
-    # buildifier: leave-alone
-    deps = [
-        "@raze__rand__0_4_6//:rand",
-        "@raze__remove_dir_all__0_5_3//:remove_dir_all",
-    ],
-)
-
-# Unsupported target "smoke" with type "test" omitted
diff --git a/third_party/cargo/remote/BUILD.test-log-0.2.10.bazel b/third_party/cargo/remote/BUILD.test-log-0.2.10.bazel
index c1c1ac5..f67c730 100644
--- a/third_party/cargo/remote/BUILD.test-log-0.2.10.bazel
+++ b/third_party/cargo/remote/BUILD.test-log-0.2.10.bazel
@@ -52,8 +52,8 @@
     version = "0.2.10",
     # buildifier: leave-alone
     deps = [
-        "@raze__proc_macro2__1_0_39//:proc_macro2",
-        "@raze__quote__1_0_18//:quote",
-        "@raze__syn__1_0_95//:syn",
+        "@raze__proc_macro2__1_0_40//:proc_macro2",
+        "@raze__quote__1_0_20//:quote",
+        "@raze__syn__1_0_98//:syn",
     ],
 )
diff --git a/third_party/cargo/remote/BUILD.thiserror-impl-1.0.31.bazel b/third_party/cargo/remote/BUILD.thiserror-impl-1.0.31.bazel
index c357661..b2f0429 100644
--- a/third_party/cargo/remote/BUILD.thiserror-impl-1.0.31.bazel
+++ b/third_party/cargo/remote/BUILD.thiserror-impl-1.0.31.bazel
@@ -50,8 +50,8 @@
     version = "1.0.31",
     # buildifier: leave-alone
     deps = [
-        "@raze__proc_macro2__1_0_39//:proc_macro2",
-        "@raze__quote__1_0_18//:quote",
-        "@raze__syn__1_0_95//:syn",
+        "@raze__proc_macro2__1_0_40//:proc_macro2",
+        "@raze__quote__1_0_20//:quote",
+        "@raze__syn__1_0_98//:syn",
     ],
 )
diff --git a/third_party/cargo/remote/BUILD.toml-0.5.9.bazel b/third_party/cargo/remote/BUILD.toml-0.5.9.bazel
index c78c445..ce029fa 100644
--- a/third_party/cargo/remote/BUILD.toml-0.5.9.bazel
+++ b/third_party/cargo/remote/BUILD.toml-0.5.9.bazel
@@ -57,7 +57,7 @@
     version = "0.5.9",
     # buildifier: leave-alone
     deps = [
-        "@raze__serde__1_0_137//:serde",
+        "@raze__serde__1_0_139//:serde",
     ],
 )
 
diff --git a/third_party/cargo/remote/BUILD.trybuild-1.0.61.bazel b/third_party/cargo/remote/BUILD.trybuild-1.0.63.bazel
similarity index 88%
rename from third_party/cargo/remote/BUILD.trybuild-1.0.61.bazel
rename to third_party/cargo/remote/BUILD.trybuild-1.0.63.bazel
index 63092b8..9a9d516 100644
--- a/third_party/cargo/remote/BUILD.trybuild-1.0.61.bazel
+++ b/third_party/cargo/remote/BUILD.trybuild-1.0.63.bazel
@@ -54,7 +54,7 @@
         "cargo-raze",
         "manual",
     ],
-    version = "1.0.61",
+    version = "1.0.63",
     visibility = ["//visibility:private"],
     deps = [
     ],
@@ -69,7 +69,7 @@
     data = [],
     edition = "2018",
     proc_macro_deps = [
-        "@raze__serde_derive__1_0_137//:serde_derive",
+        "@raze__serde_derive__1_0_139//:serde_derive",
     ],
     rustc_flags = [
         "--cap-lints=allow",
@@ -79,14 +79,14 @@
         "crate-name=trybuild",
         "manual",
     ],
-    version = "1.0.61",
+    version = "1.0.63",
     # buildifier: leave-alone
     deps = [
         ":trybuild_build_script",
         "@raze__glob__0_3_0//:glob",
-        "@raze__once_cell__1_10_0//:once_cell",
-        "@raze__serde__1_0_137//:serde",
-        "@raze__serde_json__1_0_81//:serde_json",
+        "@raze__once_cell__1_13_0//:once_cell",
+        "@raze__serde__1_0_139//:serde",
+        "@raze__serde_json__1_0_82//:serde_json",
         "@raze__termcolor__1_1_3//:termcolor",
         "@raze__toml__0_5_9//:toml",
     ],
diff --git a/third_party/cargo/remote/BUILD.unicode-ident-1.0.0.bazel b/third_party/cargo/remote/BUILD.unicode-ident-1.0.2.bazel
similarity index 91%
rename from third_party/cargo/remote/BUILD.unicode-ident-1.0.0.bazel
rename to third_party/cargo/remote/BUILD.unicode-ident-1.0.2.bazel
index c3ec41f..001aae9 100644
--- a/third_party/cargo/remote/BUILD.unicode-ident-1.0.0.bazel
+++ b/third_party/cargo/remote/BUILD.unicode-ident-1.0.2.bazel
@@ -26,7 +26,7 @@
 ])
 
 licenses([
-    "notice",  # MIT from expression "MIT OR Apache-2.0"
+    "notice",  # MIT from expression "(MIT OR Apache-2.0) AND Unicode-DFS-2016"
 ])
 
 # Generated Targets
@@ -49,7 +49,7 @@
         "crate-name=unicode-ident",
         "manual",
     ],
-    version = "1.0.0",
+    version = "1.0.2",
     # buildifier: leave-alone
     deps = [
     ],
diff --git a/third_party/cargo/remote/BUILD.unicode-linebreak-0.1.2.bazel b/third_party/cargo/remote/BUILD.unicode-linebreak-0.1.2.bazel
index c1b4bf8..f2e3045 100644
--- a/third_party/cargo/remote/BUILD.unicode-linebreak-0.1.2.bazel
+++ b/third_party/cargo/remote/BUILD.unicode-linebreak-0.1.2.bazel
@@ -57,7 +57,7 @@
     version = "0.1.2",
     visibility = ["//visibility:private"],
     deps = [
-        "@raze__regex__1_5_5//:regex",
+        "@raze__regex__1_6_0//:regex",
     ],
 )
 
diff --git a/third_party/cargo/remote/BUILD.uuid-1.0.0.bazel b/third_party/cargo/remote/BUILD.uuid-1.1.2.bazel
similarity index 98%
rename from third_party/cargo/remote/BUILD.uuid-1.0.0.bazel
rename to third_party/cargo/remote/BUILD.uuid-1.1.2.bazel
index 1121fea..7d633bb 100644
--- a/third_party/cargo/remote/BUILD.uuid-1.0.0.bazel
+++ b/third_party/cargo/remote/BUILD.uuid-1.1.2.bazel
@@ -61,7 +61,7 @@
         "crate-name=uuid",
         "manual",
     ],
-    version = "1.0.0",
+    version = "1.1.2",
     # buildifier: leave-alone
     deps = [
     ],
diff --git a/third_party/cargo/remote/BUILD.which-4.2.5.bazel b/third_party/cargo/remote/BUILD.which-4.2.5.bazel
index f09777f..7dd0db4 100644
--- a/third_party/cargo/remote/BUILD.which-4.2.5.bazel
+++ b/third_party/cargo/remote/BUILD.which-4.2.5.bazel
@@ -50,7 +50,7 @@
     version = "4.2.5",
     # buildifier: leave-alone
     deps = [
-        "@raze__either__1_6_1//:either",
+        "@raze__either__1_7_0//:either",
         "@raze__libc__0_2_126//:libc",
     ],
 )
diff --git a/third_party/cargo/remote/BUILD.winapi-0.3.9.bazel b/third_party/cargo/remote/BUILD.winapi-0.3.9.bazel
index fef3c89..b162b5d 100644
--- a/third_party/cargo/remote/BUILD.winapi-0.3.9.bazel
+++ b/third_party/cargo/remote/BUILD.winapi-0.3.9.bazel
@@ -50,9 +50,7 @@
         "libloaderapi",
         "minwinbase",
         "minwindef",
-        "ntsecapi",
         "processenv",
-        "profileapi",
         "std",
         "winbase",
         "wincon",
@@ -86,9 +84,7 @@
         "libloaderapi",
         "minwinbase",
         "minwindef",
-        "ntsecapi",
         "processenv",
-        "profileapi",
         "std",
         "winbase",
         "wincon",
diff --git a/third_party/cargo_raze/cargo_raze.patch b/third_party/cargo_raze/cargo_raze.patch
index 306fcf3..f4cd02a 100644
--- a/third_party/cargo_raze/cargo_raze.patch
+++ b/third_party/cargo_raze/cargo_raze.patch
@@ -95,12 +95,13 @@
 
 --- third_party/libssh2/BUILD.libssh2.bazel	2022-02-04 00:03:43.831120614 -0800
 +++ third_party/libssh2/BUILD.libssh2.bazel	2022-02-04 00:04:19.100745883 -0800
-@@ -29,6 +29,14 @@ cmake(
+@@ -29,6 +29,15 @@ cmake(
          "@rules_rust//rust/platform:windows": ["ssh2.lib"],
          "//conditions:default": ["libssh2.a"],
      }),
 +    copts = [
 +        "-Wno-cast-qual",
++        # See https://github.com/openbsd/src/commit/04a2240bd8f465bcae6b595d912af3e2965856de, it's a false positive.
 +        "-Wno-sizeof-array-div",
 +        "-Wno-unused-parameter",
 +        "-DHAVE_SNPRINTF=1",
@@ -162,6 +163,15 @@
 
 --- third_party/cargo/remote/BUILD.libssh2-sys-0.2.21.bazel	2022-02-04 00:54:43.031966734 -0800
 +++ third_party/cargo/remote/BUILD.libssh2-sys-0.2.21.bazel	2022-02-04 00:54:44.272023742 -0800
+@@ -41,6 +41,8 @@ cargo_build_script(
+     name = "libssh2_sys_build_script",
+     srcs = glob(["**/*.rs"]),
+     build_script_env = {
++        # See https://github.com/openbsd/src/commit/04a2240bd8f465bcae6b595d912af3e2965856de, it's a false positive.
++        "CFLAGS": "-Wno-sizeof-array-div",
+     },
+     crate_features = [
+     ],
 @@ -48,6 +48,7 @@ cargo_build_script(
      data = glob(["**"]) + [
          "@cargo_raze__libssh2//:libssh2",
diff --git a/third_party/flatbuffers/build_defs.bzl b/third_party/flatbuffers/build_defs.bzl
index 7eaa4d3..c3e2bb0 100644
--- a/third_party/flatbuffers/build_defs.bzl
+++ b/third_party/flatbuffers/build_defs.bzl
@@ -6,6 +6,8 @@
 """
 
 load("@io_bazel_rules_go//go:def.bzl", "go_library")
+load("@rules_rust//rust:defs.bzl", "rust_library")
+load("@rules_rust//rust:rust_common.bzl", "CrateInfo")
 load("@build_bazel_rules_nodejs//:index.bzl", "js_library")
 load("@npm//@bazel/typescript:index.bzl", "ts_project")
 load("@rules_cc//cc:defs.bzl", "cc_library")
@@ -14,9 +16,6 @@
 
 DEFAULT_INCLUDE_PATHS = [
     "./",
-    "$(GENDIR)",
-    "$(BINDIR)",
-    "$(execpath @com_github_google_flatbuffers//:flatc).runfiles/com_github_google_flatbuffers",
 ]
 
 DEFAULT_FLATC_ARGS = [
@@ -28,7 +27,8 @@
     "--require-explicit-ids",
     "--gen-mutable",
     "--reflect-names",
-    "--cpp-ptr-type flatbuffers::unique_ptr",
+    "--cpp-ptr-type",
+    "flatbuffers::unique_ptr",
     "--force-empty",
     "--scoped-enums",
     "--gen-name-strings",
@@ -40,6 +40,11 @@
     "--require-explicit-ids",
 ]
 
+DEFAULT_FLATC_RUST_ARGS = [
+    "--gen-object-api",
+    "--require-explicit-ids",
+]
+
 DEFAULT_FLATC_TS_ARGS = [
     "--gen-object-api",
     "--gen-mutable",
@@ -49,12 +54,80 @@
     "--keep-prefix",
 ]
 
+"""Contains information about a set of flatbuffers which have their code for
+reading/writing generated in a single library-style rule.
+
+Fields:
+    srcs: [File], the .fbs source files
+"""
+FlatbufferLibraryInfo = provider()
+
+def _flatbuffer_library_compile_impl(ctx):
+    outs = []
+    commands = []
+    for src in ctx.files.srcs:
+        if ctx.attr.tables_for_filenames:
+            out_dir = None
+            for table in ctx.attr.tables_for_filenames:
+                out = ctx.actions.declare_file(ctx.attr.out_prefix + table + ctx.attr.output_suffix)
+                this_out_dir = "/".join(out.dirname.split("/")[:-(len(ctx.attr.out_prefix.split("/")) - 1)])
+                if out_dir:
+                    if this_out_dir != out_dir:
+                        fail("Trying to write to multiple directories")
+                else:
+                    out_dir = this_out_dir
+                outs.append(out)
+        else:
+            out = ctx.actions.declare_file(ctx.attr.out_prefix + src.basename.replace(".fbs", "") + ctx.attr.output_suffix)
+            outs.append(out)
+            out_dir = out.dirname
+        arguments = [ctx.executable._flatc.path]
+        for path in ctx.attr.include_paths:
+            for subpath in ["", ctx.bin_dir.path + "/"]:
+                arguments.append("-I")
+                arguments.append(subpath + path)
+        arguments.append("-I")
+        arguments.append("%s.runfiles/com_github_google_flatbuffers" % ctx.executable._flatc.path)
+        arguments.extend(ctx.attr.flatc_args)
+        arguments.extend(ctx.attr.language_flags)
+        arguments.extend([
+            "-o",
+            out_dir,
+        ])
+        arguments.append(src.path)
+        commands.append(arguments)
+    ctx.actions.run_shell(
+        outputs = outs,
+        inputs = ctx.files.srcs + ctx.files.includes,
+        tools = [ctx.executable._flatc],
+        command = " && ".join([" ".join(arguments) for arguments in commands]),
+        mnemonic = "Flatc",
+        progress_message = "Generating flatbuffer files for %{input}:",
+    )
+    return [DefaultInfo(files = depset(outs), runfiles = ctx.runfiles(files = outs)), FlatbufferLibraryInfo(srcs = ctx.files.srcs)]
+
+_flatbuffer_library_compile = rule(
+    implementation = _flatbuffer_library_compile_impl,
+    attrs = {
+        "srcs": attr.label_list(mandatory = True, allow_files = True),
+        "output_suffix": attr.string(mandatory = True),
+        "tables_for_filenames": attr.string_list(mandatory = False),
+        "language_flags": attr.string_list(mandatory = True),
+        "includes": attr.label_list(default = [], allow_files = True),
+        "include_paths": attr.string_list(default = []),
+        "flatc_args": attr.string_list(default = []),
+        "out_prefix": attr.string(default = ""),
+        "_flatc": attr.label(executable = True, cfg = "exec", default = Label(flatc_path)),
+    },
+)
+
 def flatbuffer_library_public(
         name,
         srcs,
-        outs,
+        output_suffix,
         language_flag,
         out_prefix = "",
+        tables_for_filenames = None,
         includes = [],
         include_paths = DEFAULT_INCLUDE_PATHS,
         flatc_args = DEFAULT_FLATC_ARGS,
@@ -63,14 +136,15 @@
         compatible_with = None,
         restricted_to = None,
         target_compatible_with = None,
-        output_to_bindir = False):
+        output_to_bindir = False,
+        visibility = None):
     """Generates code files for reading/writing the given flatbuffers in the
     requested language using the public compiler.
 
     Args:
       name: Rule name.
       srcs: Source .fbs files. Sent in order to the compiler.
-      outs: Output files from flatc.
+      output_suffix: Suffix for output files from flatc.
       language_flag: Target language flag. One of [-c, -j, -js].
       out_prefix: Prepend this path to the front of all generated files except on
           single source targets. Usually is a directory name.
@@ -94,73 +168,36 @@
     optionally a Fileset([reflection_name]) with all generated reflection
     binaries.
     """
-    include_paths_cmd = ["-I %s" % (s) for s in include_paths]
-
-    # '$(@D)' when given a single source target will give the appropriate
-    # directory. Appending 'out_prefix' is only necessary when given a build
-    # target with multiple sources.
-    output_directory = (
-        ("-o $(@D)/%s" % (out_prefix)) if len(srcs) > 1 else ("-o $(@D)")
-    )
-    genrule_cmd = " ".join([
-        "SRCS=($(SRCS));",
-        "for f in $${SRCS[@]:0:%s}; do" % len(srcs),
-        "$(location %s)" % (flatc_path),
-        " ".join(include_paths_cmd),
-        " ".join(flatc_args),
-        language_flag,
-        output_directory,
-        "$$f;",
-        "done",
-    ])
-    native.genrule(
+    _flatbuffer_library_compile(
         name = name,
-        srcs = srcs + includes,
-        outs = outs,
-        output_to_bindir = output_to_bindir,
-        tools = [flatc_path],
-        cmd = genrule_cmd,
+        srcs = srcs,
+        output_suffix = output_suffix,
+        language_flags = [language_flag],
+        includes = includes,
+        include_paths = include_paths,
+        flatc_args = flatc_args,
+        out_prefix = out_prefix,
+        tables_for_filenames = tables_for_filenames,
         compatible_with = compatible_with,
         target_compatible_with = target_compatible_with,
         restricted_to = restricted_to,
-        message = "Generating flatbuffer files for %s:" % (name),
+        visibility = visibility,
     )
+
     if reflection_name:
-        reflection_genrule_cmd = " ".join([
-            "SRCS=($(SRCS));",
-            "for f in $${SRCS[@]:0:%s}; do" % len(srcs),
-            "$(location %s)" % (flatc_path),
-            "-b --schema",
-            " ".join(flatc_args),
-            " ".join(include_paths_cmd),
-            language_flag,
-            output_directory,
-            "$$f;",
-            "done",
-        ])
-        reflection_outs = [
-            (out_prefix + "%s.bfbs") % (s.replace(".fbs", "").split("/")[-1])
-            for s in srcs
-        ]
-        native.genrule(
-            name = "%s_srcs" % reflection_name,
-            srcs = srcs + includes,
-            outs = reflection_outs,
-            output_to_bindir = output_to_bindir,
-            tools = [flatc_path],
-            compatible_with = compatible_with,
-            restricted_to = restricted_to,
-            target_compatible_with = target_compatible_with,
-            cmd = reflection_genrule_cmd,
-            message = "Generating flatbuffer reflection binary for %s:" % (name),
-            visibility = reflection_visibility,
-        )
-        native.filegroup(
+        _flatbuffer_library_compile(
             name = "%s_out" % reflection_name,
-            srcs = reflection_outs,
-            visibility = reflection_visibility,
+            srcs = srcs,
+            output_suffix = ".bfbs",
+            language_flags = ["-b", "--schema"],
+            includes = includes,
+            include_paths = include_paths,
+            flatc_args = flatc_args,
+            out_prefix = out_prefix,
             compatible_with = compatible_with,
+            target_compatible_with = target_compatible_with,
             restricted_to = restricted_to,
+            visibility = reflection_visibility,
         )
 
 def flatbuffer_cc_library(
@@ -220,10 +257,6 @@
       Fileset([name]_reflection): (Optional) all generated reflection binaries.
       cc_library([name]): library with sources and flatbuffers deps.
     """
-    output_headers = [
-        (out_prefix + "%s_generated.h") % (s.replace(".fbs", "").split("/")[-1].split(":")[-1])
-        for s in srcs
-    ]
     if deps and includes:
         # There is no inherent reason we couldn't support both, but this discourages
         # use of includes without good reason.
@@ -236,7 +269,7 @@
     flatbuffer_library_public(
         name = srcs_lib,
         srcs = srcs,
-        outs = output_headers,
+        output_suffix = "_generated.h",
         language_flag = "-c",
         out_prefix = out_prefix,
         includes = includes,
@@ -309,23 +342,27 @@
         been parsed. As such, we just force the user to manually specify
         things.
     """
-    python_files = ["%s/%s.py" % (namespace.replace(".", "/"), table) for table in tables]
 
     srcs_lib = "%s_srcs" % (name)
+    if not tables:
+        fail("Must specify the list of tables")
     flatbuffer_library_public(
         name = srcs_lib,
         srcs = srcs,
-        outs = python_files,
+        output_suffix = ".py",
+        out_prefix = namespace.replace(".", "/") + "/",
+        tables_for_filenames = tables,
         language_flag = "--python",
         includes = includes,
         include_paths = include_paths,
         flatc_args = flatc_args,
         compatible_with = compatible_with,
         target_compatible_with = target_compatible_with,
+        visibility = ["//visibility:private"],
     )
     native.py_library(
         name = name,
-        srcs = python_files,
+        srcs = [srcs_lib],
         visibility = visibility,
         compatible_with = compatible_with,
         target_compatible_with = target_compatible_with,
@@ -345,23 +382,23 @@
         visibility = None,
         srcs_filegroup_visibility = None):
     srcs_lib = "%s_srcs" % (name)
-    outs = ["%s_generated.go" % (s.replace(".fbs", "").split("/")[-1]) for s in srcs]
     flatc_args = flatc_args + ["--go-namespace", importpath.split("/")[-1]]
 
     flatbuffer_library_public(
         name = srcs_lib,
         srcs = srcs,
-        outs = outs,
+        output_suffix = "_generated.go",
         language_flag = "--go",
         includes = includes,
         include_paths = include_paths,
         flatc_args = flatc_args,
         compatible_with = compatible_with,
         target_compatible_with = target_compatible_with,
+        visibility = ["//visibility:private"],
     )
     go_library(
         name = name,
-        srcs = outs,
+        srcs = [srcs_lib],
         deps = ["@com_github_google_flatbuffers//go"],
         importpath = importpath,
         visibility = visibility,
@@ -369,6 +406,93 @@
         target_compatible_with = target_compatible_with,
     )
 
+def _flatbuffer_rust_lib_gen_impl(ctx):
+    # TODO(Brian): I think this needs changes to properly handle multiple .fbs files in a rule.
+    uses = []
+    for (dep, dep_srcs) in zip(ctx.attr.deps, ctx.attr.dep_srcs):
+        for dep_src in dep_srcs[FlatbufferLibraryInfo].srcs:
+            uses.append((dep[CrateInfo].name, dep_src.basename.replace(".fbs", "_generated")))
+    lib_rs_content = "\n".join(
+        [
+            "// Automatically generated by the Flatbuffers Bazel rules. Do not modify",
+            "#![allow(unused_imports)]",
+        ] + ["use %s as %s;" % (crate, use_as) for (crate, use_as) in uses] +
+        ["include!(\"%s\");" % src.basename for src in ctx.files.srcs_lib],
+    )
+    output = ctx.actions.declare_file(ctx.attr.name + "_lib.rs")
+    ctx.actions.write(
+        output = output,
+        content = lib_rs_content,
+    )
+    return [DefaultInfo(files = depset([output]))]
+
+"""Generates a lib.rs for a flatbuffer_rust_library.
+
+flatc generates individual .rs files for us. It can also generate a top-level mod.rs to be included
+in a crate, but that is laid out to include all flatbuffers files in a project. That's not a good
+fit for Bazel rules and monorepos, so we generate an alternative that imports all dependencies under
+their expected names."""
+_flatbuffer_rust_lib_gen = rule(
+    implementation = _flatbuffer_rust_lib_gen_impl,
+    attrs = {
+        "srcs_lib": attr.label(mandatory = True, doc = "The generated srcs for this rule"),
+        "dep_srcs": attr.label_list(mandatory = True, providers = [FlatbufferLibraryInfo], doc = "The _srcs rules for all our direct dependencies"),
+        "deps": attr.label_list(mandatory = True, providers = [CrateInfo]),
+    },
+)
+
+def flatbuffer_rust_library(
+        name,
+        srcs,
+        compatible_with = None,
+        target_compatible_with = None,
+        deps = [],
+        include_paths = DEFAULT_INCLUDE_PATHS,
+        flatc_args = DEFAULT_FLATC_RUST_ARGS,
+        include_reflection = True,
+        crate_name = None,
+        visibility = None,
+        srcs_filegroup_visibility = None):
+    includes = [d + "_includes" for d in deps]
+    srcs_lib = "%s_srcs" % (name)
+    lib_gen = "%s_lib_gen" % (name)
+    deps = list(deps)
+    if include_reflection:
+        deps.append("@com_github_google_flatbuffers//reflection:reflection_rust_fbs")
+
+    flatbuffer_library_public(
+        name = srcs_lib,
+        srcs = srcs,
+        language_flag = "--rust",
+        output_suffix = "_generated.rs",
+        includes = includes,
+        include_paths = include_paths,
+        flatc_args = flatc_args,
+        compatible_with = compatible_with,
+        target_compatible_with = target_compatible_with,
+        visibility = visibility,
+    )
+    _flatbuffer_rust_lib_gen(
+        name = lib_gen,
+        deps = deps,
+        dep_srcs = [dep + "_srcs" for dep in deps],
+        srcs_lib = srcs_lib,
+        visibility = ["//visibility:private"],
+        compatible_with = compatible_with,
+        target_compatible_with = target_compatible_with,
+    )
+    rust_library(
+        name = name,
+        srcs = [srcs_lib, lib_gen],
+        crate_root = lib_gen,
+        crate_name = crate_name,
+        deps = ["@com_github_google_flatbuffers//rust"] + deps,
+        edition = "2018",
+        visibility = visibility,
+        compatible_with = compatible_with,
+        target_compatible_with = target_compatible_with,
+    )
+
 def flatbuffer_ts_library(
         name,
         srcs,
@@ -411,13 +535,12 @@
     # third_party/.
     # TODO(james): There absolutely are better ways to do this, but this was the quick and dirty
     # one....
-    pre_outs = ["%s_pregenerated.ts" % (s.replace(".fbs", "").split("/")[-1]) for s in srcs]
     outs = ["%s_generated.ts" % (s.replace(".fbs", "").split("/")[-1]) for s in srcs]
     includes = [d + "_includes" for d in deps]
     flatbuffer_library_public(
         name = srcs_lib,
         srcs = srcs,
-        outs = pre_outs,
+        output_suffix = "_pregenerated.ts",
         language_flag = "--ts",
         includes = includes,
         include_paths = include_paths,
@@ -435,8 +558,8 @@
         "done",
     ])
     native.genrule(
-        name = name + "_reimporter",
-        srcs = pre_outs,
+        name = name + "_reimporter.ts",
+        srcs = [srcs_lib],
         outs = outs,
         cmd = genrule_cmd,
     )
diff --git a/third_party/flatbuffers/reflection/BUILD.bazel b/third_party/flatbuffers/reflection/BUILD.bazel
index aa421db..9b08734 100644
--- a/third_party/flatbuffers/reflection/BUILD.bazel
+++ b/third_party/flatbuffers/reflection/BUILD.bazel
@@ -1,4 +1,4 @@
-load("//:build_defs.bzl", "flatbuffer_ts_library")
+load("//:build_defs.bzl", "flatbuffer_rust_library", "flatbuffer_ts_library")
 
 filegroup(
     name = "reflection_fbs_schema",
@@ -13,3 +13,11 @@
     include_reflection = False,
     visibility = ["//visibility:public"],
 )
+
+flatbuffer_rust_library(
+    name = "reflection_rust_fbs",
+    srcs = ["reflection.fbs"],
+    crate_name = "flatbuffers_reflection",
+    include_reflection = False,
+    visibility = ["//visibility:public"],
+)
diff --git a/third_party/flatbuffers/rust/BUILD.bazel b/third_party/flatbuffers/rust/BUILD.bazel
new file mode 100644
index 0000000..246f5a0
--- /dev/null
+++ b/third_party/flatbuffers/rust/BUILD.bazel
@@ -0,0 +1,16 @@
+load("@rules_rust//rust:defs.bzl", "rust_library")
+
+rust_library(
+    name = "rust",
+    srcs = glob(["flatbuffers/**/*.rs"]),
+    crate_name = "flatbuffers",
+    crate_root = "flatbuffers/src/lib.rs",
+    edition = "2018",
+    version = "2.1.1",
+    visibility = ["//visibility:public"],
+    deps = [
+        "@//third_party/cargo:bitflags",
+        "@//third_party/cargo:smallvec",
+        "@//third_party/cargo:thiserror",
+    ],
+)
diff --git a/third_party/flatbuffers/src/idl_gen_rust.cpp b/third_party/flatbuffers/src/idl_gen_rust.cpp
index 17853a0..b15cdf5 100644
--- a/third_party/flatbuffers/src/idl_gen_rust.cpp
+++ b/third_party/flatbuffers/src/idl_gen_rust.cpp
@@ -366,6 +366,7 @@
       if (symbol.generated) continue;
       code_.Clear();
       code_ += "// " + std::string(FlatBuffersGeneratedWarning());
+      code_ += "#![allow(unused_imports)]";
       code_ += "extern crate flatbuffers;";
       code_ += "use std::mem;";
       code_ += "use std::cmp::Ordering;";
diff --git a/third_party/rules_rust/docs/flatten.md b/third_party/rules_rust/docs/flatten.md
index 0f82ad7..18fee16 100644
--- a/third_party/rules_rust/docs/flatten.md
+++ b/third_party/rules_rust/docs/flatten.md
@@ -1306,7 +1306,7 @@
 
 <pre>
 CrateInfo(<a href="#CrateInfo-aliases">aliases</a>, <a href="#CrateInfo-compile_data">compile_data</a>, <a href="#CrateInfo-deps">deps</a>, <a href="#CrateInfo-edition">edition</a>, <a href="#CrateInfo-is_test">is_test</a>, <a href="#CrateInfo-name">name</a>, <a href="#CrateInfo-output">output</a>, <a href="#CrateInfo-owner">owner</a>, <a href="#CrateInfo-proc_macro_deps">proc_macro_deps</a>, <a href="#CrateInfo-root">root</a>,
-          <a href="#CrateInfo-rustc_env">rustc_env</a>, <a href="#CrateInfo-srcs">srcs</a>, <a href="#CrateInfo-type">type</a>, <a href="#CrateInfo-wrapped_crate_type">wrapped_crate_type</a>)
+          <a href="#CrateInfo-rustc_env">rustc_env</a>, <a href="#CrateInfo-rustc_env_files">rustc_env_files</a>, <a href="#CrateInfo-srcs">srcs</a>, <a href="#CrateInfo-type">type</a>, <a href="#CrateInfo-wrapped_crate_type">wrapped_crate_type</a>)
 </pre>
 
 A provider containing general Crate information.
@@ -1327,6 +1327,7 @@
 | <a id="CrateInfo-proc_macro_deps"></a>proc_macro_deps |  depset[DepVariantInfo]: This crate's rust proc_macro dependencies' providers.    |
 | <a id="CrateInfo-root"></a>root |  File: The source File entrypoint to this crate, eg. lib.rs    |
 | <a id="CrateInfo-rustc_env"></a>rustc_env |  Dict[String, String]: Additional <code>"key": "value"</code> environment variables to set for rustc.    |
+| <a id="CrateInfo-rustc_env_files"></a>rustc_env_files |  [File]: Files containing additional environment variables to set for rustc.    |
 | <a id="CrateInfo-srcs"></a>srcs |  depset[File]: All source Files that are part of the crate.    |
 | <a id="CrateInfo-type"></a>type |  str: The type of this crate (see [rustc --crate-type](https://doc.rust-lang.org/rustc/command-line-arguments.html#--crate-type-a-list-of-types-of-crates-for-the-compiler-to-emit)).    |
 | <a id="CrateInfo-wrapped_crate_type"></a>wrapped_crate_type |  str, optional: The original crate type for targets generated using a previously defined crate (typically tests using the <code>rust_test::crate</code> attribute)    |
diff --git a/third_party/rules_rust/docs/providers.md b/third_party/rules_rust/docs/providers.md
index f4aa819..713d618 100644
--- a/third_party/rules_rust/docs/providers.md
+++ b/third_party/rules_rust/docs/providers.md
@@ -11,7 +11,7 @@
 
 <pre>
 CrateInfo(<a href="#CrateInfo-aliases">aliases</a>, <a href="#CrateInfo-compile_data">compile_data</a>, <a href="#CrateInfo-deps">deps</a>, <a href="#CrateInfo-edition">edition</a>, <a href="#CrateInfo-is_test">is_test</a>, <a href="#CrateInfo-name">name</a>, <a href="#CrateInfo-output">output</a>, <a href="#CrateInfo-owner">owner</a>, <a href="#CrateInfo-proc_macro_deps">proc_macro_deps</a>, <a href="#CrateInfo-root">root</a>,
-          <a href="#CrateInfo-rustc_env">rustc_env</a>, <a href="#CrateInfo-srcs">srcs</a>, <a href="#CrateInfo-type">type</a>, <a href="#CrateInfo-wrapped_crate_type">wrapped_crate_type</a>)
+          <a href="#CrateInfo-rustc_env">rustc_env</a>, <a href="#CrateInfo-rustc_env_files">rustc_env_files</a>, <a href="#CrateInfo-srcs">srcs</a>, <a href="#CrateInfo-type">type</a>, <a href="#CrateInfo-wrapped_crate_type">wrapped_crate_type</a>)
 </pre>
 
 A provider containing general Crate information.
@@ -32,6 +32,7 @@
 | <a id="CrateInfo-proc_macro_deps"></a>proc_macro_deps |  depset[DepVariantInfo]: This crate's rust proc_macro dependencies' providers.    |
 | <a id="CrateInfo-root"></a>root |  File: The source File entrypoint to this crate, eg. lib.rs    |
 | <a id="CrateInfo-rustc_env"></a>rustc_env |  Dict[String, String]: Additional <code>"key": "value"</code> environment variables to set for rustc.    |
+| <a id="CrateInfo-rustc_env_files"></a>rustc_env_files |  [File]: Files containing additional environment variables to set for rustc.    |
 | <a id="CrateInfo-srcs"></a>srcs |  depset[File]: All source Files that are part of the crate.    |
 | <a id="CrateInfo-type"></a>type |  str: The type of this crate (see [rustc --crate-type](https://doc.rust-lang.org/rustc/command-line-arguments.html#--crate-type-a-list-of-types-of-crates-for-the-compiler-to-emit)).    |
 | <a id="CrateInfo-wrapped_crate_type"></a>wrapped_crate_type |  str, optional: The original crate type for targets generated using a previously defined crate (typically tests using the <code>rust_test::crate</code> attribute)    |
diff --git a/third_party/rules_rust/rust/private/common.bzl b/third_party/rules_rust/rust/private/common.bzl
index 78a3011..66425d2 100644
--- a/third_party/rules_rust/rust/private/common.bzl
+++ b/third_party/rules_rust/rust/private/common.bzl
@@ -48,6 +48,8 @@
     """
     if not "wrapped_crate_type" in kwargs:
         kwargs.update({"wrapped_crate_type": None})
+    if not "rustc_env_files" in kwargs:
+        kwargs.update({"rustc_env_files": []})
     return CrateInfo(**kwargs)
 
 rust_common = struct(
diff --git a/third_party/rules_rust/rust/private/providers.bzl b/third_party/rules_rust/rust/private/providers.bzl
index de6d314..0846498 100644
--- a/third_party/rules_rust/rust/private/providers.bzl
+++ b/third_party/rules_rust/rust/private/providers.bzl
@@ -28,6 +28,7 @@
         "proc_macro_deps": "depset[DepVariantInfo]: This crate's rust proc_macro dependencies' providers.",
         "root": "File: The source File entrypoint to this crate, eg. lib.rs",
         "rustc_env": "Dict[String, String]: Additional `\"key\": \"value\"` environment variables to set for rustc.",
+        "rustc_env_files": "[File]: Files containing additional environment variables to set for rustc.",
         "srcs": "depset[File]: All source Files that are part of the crate.",
         "type": (
             "str: The type of this crate " +
diff --git a/third_party/rules_rust/rust/private/rust.bzl b/third_party/rules_rust/rust/private/rust.bzl
index ad2fba8..688d9bf 100644
--- a/third_party/rules_rust/rust/private/rust.bzl
+++ b/third_party/rules_rust/rust/private/rust.bzl
@@ -282,6 +282,7 @@
             output = rust_lib,
             edition = get_edition(ctx.attr, toolchain),
             rustc_env = ctx.attr.rustc_env,
+            rustc_env_files = ctx.files.rustc_env_files,
             is_test = False,
             compile_data = depset(ctx.files.compile_data),
             owner = ctx.label,
@@ -322,6 +323,7 @@
             output = output,
             edition = get_edition(ctx.attr, toolchain),
             rustc_env = ctx.attr.rustc_env,
+            rustc_env_files = ctx.files.rustc_env_files,
             is_test = False,
             compile_data = depset(ctx.files.compile_data),
             owner = ctx.label,
@@ -357,6 +359,9 @@
             compile_data = depset(ctx.files.compile_data, transitive = [crate.compile_data])
         else:
             compile_data = depset(ctx.files.compile_data)
+        rustc_env_files = ctx.files.rustc_env_files + crate.rustc_env_files
+        rustc_env = dict(crate.rustc_env)
+        rustc_env.update(**ctx.attr.rustc_env)
 
         # Build the test binary using the dependency's srcs.
         crate_info = rust_common.create_crate_info(
@@ -369,7 +374,8 @@
             aliases = ctx.attr.aliases,
             output = output,
             edition = crate.edition,
-            rustc_env = ctx.attr.rustc_env,
+            rustc_env = rustc_env,
+            rustc_env_files = rustc_env_files,
             is_test = True,
             compile_data = compile_data,
             wrapped_crate_type = crate.type,
@@ -388,6 +394,7 @@
             output = output,
             edition = get_edition(ctx.attr, toolchain),
             rustc_env = ctx.attr.rustc_env,
+            rustc_env_files = ctx.files.rustc_env_files,
             is_test = True,
             compile_data = depset(ctx.files.compile_data),
             owner = ctx.label,
diff --git a/third_party/rules_rust/rust/private/rustc.bzl b/third_party/rules_rust/rust/private/rustc.bzl
index 0c3df5e..fc7c5f6 100644
--- a/third_party/rules_rust/rust/private/rustc.bzl
+++ b/third_party/rules_rust/rust/private/rustc.bzl
@@ -581,7 +581,7 @@
         ],
     )
 
-    build_env_files = getattr(files, "rustc_env_files", [])
+    build_env_files = getattr(files, "rustc_env_files", []) + crate_info.rustc_env_files
     compile_inputs, out_dir, build_env_file, build_flags_files = _process_build_scripts(build_info, dep_info, compile_inputs)
     if build_env_file:
         build_env_files = [f for f in build_env_files] + [build_env_file]
diff --git a/third_party/rules_rust/rust/private/rustdoc.bzl b/third_party/rules_rust/rust/private/rustdoc.bzl
index 86c2acd..ca91ff0 100644
--- a/third_party/rules_rust/rust/private/rustdoc.bzl
+++ b/third_party/rules_rust/rust/private/rustdoc.bzl
@@ -39,6 +39,7 @@
         output = None,
         edition = crate_info.edition,
         rustc_env = crate_info.rustc_env,
+        rustc_env_files = crate_info.rustc_env_files,
         is_test = crate_info.is_test,
         compile_data = crate_info.compile_data,
     )
diff --git a/third_party/rules_rust/test/build_env/BUILD.bazel b/third_party/rules_rust/test/build_env/BUILD.bazel
index 4fd7df3..511d23a 100644
--- a/third_party/rules_rust/test/build_env/BUILD.bazel
+++ b/third_party/rules_rust/test/build_env/BUILD.bazel
@@ -2,7 +2,7 @@
     "//cargo:cargo_build_script.bzl",
     "cargo_build_script",
 )
-load("//rust:defs.bzl", "rust_test")
+load("//rust:defs.bzl", "rust_library", "rust_test")
 
 package(default_visibility = ["//visibility:public"])
 
@@ -12,6 +12,21 @@
     data = ["src/manifest_dir_file.txt"],
 )
 
+rust_library(
+    name = "arbitrary_env_lib",
+    srcs = ["tests/arbitrary_env_lib.rs"],
+    edition = "2018",
+    rustc_env = {
+        "USER_DEFINED_KEY": "USER_DEFINED_VALUE",
+    },
+)
+
+rust_test(
+    name = "arbitrary_env_lib_test",
+    crate = ":arbitrary_env_lib",
+    edition = "2018",
+)
+
 rust_test(
     name = "arbitrary_env_test",
     srcs = ["tests/arbitrary_env.rs"],
diff --git a/third_party/rules_rust/test/build_env/tests/arbitrary_env_lib.rs b/third_party/rules_rust/test/build_env/tests/arbitrary_env_lib.rs
new file mode 100644
index 0000000..e89fc2e
--- /dev/null
+++ b/third_party/rules_rust/test/build_env/tests/arbitrary_env_lib.rs
@@ -0,0 +1,16 @@
+pub fn from_lib() -> &'static str {
+    env!("USER_DEFINED_KEY")
+}
+
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn verify_from_lib() {
+        assert_eq!(super::from_lib(), "USER_DEFINED_VALUE");
+    }
+
+    #[test]
+    fn verify_from_test() {
+        assert_eq!(env!("USER_DEFINED_KEY"), "USER_DEFINED_VALUE");
+    }
+}
diff --git a/third_party/rules_rust/test/rustc_env_files/BUILD.bazel b/third_party/rules_rust/test/rustc_env_files/BUILD.bazel
index 431cd07..efb21b7 100644
--- a/third_party/rules_rust/test/rustc_env_files/BUILD.bazel
+++ b/third_party/rules_rust/test/rustc_env_files/BUILD.bazel
@@ -1,4 +1,4 @@
-load("//rust:defs.bzl", "rust_binary")
+load("//rust:defs.bzl", "rust_binary", "rust_library", "rust_test")
 
 package(default_visibility = ["//visibility:public"])
 
@@ -20,3 +20,15 @@
     args = ["$(location :hello_env)"],
     data = [":hello_env"],
 )
+
+rust_library(
+    name = "hello_env_crate",
+    srcs = ["src/lib.rs"],
+    edition = "2018",
+    rustc_env_files = [":generate_rustc_env_file"],
+)
+
+rust_test(
+    name = "hello_env_crate_test",
+    crate = ":hello_env_crate",
+)
diff --git a/third_party/rules_rust/test/rustc_env_files/src/lib.rs b/third_party/rules_rust/test/rustc_env_files/src/lib.rs
new file mode 100644
index 0000000..8cd7cab
--- /dev/null
+++ b/third_party/rules_rust/test/rustc_env_files/src/lib.rs
@@ -0,0 +1,16 @@
+pub fn from_lib() -> &'static str {
+    env!("GREETING")
+}
+
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn verify_from_lib() {
+        assert_eq!(super::from_lib(), "Howdy");
+    }
+
+    #[test]
+    fn verify_from_test() {
+        assert_eq!(env!("GREETING"), "Howdy");
+    }
+}
diff --git a/tools/actions/generate_compile_command.py b/tools/actions/generate_compile_command.py
index b41f2fc..6f10e09 100755
--- a/tools/actions/generate_compile_command.py
+++ b/tools/actions/generate_compile_command.py
@@ -5,23 +5,26 @@
 
 import third_party.bazel.protos.extra_actions_base_pb2 as extra_actions_base_pb2
 
+
 def _get_cpp_command(cpp_compile_info):
-  compiler = cpp_compile_info.tool
-  options = ' '.join(cpp_compile_info.compiler_option)
-  source = cpp_compile_info.source_file
-  output = cpp_compile_info.output_file
-  return '%s %s -c %s -o %s' % (compiler, options, source, output), source
+    compiler = cpp_compile_info.tool
+    options = ' '.join(cpp_compile_info.compiler_option)
+    source = cpp_compile_info.source_file
+    output = cpp_compile_info.output_file
+    return '%s %s -c %s -o %s' % (compiler, options, source, output), source
+
 
 def main(argv):
-  action = extra_actions_base_pb2.ExtraActionInfo()
-  with open(argv[1], 'rb') as f:
-    action.MergeFromString(f.read())
-  command, source_file = _get_cpp_command(
-      action.Extensions[extra_actions_base_pb2.CppCompileInfo.cpp_compile_info])
-  with open(argv[2], 'w') as f:
-    f.write(command)
-    f.write('\0')
-    f.write(source_file)
+    action = extra_actions_base_pb2.ExtraActionInfo()
+    with open(argv[1], 'rb') as f:
+        action.MergeFromString(f.read())
+    command, source_file = _get_cpp_command(action.Extensions[
+        extra_actions_base_pb2.CppCompileInfo.cpp_compile_info])
+    with open(argv[2], 'w') as f:
+        f.write(command)
+        f.write('\0')
+        f.write(source_file)
+
 
 if __name__ == '__main__':
-  sys.exit(main(sys.argv))
+    sys.exit(main(sys.argv))
diff --git a/tools/actions/generate_compile_commands_json.py b/tools/actions/generate_compile_commands_json.py
index d867698..04df592 100755
--- a/tools/actions/generate_compile_commands_json.py
+++ b/tools/actions/generate_compile_commands_json.py
@@ -13,7 +13,6 @@
 import pathlib
 import os.path
 import subprocess
-
 '''
 Args:
   path: The pathlib.Path to _compile_command file.
@@ -21,18 +20,21 @@
 
 Returns a string to stick in compile_commands.json.
 '''
+
+
 def _get_command(path, command_directory):
-  with path.open('r') as f:
-    contents = f.read().split('\0')
-    if len(contents) != 2:
-      # Old/incomplete file or something; silently ignore it.
-      return None
-    return '''{
+    with path.open('r') as f:
+        contents = f.read().split('\0')
+        if len(contents) != 2:
+            # Old/incomplete file or something; silently ignore it.
+            return None
+        return '''{
         "directory": "%s",
         "command": "%s",
         "file": "%s",
       },''' % (command_directory, contents[0].replace('"', '\\"'), contents[1])
 
+
 '''
 Args:
   path: A directory pathlib.Path to look for _compile_command files under.
@@ -40,29 +42,34 @@
 
 Yields strings to stick in compile_commands.json.
 '''
+
+
 def _get_compile_commands(path, command_directory):
-  for f in path.iterdir():
-    if f.is_dir():
-      yield from _get_compile_commands(f, command_directory)
-    elif f.name.endswith('_compile_command'):
-      command = _get_command(f, command_directory)
-      if command:
-        yield command
+    for f in path.iterdir():
+        if f.is_dir():
+            yield from _get_compile_commands(f, command_directory)
+        elif f.name.endswith('_compile_command'):
+            command = _get_command(f, command_directory)
+            if command:
+                yield command
+
 
 def main(argv):
-  source_path = os.path.join(os.path.dirname(__file__), '../..')
-  action_outs = os.path.join(source_path,
-                             'bazel-bin/../extra_actions',
-                             'tools/actions/generate_compile_commands_action')
-  command_directory = subprocess.check_output(
-      ('bazel', 'info', 'execution_root'),
-      cwd=source_path).decode('utf-8').rstrip()
-  commands = _get_compile_commands(pathlib.Path(action_outs), command_directory)
-  with open(os.path.join(source_path, 'compile_commands.json'), 'w') as f:
-    f.write('[')
-    for command in commands:
-      f.write(command)
-    f.write(']')
+    source_path = os.path.join(os.path.dirname(__file__), '../..')
+    action_outs = os.path.join(
+        source_path, 'bazel-bin/../extra_actions',
+        'tools/actions/generate_compile_commands_action')
+    command_directory = subprocess.check_output(
+        ('bazel', 'info', 'execution_root'),
+        cwd=source_path).decode('utf-8').rstrip()
+    commands = _get_compile_commands(pathlib.Path(action_outs),
+                                     command_directory)
+    with open(os.path.join(source_path, 'compile_commands.json'), 'w') as f:
+        f.write('[')
+        for command in commands:
+            f.write(command)
+        f.write(']')
+
 
 if __name__ == '__main__':
-  sys.exit(main(sys.argv))
+    sys.exit(main(sys.argv))
diff --git a/tools/build_rules/apache_runner.py b/tools/build_rules/apache_runner.py
index 3839cb0..87d7224 100644
--- a/tools/build_rules/apache_runner.py
+++ b/tools/build_rules/apache_runner.py
@@ -34,6 +34,7 @@
 dummy@frc971.org
 """
 
+
 def wait_for_server(port: int):
     """Waits for the server at the specified port to respond to TCP connections."""
     while True:
@@ -46,102 +47,110 @@
             connection.close()
             time.sleep(0.01)
 
+
 def main(argv):
-  parser = argparse.ArgumentParser()
-  parser.add_argument("--binary", type=str, required=True)
-  parser.add_argument("--https_port", type=int, default=7000)
-  parser.add_argument("--wrapped_port", type=int, default=7500)
-  parser.add_argument(
-    "--ldap_info",
-    type=str,
-    help="JSON file containing 'ldap_bind_dn', 'ldap_url', and 'ldap_password' entries.",
-    default="",
-  )
-  args, unknown_args = parser.parse_known_args(argv[1:])
-
-  if not args.ldap_info:
-    args.ldap_info = os.path.join(os.environ["BUILD_WORKSPACE_DIRECTORY"], "ldap.json")
-
-  with open("tools/build_rules/apache_template.conf", "r") as file:
-    template = jinja2.Template(file.read())
-
-  with open(args.ldap_info, "r") as file:
-    substitutions = json.load(file)
-
-  for key in ("ldap_bind_dn", "ldap_url", "ldap_password"):
-    if key not in substitutions:
-      raise KeyError(f"The ldap_info JSON file must contain key '{key}'.")
-
-  substitutions.update({
-    "https_port": args.https_port,
-    "wrapped_port": args.wrapped_port,
-  })
-
-  config_text = template.render(substitutions)
-
-  with tempfile.TemporaryDirectory() as temp_dir:
-    temp_dir = Path(temp_dir)
-    with open(temp_dir / "apache2.conf", "w") as file:
-      file.write(config_text)
-
-    # Create a directory for error logs and such.
-    logs_dir = temp_dir / "logs"
-    os.mkdir(logs_dir)
-
-    print("-" * 60)
-    print(f"Logs are in {logs_dir}/")
-    print("-" * 60)
-
-    # Make modules available.
-    modules_path = Path("external/apache2/usr/lib/apache2/modules")
-    os.symlink(modules_path.resolve(), temp_dir / "modules")
-
-    # Generate a testing cert.
-    subprocess.run([
-        "openssl",
-        "req",
-        "-x509",
-        "-nodes",
-        "-days=365",
-        "-newkey=rsa:2048",
-        "-keyout=" + str(temp_dir / "apache-selfsigned.key"),
-        "-out="  + str(temp_dir / "apache-selfsigned.crt"),
-      ],
-      check=True,
-      input=DUMMY_CERT_ANSWERS,
-      text=True,
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--binary", type=str, required=True)
+    parser.add_argument("--https_port", type=int, default=7000)
+    parser.add_argument("--wrapped_port", type=int, default=7500)
+    parser.add_argument(
+        "--ldap_info",
+        type=str,
+        help=
+        "JSON file containing 'ldap_bind_dn', 'ldap_url', and 'ldap_password' entries.",
+        default="",
     )
+    args, unknown_args = parser.parse_known_args(argv[1:])
 
-    # Start the wrapped binary in the background.
-    # Tell it via the environment what port to listen on.
-    env = os.environ.copy()
-    env["APACHE_WRAPPED_PORT"] = str(args.wrapped_port)
-    wrapped_binary = subprocess.Popen([args.binary] + unknown_args, env=env)
+    if not args.ldap_info:
+        args.ldap_info = os.path.join(os.environ["BUILD_WORKSPACE_DIRECTORY"],
+                                      "ldap.json")
 
-    # Start the apache server.
-    env = os.environ.copy()
-    env["LD_LIBRARY_PATH"] = "external/apache2/usr/lib/x86_64-linux-gnu"
-    apache = subprocess.Popen(
-      ["external/apache2/usr/sbin/apache2", "-X", "-d", str(temp_dir)],
-      env=env,
-    )
+    with open("tools/build_rules/apache_template.conf", "r") as file:
+        template = jinja2.Template(file.read())
 
-    wait_for_server(args.https_port)
-    wait_for_server(args.wrapped_port)
-    # Sleep to attempt to get the HTTPS message after the webserver message.
-    time.sleep(1)
-    print(f"Serving HTTPS on port {args.https_port}")
+    with open(args.ldap_info, "r") as file:
+        substitutions = json.load(file)
 
-    # Wait until we see a request to shut down.
-    signal.signal(signal.SIGINT, lambda signum, frame: None)
-    signal.signal(signal.SIGTERM, lambda signum, frame: None)
-    signal.pause()
+    for key in ("ldap_bind_dn", "ldap_url", "ldap_password"):
+        if key not in substitutions:
+            raise KeyError(
+                f"The ldap_info JSON file must contain key '{key}'.")
 
-    print("\nShutting down apache and wrapped binary.")
-    apache.terminate()
-    wrapped_binary.terminate()
-    apache.wait()
-    wrapped_binary.wait()
+    substitutions.update({
+        "https_port": args.https_port,
+        "wrapped_port": args.wrapped_port,
+    })
+
+    config_text = template.render(substitutions)
+
+    with tempfile.TemporaryDirectory() as temp_dir:
+        temp_dir = Path(temp_dir)
+        with open(temp_dir / "apache2.conf", "w") as file:
+            file.write(config_text)
+
+        # Create a directory for error logs and such.
+        logs_dir = temp_dir / "logs"
+        os.mkdir(logs_dir)
+
+        print("-" * 60)
+        print(f"Logs are in {logs_dir}/")
+        print("-" * 60)
+
+        # Make modules available.
+        modules_path = Path("external/apache2/usr/lib/apache2/modules")
+        os.symlink(modules_path.resolve(), temp_dir / "modules")
+
+        # Generate a testing cert.
+        subprocess.run(
+            [
+                "openssl",
+                "req",
+                "-x509",
+                "-nodes",
+                "-days=365",
+                "-newkey=rsa:2048",
+                "-keyout=" + str(temp_dir / "apache-selfsigned.key"),
+                "-out=" + str(temp_dir / "apache-selfsigned.crt"),
+            ],
+            check=True,
+            input=DUMMY_CERT_ANSWERS,
+            text=True,
+        )
+
+        # Start the wrapped binary in the background.
+        # Tell it via the environment what port to listen on.
+        env = os.environ.copy()
+        env["APACHE_WRAPPED_PORT"] = str(args.wrapped_port)
+        wrapped_binary = subprocess.Popen([args.binary] + unknown_args,
+                                          env=env)
+
+        # Start the apache server.
+        env = os.environ.copy()
+        env["LD_LIBRARY_PATH"] = "external/apache2/usr/lib/x86_64-linux-gnu"
+        apache = subprocess.Popen(
+            ["external/apache2/usr/sbin/apache2", "-X", "-d",
+             str(temp_dir)],
+            env=env,
+        )
+
+        wait_for_server(args.https_port)
+        wait_for_server(args.wrapped_port)
+        # Sleep to attempt to get the HTTPS message after the webserver message.
+        time.sleep(1)
+        print(f"Serving HTTPS on port {args.https_port}")
+
+        # Wait until we see a request to shut down.
+        signal.signal(signal.SIGINT, lambda signum, frame: None)
+        signal.signal(signal.SIGTERM, lambda signum, frame: None)
+        signal.pause()
+
+        print("\nShutting down apache and wrapped binary.")
+        apache.terminate()
+        wrapped_binary.terminate()
+        apache.wait()
+        wrapped_binary.wait()
+
 
 if __name__ == "__main__":
-  sys.exit(main(sys.argv))
+    sys.exit(main(sys.argv))
diff --git a/tools/build_rules/autocxx.bzl b/tools/build_rules/autocxx.bzl
new file mode 100644
index 0000000..ee8b4d4
--- /dev/null
+++ b/tools/build_rules/autocxx.bzl
@@ -0,0 +1,345 @@
+load("@rules_rust//rust:defs.bzl", "rust_library")
+load("@rules_cc//cc:find_cc_toolchain.bzl", "find_cc_toolchain")
+load("@bazel_tools//tools/build_defs/cc:action_names.bzl", "ACTION_NAMES")
+
+def _cc_toolchain_flags(ctx, cc_toolchain):
+    feature_configuration = cc_common.configure_features(
+        ctx = ctx,
+        cc_toolchain = cc_toolchain,
+        requested_features = ctx.features,
+        unsupported_features = ctx.disabled_features,
+    )
+    compiler_path = cc_common.get_tool_for_action(
+        feature_configuration = feature_configuration,
+        action_name = ACTION_NAMES.cpp_compile,
+    )
+    compile_variables = cc_common.create_compile_variables(
+        feature_configuration = feature_configuration,
+        cc_toolchain = cc_toolchain,
+        user_compile_flags = ctx.fragments.cpp.copts + ctx.fragments.cpp.cxxopts,
+    )
+    command_line = cc_common.get_memory_inefficient_command_line(
+        feature_configuration = feature_configuration,
+        action_name = ACTION_NAMES.cpp_compile,
+        variables = compile_variables,
+    )
+    env = cc_common.get_environment_variables(
+        feature_configuration = feature_configuration,
+        action_name = ACTION_NAMES.cpp_compile,
+        variables = compile_variables,
+    )
+    return command_line, env
+
+# All the stuff about AUTOCXX_RS_FILE and--fix-rs-include-name only applies when
+# using --gen-rs-include. We use --gen-rs-archive so none of that matters.
+#
+# The generated .rs file uses `include!` on the top-level C++ headers imported
+# via `#include` in `include_cpp!`. This always operates relative to the source
+# file (I don't see any way to change it), nor does autocxx have a way to change
+# the path. There are headers involved which use `#pragma once`, so just copying
+# them is a bad idea. Instead, we generate forwarding headers.
+def _autocxx_library_gen_impl(ctx):
+    rust_toolchain = ctx.toolchains[Label("@rules_rust//rust:toolchain")]
+
+    # TODO(Brian): Provide some way to override this globally in WORKSPACE? Need
+    # a real strategy for coordinating toolchains and flags, see the TODO below
+    # where cc_command_line is used for details.
+    if ctx.attr.override_cc_toolchain:
+        cc_toolchain = ctx.attr.override_cc_toolchain[cc_common.CcToolchainInfo]
+    else:
+        cc_toolchain = find_cc_toolchain(ctx)
+
+    # The directory where we generate files. Needs to be unique and in our package.
+    gendir = ctx.label.name + "__dir"
+
+    cc_command_line, cc_env = _cc_toolchain_flags(ctx, cc_toolchain)
+
+    includes = []
+    in_headers = []
+    forwarding_headers = []
+    for lib in ctx.attr.libs:
+        compilation = lib[CcInfo].compilation_context
+
+        # TODO(Brian): How should we be juggling includes, quote_includes, and system_includes?
+        includes.append(compilation.includes)
+        includes.append(compilation.quote_includes)
+        includes.append(compilation.system_includes)
+        in_headers.append(compilation.headers)
+        for header in compilation.direct_public_headers:
+            # TODO(Brian): This doesn't work if it's being `#include`ed (via
+            # `include_cpp!`) using one of the includes paths. Maybe we should
+            # add each `includes` prefixed with `gendir` to solve that?
+            forwarding = ctx.actions.declare_file("%s/%s" % (gendir, header.short_path))
+            forwarding_headers.append(forwarding)
+            ctx.actions.write(forwarding, '#include "%s"' % header.short_path)
+    includes = depset(transitive = includes)
+    action_inputs = depset(
+        direct = ctx.files.srcs + ctx.files.cxxbridge_srcs,
+        transitive = in_headers + [cc_toolchain.all_files],
+    )
+
+    # This is always the name with --gen-rs-archive, regardless of other flags.
+    out_rs_json = ctx.actions.declare_file("%s/gen.rs.json" % gendir)
+    out_env_file = ctx.actions.declare_file("%s/rustc_env" % gendir)
+    ctx.actions.write(
+        output = out_env_file,
+        content = "AUTOCXX_RS_JSON_ARCHIVE=%s" % out_rs_json.path,
+    )
+
+    out_h = ctx.actions.declare_file("%s_cxxgen.h" % ctx.label.name.rstrip("__gen"))
+    out_h_guard = out_h.short_path.replace("/", "_").replace(".", "_")
+    out_h_contents = [
+        "#ifndef %s" % out_h_guard,
+        "#define %s" % out_h_guard,
+        "// GENERATED FILE, DO NOT EDIT",
+        "//",
+        "// #includes all of the declarations exported to C++ from %s" % ctx.label,
+    ]
+    out_cc = []
+
+    # See `gen --help` for details on the naming of these outputs.
+    for cc_index in range(ctx.attr.sections_to_generate):
+        out_cc.append(ctx.actions.declare_file("%s/gen%d.cc" % (gendir, cc_index)))
+        gen_h = ctx.actions.declare_file("%s/gen%d.h" % (gendir, cc_index))
+        out_cc.append(gen_h)
+        out_h_contents.append("#include \"%s\"" % gen_h.short_path)
+        autocxxgen_h = ctx.actions.declare_file("%s/autocxxgen%d.h" % (gendir, cc_index))
+        out_cc.append(autocxxgen_h)
+        out_h_contents.append("#include \"%s\"" % autocxxgen_h.short_path)
+
+    cxxbridge_cc_srcs = []
+    for src in ctx.files.cxxbridge_srcs:
+        cxxbridge_cc = ctx.actions.declare_file("%s/cxxbridge.cc" % gendir)
+        cxxbridge_cc_srcs.append(cxxbridge_cc)
+        cxxbridge_h = ctx.actions.declare_file("%s/cxxbridge.h" % gendir)
+        cxxbridge_cc_srcs.append(cxxbridge_h)
+        out_h_contents.append("#include \"%s\"" % cxxbridge_h.short_path)
+        ctx.actions.run(
+            mnemonic = "CxxCodegen",
+            executable = ctx.executable._cxx_codegen,
+            inputs = [src],
+            outputs = [cxxbridge_cc, cxxbridge_h],
+            arguments = [src.path, "--output", cxxbridge_h.path, "--output", cxxbridge_cc.path],
+        )
+
+    out_h_contents.append("#endif  // %s" % out_h_guard)
+    ctx.actions.write(
+        output = out_h,
+        content = "\n".join(out_h_contents),
+    )
+
+    gen_rs = ctx.actions.args()
+    gen_rs.add_all(["--outdir", out_rs_json.dirname])
+    gen_rs.add("--gen-rs-archive")
+    gen_rs.add("--gen-cpp")
+
+    gen_rs.add_all(["--generate-exact", ctx.attr.sections_to_generate])
+
+    gen_rs.add_all(ctx.files.srcs)
+    gen_rs.add_all(ctx.files.cxxbridge_srcs)
+
+    # TODO: Do these go before or after the --? They're partially redundant with
+    # cc_command_line too.
+    gen_rs.add_all(includes, before_each = "-I")
+    gen_rs.add("--")
+
+    # TODO: These are flags for the cc_toolchain, not the libclang they're being passed to.
+    # Figure out how to handle that nicely. Maybe just require they're compatible, and direct
+    # people to overriding the toolchain in use instead?
+    gen_rs.add_all(cc_command_line)
+
+    gen_rs.add("-Wno-unused-private-field")
+    env = dict(cc_env)
+    env.update(
+        LIBCLANG_PATH = ctx.file._libclang.path,
+    )
+    if ctx.attr.gen_debug:
+        env.update(
+            RUST_BACKTRACE = "full",
+            RUST_LOG = "autocxx_engine=info",
+        )
+    ctx.actions.run(
+        arguments = [gen_rs],
+        outputs = [out_rs_json] + out_cc,
+        tools = [ctx.file._libclang],
+        inputs = action_inputs,
+        env = env,
+        executable = ctx.executable._autocxx_gen,
+        mnemonic = "AutocxxGen",
+    )
+
+    return [
+        OutputGroupInfo(
+            cc_srcs = out_cc + cxxbridge_cc_srcs,
+            hdr_srcs = [out_h],
+            compile_data = forwarding_headers + [out_rs_json],
+            env_files = [out_env_file],
+        ),
+    ]
+
+_autocxx_library_gen = rule(
+    implementation = _autocxx_library_gen_impl,
+    attrs = {
+        "libs": attr.label_list(
+            mandatory = True,
+            providers = [CcInfo],
+            doc = "C++ libraries to let Rust use headers from",
+        ),
+        "srcs": attr.label_list(
+            allow_files = [".rs"],
+            mandatory = False,
+            doc = "Rust sources with `include_cpp!` macros",
+            default = [],
+        ),
+        # TODO(Brian): Do we need to support this? Or just throw them in srcs?
+        "cxxbridge_srcs": attr.label_list(
+            allow_files = [".rs"],
+            mandatory = False,
+            doc = "Rust sources with only [cxx::bridge] annotations",
+            default = [],
+        ),
+        "sections_to_generate": attr.int(
+            default = 20,
+            doc = (
+                "The number of `cxx::bridge` sections to support," +
+                " including ones created by `autocxx::include_cpp!`." +
+                " The default is sufficient for most use cases." +
+                " Setting this too large has a small performance impact, setting it" +
+                " too low will result in a build failure."
+            ),
+        ),
+        "gen_debug": attr.bool(
+            default = False,
+            doc = "Print (lots of) debug info about autocxx's codegen at build time.",
+        ),
+        "_autocxx_gen": attr.label(
+            executable = True,
+            default = Label("@//third_party/autocxx/gen/cmd:gen"),
+            cfg = "exec",
+        ),
+        "_cxx_codegen": attr.label(
+            executable = True,
+            default = Label("@//third_party/cargo:cargo_bin_cxxbridge"),
+            cfg = "exec",
+        ),
+        "_libclang": attr.label(
+            cfg = "exec",
+            default = Label("@llvm_k8//:libclang"),
+            allow_single_file = True,
+        ),
+        "override_cc_toolchain": attr.label(mandatory = False, providers = [cc_common.CcToolchainInfo]),
+        "_cc_toolchain": attr.label(default = Label("@bazel_tools//tools/cpp:current_cc_toolchain")),
+    },
+    toolchains = [
+        "@rules_rust//rust:toolchain",
+        "@bazel_tools//tools/cpp:toolchain_type",
+    ],
+    fragments = ["cpp"],
+)
+
+def autocxx_library(
+        name,
+        visibility = None,
+        target_compatible_with = None,
+        libs = [],
+        srcs = [],
+        cxxbridge_srcs = [],
+        override_cc_toolchain = None,
+        deps = [],
+        rs_deps = [],
+        testonly = None,
+        crate_features = None,
+        crate_name = None,
+        gen_debug = None):
+    """A macro to generate Rust <-> C++ interop code with autocxx.
+
+    Creates the following rules:
+      * A rust_library with the given name, which includes the given srcs. Note that it will not
+        include them directly due to how autocxx works, instead they will be copied into a
+        generated file along with changes from autocxx.
+      * A cc_library with the given name + `_cc`. This is for C++ code that wants to use APIs
+        from the given Rust code. Rust dependencies should _not_ depend on this. The header for C++
+        to #include will be named by the given name + `_cxxgen.h`.
+
+      `deps` is for other `autocxx_library` rules. `rs_deps` is for dependencies of the Rust code.
+    """
+    library_gen_name = "%s__gen" % name
+    _autocxx_library_gen(
+        name = library_gen_name,
+        visibility = ["//visibility:private"],
+        target_compatible_with = target_compatible_with,
+        testonly = testonly,
+        libs = libs,
+        srcs = srcs,
+        cxxbridge_srcs = cxxbridge_srcs,
+        override_cc_toolchain = override_cc_toolchain,
+        gen_debug = gen_debug,
+    )
+    gen_cc_srcs_name = "%s__cc_srcs" % name
+    native.filegroup(
+        name = gen_cc_srcs_name,
+        visibility = ["//visibility:private"],
+        target_compatible_with = target_compatible_with,
+        testonly = testonly,
+        srcs = [library_gen_name],
+        output_group = "cc_srcs",
+    )
+    gen_hdr_srcs_name = "%s__hdr_srcs" % name
+    native.filegroup(
+        name = gen_hdr_srcs_name,
+        visibility = ["//visibility:private"],
+        target_compatible_with = target_compatible_with,
+        testonly = testonly,
+        srcs = [library_gen_name],
+        output_group = "hdr_srcs",
+    )
+    gen_compile_data_name = "%s__compile_data" % name
+    native.filegroup(
+        name = gen_compile_data_name,
+        visibility = ["//visibility:private"],
+        target_compatible_with = target_compatible_with,
+        testonly = testonly,
+        srcs = [library_gen_name],
+        output_group = "compile_data",
+    )
+    gen_env_files_name = "%s__env_files" % name
+    native.filegroup(
+        name = gen_env_files_name,
+        visibility = ["//visibility:private"],
+        target_compatible_with = target_compatible_with,
+        testonly = testonly,
+        srcs = [library_gen_name],
+        output_group = "env_files",
+    )
+    cc_library_name = "%s__cc" % name
+    native.cc_library(
+        name = cc_library_name,
+        visibility = visibility,
+        target_compatible_with = target_compatible_with,
+        testonly = testonly,
+        deps = deps + libs + [
+            "//third_party/cargo:cxx_cc",
+        ],
+        srcs = [gen_cc_srcs_name],
+        hdrs = [gen_hdr_srcs_name],
+    )
+
+    rust_library(
+        name = name,
+        visibility = visibility,
+        target_compatible_with = target_compatible_with,
+        testonly = testonly,
+        srcs = srcs + cxxbridge_srcs,
+        proc_macro_deps = [
+            "//third_party/cargo:cxxbridge_macro",
+        ],
+        crate_features = crate_features,
+        crate_name = crate_name,
+        deps = deps + rs_deps + [
+            cc_library_name,
+            "//third_party/cargo:cxx",
+            "//third_party/autocxx",
+        ],
+        compile_data = [gen_compile_data_name],
+        rustc_env_files = [gen_env_files_name],
+    )
diff --git a/tools/build_rules/jinja2_generator.py b/tools/build_rules/jinja2_generator.py
index 34f3354..e5f6fa3 100644
--- a/tools/build_rules/jinja2_generator.py
+++ b/tools/build_rules/jinja2_generator.py
@@ -8,22 +8,26 @@
 
 
 def main():
-  # Note: this is a pretty transparent interface to jinja2--there's no reason
-  # this script couldn't be renamed and then used to generate any config from
-  # a template.
-  parser = argparse.ArgumentParser(
-      description="Generates the raspberry pi configs from a template.")
-  parser.add_argument("template", type=str, help="File to use for template.")
-  parser.add_argument("replacements", type=json.loads, help="Dictionary of parameters to replace in the template.")
-  parser.add_argument("output", type=str, help="Output file to create.")
-  args = parser.parse_args(sys.argv[1:])
+    # Note: this is a pretty transparent interface to jinja2--there's no reason
+    # this script couldn't be renamed and then used to generate any config from
+    # a template.
+    parser = argparse.ArgumentParser(
+        description="Generates the raspberry pi configs from a template.")
+    parser.add_argument("template", type=str, help="File to use for template.")
+    parser.add_argument(
+        "replacements",
+        type=json.loads,
+        help="Dictionary of parameters to replace in the template.")
+    parser.add_argument("output", type=str, help="Output file to create.")
+    args = parser.parse_args(sys.argv[1:])
 
-  with open(args.template, 'r') as input_file:
-    template = jinja2.Template(input_file.read())
+    with open(args.template, 'r') as input_file:
+        template = jinja2.Template(input_file.read())
 
-  output = template.render(args.replacements)
-  with open(args.output, 'w') as config_file:
-    config_file.write(output)
+    output = template.render(args.replacements)
+    with open(args.output, 'w') as config_file:
+        config_file.write(output)
+
 
 if __name__ == '__main__':
-  main()
+    main()
diff --git a/tools/go/mirror_go_repos.py b/tools/go/mirror_go_repos.py
index ae8f722..52d611f 100644
--- a/tools/go/mirror_go_repos.py
+++ b/tools/go/mirror_go_repos.py
@@ -20,15 +20,20 @@
 
 GO_DEPS_WWWW_DIR = "/var/www/html/files/frc971/Build-Dependencies/go_deps"
 
+
 def compute_sha256(filepath: str) -> str:
     """Computes the SHA256 of a file at the specified location."""
     with open(filepath, "rb") as file:
         contents = file.read()
     return hashlib.sha256(contents).hexdigest()
 
+
 def get_existing_mirrored_repos(ssh_host: str) -> Dict[str, str]:
     """Gathers information about the libraries that are currently mirrored."""
-    run_result = subprocess.run(["ssh", ssh_host, f"bash -c 'sha256sum {GO_DEPS_WWWW_DIR}/*'"], check=True, stdout=subprocess.PIPE)
+    run_result = subprocess.run(
+        ["ssh", ssh_host, f"bash -c 'sha256sum {GO_DEPS_WWWW_DIR}/*'"],
+        check=True,
+        stdout=subprocess.PIPE)
 
     existing_mirrored_repos = {}
     for line in run_result.stdout.decode("utf-8").splitlines():
@@ -37,10 +42,10 @@
 
     return existing_mirrored_repos
 
-def download_repos(
-        repos: Dict[str, str],
-        existing_mirrored_repos: Dict[str, str],
-        tar: tarfile.TarFile) -> Dict[str, str]:
+
+def download_repos(repos: Dict[str, str], existing_mirrored_repos: Dict[str,
+                                                                        str],
+                   tar: tarfile.TarFile) -> Dict[str, str]:
     """Downloads the not-yet-mirrored repos into a tarball."""
     cached_info = {}
 
@@ -52,7 +57,8 @@
 
         download_result = subprocess.run(
             ["external/go_sdk/bin/go", "mod", "download", "-json", module],
-            check=True, stdout=subprocess.PIPE)
+            check=True,
+            stdout=subprocess.PIPE)
         if download_result.returncode != 0:
             print("Failed to download file.")
             return 1
@@ -81,6 +87,7 @@
 
     return cached_info
 
+
 def copy_to_host_and_unpack(filename: str, ssh_host: str) -> None:
     subprocess.run(["scp", filename, f"{ssh_host}:"], check=True)
 
@@ -94,7 +101,10 @@
     ])
 
     print("You might be asked for your sudo password shortly.")
-    subprocess.run(["ssh", "-t", ssh_host, f"sudo -u www-data bash -c '{command}'"], check=True)
+    subprocess.run(
+        ["ssh", "-t", ssh_host, f"sudo -u www-data bash -c '{command}'"],
+        check=True)
+
 
 def main(argv):
     parser = argparse.ArgumentParser()
@@ -113,12 +123,15 @@
               "Build-Dependencies files live. Only specify this if you have "
               "access to the server."))
     parser.add_argument("--go_deps_bzl", type=str, default="go_deps.bzl")
-    parser.add_argument("--go_mirrors_bzl", type=str, default="tools/go/go_mirrors.bzl")
+    parser.add_argument("--go_mirrors_bzl",
+                        type=str,
+                        default="tools/go/go_mirrors.bzl")
     args = parser.parse_args(argv[1:])
 
     os.chdir(os.environ["BUILD_WORKSPACE_DIRECTORY"])
 
-    repos = org_frc971.tools.go.mirror_lib.parse_go_repositories(args.go_deps_bzl)
+    repos = org_frc971.tools.go.mirror_lib.parse_go_repositories(
+        args.go_deps_bzl)
 
     if args.ssh_host:
         existing_mirrored_repos = get_existing_mirrored_repos(args.ssh_host)
@@ -129,7 +142,8 @@
 
     if args.prune:
         # Delete all mirror info that is not needed anymore.
-        existing_cache_info = org_frc971.tools.go.mirror_lib.parse_go_mirror_info(args.go_mirrors_bzl)
+        existing_cache_info = org_frc971.tools.go.mirror_lib.parse_go_mirror_info(
+            args.go_mirrors_bzl)
         cached_info = {}
         for repo in repos:
             try:
@@ -151,10 +165,12 @@
         if args.ssh_host and num_not_already_mirrored:
             copy_to_host_and_unpack("go_deps.tar", args.ssh_host)
         else:
-            print("Skipping mirroring because of lack of --ssh_host or there's "
-                  "nothing to actually mirror.")
+            print(
+                "Skipping mirroring because of lack of --ssh_host or there's "
+                "nothing to actually mirror.")
 
-    org_frc971.tools.go.mirror_lib.write_go_mirror_info(args.go_mirrors_bzl, cached_info)
+    org_frc971.tools.go.mirror_lib.write_go_mirror_info(
+        args.go_mirrors_bzl, cached_info)
 
     return exit_code
 
diff --git a/tools/go/mirror_lib.py b/tools/go/mirror_lib.py
index ea23abc..b613ca8 100644
--- a/tools/go/mirror_lib.py
+++ b/tools/go/mirror_lib.py
@@ -13,6 +13,7 @@
     with open(filepath, "r") as file:
         return file.read()
 
+
 def parse_go_repositories(filepath: str) -> List[Dict[str, str]]:
     """Parses the top-level go_deps.bzl file.
 
@@ -53,7 +54,8 @@
     return global_data["GO_MIRROR_INFO"]
 
 
-def write_go_mirror_info(filepath: str, mirror_info: Dict[str, Dict[str, str]]):
+def write_go_mirror_info(filepath: str, mirror_info: Dict[str, Dict[str,
+                                                                    str]]):
     """Saves the specified mirror_info as GO_MIRROR_INFO into tools/go/go_mirrors.bzl."""
     with open(filepath, "w") as file:
         file.write("# This file is auto-generated. Do not edit.\n")
@@ -64,9 +66,10 @@
 
     # Properly format the file now so that the linter doesn't complain.
     r = runfiles.Create()
-    subprocess.run(
-        [
-            r.Rlocation("com_github_bazelbuild_buildtools/buildifier/buildifier_/buildifier"),
-            filepath,
-        ],
-        check=True)
+    subprocess.run([
+        r.Rlocation(
+            "com_github_bazelbuild_buildtools/buildifier/buildifier_/buildifier"
+        ),
+        filepath,
+    ],
+                   check=True)
diff --git a/tools/go/tweak_gazelle_go_deps.py b/tools/go/tweak_gazelle_go_deps.py
index d9d7901..d7f3372 100644
--- a/tools/go/tweak_gazelle_go_deps.py
+++ b/tools/go/tweak_gazelle_go_deps.py
@@ -14,22 +14,27 @@
 
 import org_frc971.tools.go.mirror_lib
 
+
 def main(argv):
     parser = argparse.ArgumentParser()
     parser.add_argument("go_deps_bzl", type=str)
     args = parser.parse_args(argv[1:])
 
-    repos = org_frc971.tools.go.mirror_lib.parse_go_repositories(args.go_deps_bzl)
+    repos = org_frc971.tools.go.mirror_lib.parse_go_repositories(
+        args.go_deps_bzl)
 
     with open(args.go_deps_bzl, "w") as file:
-        file.write(textwrap.dedent("""\
+        file.write(
+            textwrap.dedent("""\
             # This file is auto-generated. Do not edit.
             load("//tools/go:mirrored_go_deps.bzl", "maybe_override_go_dep")
 
             def go_dependencies():
             """))
         for repo in repos:
-            file.write(textwrap.indent(textwrap.dedent(f"""\
+            file.write(
+                textwrap.indent(
+                    textwrap.dedent(f"""\
                 maybe_override_go_dep(
                     name = "{repo['name']}",
                     importpath = "{repo['importpath']}",
@@ -38,5 +43,6 @@
                 )
                 """), " " * 4))
 
+
 if __name__ == "__main__":
     sys.exit(main(sys.argv))
diff --git a/tools/lint/BUILD b/tools/lint/BUILD
index 57d4e08..55cccff 100644
--- a/tools/lint/BUILD
+++ b/tools/lint/BUILD
@@ -35,6 +35,17 @@
 )
 
 sh_binary(
+    name = "yapf",
+    srcs = ["yapf.sh"],
+    data = [
+        "@python_yapf",
+    ],
+    deps = [
+        "@bazel_tools//tools/bash/runfiles",
+    ],
+)
+
+sh_binary(
     name = "prettier",
     srcs = ["prettier.sh"],
     data = [
@@ -56,6 +67,7 @@
         ":gofmt",
         ":prettier",
         ":rustfmt",
+        ":yapf",
         "//:gazelle-runner",
         "//tools/go:mirror_go_repos",
         "//tools/go:tweak_gazelle_go_deps",
diff --git a/tools/lint/run-ci.sh b/tools/lint/run-ci.sh
index a036c06..5e48da6 100755
--- a/tools/lint/run-ci.sh
+++ b/tools/lint/run-ci.sh
@@ -89,6 +89,10 @@
     ./tools/lint/prettier
 }
 
+yapf() {
+    ./tools/lint/yapf
+}
+
 git_status_is_clean() {
     cd "${BUILD_WORKSPACE_DIRECTORY}"
     if ! git diff --quiet; then
@@ -110,6 +114,7 @@
     tweak_cargo_raze
     buildifier
     prettier
+    yapf
     git_status_is_clean  # This must the last linter.
 )
 
diff --git a/tools/lint/yapf.sh b/tools/lint/yapf.sh
new file mode 100755
index 0000000..404d5fd
--- /dev/null
+++ b/tools/lint/yapf.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+# --- begin runfiles.bash initialization v2 ---
+# Copy-pasted from the Bazel Bash runfiles library v2.
+set -uo pipefail; f=bazel_tools/tools/bash/runfiles/runfiles.bash
+source "${RUNFILES_DIR:-/dev/null}/$f" 2>/dev/null || \
+  source "$(grep -sm1 "^$f " "${RUNFILES_MANIFEST_FILE:-/dev/null}" | cut -f2- -d' ')" 2>/dev/null || \
+  source "$0.runfiles/$f" 2>/dev/null || \
+  source "$(grep -sm1 "^$f " "$0.runfiles_manifest" | cut -f2- -d' ')" 2>/dev/null || \
+  source "$(grep -sm1 "^$f " "$0.exe.runfiles_manifest" | cut -f2- -d' ')" 2>/dev/null || \
+  { echo>&2 "ERROR: cannot find $f"; exit 1; }; f=; set -e
+# --- end runfiles.bash initialization v2 ---
+
+readonly YAPF="$(rlocation python_yapf/python_yapf)"
+
+# Run everything from the root of the tree.
+cd "${BUILD_WORKSPACE_DIRECTORY}"
+
+# Find all the Python files in the repo.
+python_files=($(git ls-tree --name-only --full-tree -r @ \
+    | grep -v '^third_party/' \
+    | (grep '\.py$' || :)))
+
+# If we have any Python files, format them.
+if ((${#python_files[@]} > 0)); then
+    exec "${YAPF}" --in-place --parallel --verbose "${python_files[@]}"
+fi
diff --git a/tools/platforms/BUILD b/tools/platforms/BUILD
index 37fed2e..1d79f06 100644
--- a/tools/platforms/BUILD
+++ b/tools/platforms/BUILD
@@ -42,7 +42,12 @@
         "@platforms//cpu:armv7",
         "//tools/platforms/hardware:roborio",
         "//tools/platforms/go:lacks_support",
-        "//tools/platforms/rust:has_support",
+        # TODO(Brian): This almost works, but cxx assumes llvm-ld's linking
+        # behavior and doesn't have an easy way to support GNU ld. See
+        # https://github.com/dtolnay/cxx/pull/1069 for a bit more explanation.
+        # Bazel needs to group multiple things into a single cc_library to
+        # handle that, need to figure out how to do that here or switch linkers.
+        "//tools/platforms/rust:lacks_support",
         "//tools/platforms/nodejs:lacks_support",
     ],
 )
diff --git a/tools/rust/BUILD b/tools/rust/BUILD
index 74ed92d..a5a2f89 100644
--- a/tools/rust/BUILD
+++ b/tools/rust/BUILD
@@ -98,7 +98,7 @@
     exec_triple = "none",
     os = "none",
     rust_doc = ":noop_error_exit",
-    rust_lib = ":empty_stdlib",
+    rust_std = ":empty_stdlib",
     rustc = ":noop_error_exit",
     rustc_lib = ":noop_error_exit",
     rustc_srcs = None,
@@ -124,7 +124,7 @@
 rust_binary(
     name = "tweak_cargo_raze_output",
     srcs = ["tweak_cargo_raze_output.rs"],
-    target_compatible_with = ["@platforms//os:linux"],
+    target_compatible_with = ["//tools/platforms/rust:has_support"],
     visibility = ["//visibility:public"],
     deps = [
         "//third_party/cargo:anyhow",
diff --git a/tools/rust/forward_allocator.c b/tools/rust/forward_allocator.c
index 3e6ba96..a321af3 100644
--- a/tools/rust/forward_allocator.c
+++ b/tools/rust/forward_allocator.c
@@ -2,7 +2,8 @@
 
 // This file has some exciting magic to get Rust code linking in a cc_binary.
 // The Rust compiler generates some similar symbol aliases when it links, so we
-// have to do it manually.
+// have to do it manually. We mark all our symbols as weak so that linking this
+// via Rust tooling to produce a binary with a Rust main works.
 //
 // It is intended to be used in rust_toolchain.allocator_library.
 //
@@ -20,27 +21,32 @@
 // not work with any other allocated switched in via `#[global_allocator]`.
 
 // New feature as of https://github.com/rust-lang/rust/pull/88098.
-uint8_t __rust_alloc_error_handler_should_panic = 0;
+__attribute__((weak)) uint8_t __rust_alloc_error_handler_should_panic = 0;
 
 uint8_t *__rdl_alloc(uintptr_t size, uintptr_t align);
+__attribute__((weak))
 uint8_t *__rust_alloc(uintptr_t size, uintptr_t align) {
   return __rdl_alloc(size, align);
 }
 void __rdl_dealloc(uint8_t *ptr, uintptr_t size, uintptr_t align);
+__attribute__((weak))
 void __rust_dealloc(uint8_t *ptr, uintptr_t size, uintptr_t align) {
   __rdl_dealloc(ptr, size, align);
 }
 uint8_t *__rdl_realloc(uint8_t *ptr, uintptr_t old_size, uintptr_t align,
                        uintptr_t new_size);
+__attribute__((weak))
 uint8_t *__rust_realloc(uint8_t *ptr, uintptr_t old_size, uintptr_t align,
                         uintptr_t new_size) {
   return __rdl_realloc(ptr, old_size, align, new_size);
 }
 uint8_t *__rdl_alloc_zeroed(uintptr_t size, uintptr_t align);
+__attribute__((weak))
 uint8_t *__rust_alloc_zeroed(uintptr_t size, uintptr_t align) {
   return __rdl_alloc_zeroed(size, align);
 }
 void __rdl_oom(uintptr_t size, uintptr_t align);
+__attribute__((weak))
 void __rust_alloc_error_handler(uintptr_t size, uintptr_t align) {
   __rdl_oom(size, align);
 }
diff --git a/y2014/control_loops/python/claw.py b/y2014/control_loops/python/claw.py
index 77ccf14..488d792 100755
--- a/y2014/control_loops/python/claw.py
+++ b/y2014/control_loops/python/claw.py
@@ -51,8 +51,8 @@
         # Input is [bottom power, top power - bottom power * J_top / J_bottom]
         # Motor time constants. difference_bottom refers to the constant for how the
         # bottom velocity affects the difference of the top and bottom velocities.
-        self.common_motor_constant = -self.Kt / self.Kv / (
-            self.G * self.G * self.R)
+        self.common_motor_constant = -self.Kt / self.Kv / (self.G * self.G *
+                                                           self.R)
         self.bottom_bottom = self.common_motor_constant / self.J_bottom
         self.difference_bottom = -self.common_motor_constant * (
             1 / self.J_bottom - 1 / self.J_top)
@@ -96,7 +96,8 @@
                                                    self.B_continuous, self.dt)
 
         self.A_bottom, self.B_bottom = controls.c2d(self.A_bottom_cont,
-                                                    self.B_bottom_cont, self.dt)
+                                                    self.B_bottom_cont,
+                                                    self.dt)
         self.A_diff, self.B_diff = controls.c2d(self.A_diff_cont,
                                                 self.B_diff_cont, self.dt)
 
@@ -135,12 +136,12 @@
         glog.debug(str(lstsq_A))
         glog.debug('det %s', str(numpy.linalg.det(lstsq_A)))
 
-        out_x = numpy.linalg.lstsq(
-            lstsq_A,
-            numpy.matrix([[self.A[1, 2]], [self.A[3, 2]]]),
-            rcond=None)[0]
-        self.K[1, 2] = -lstsq_A[0, 0] * (
-            self.K[0, 2] - out_x[0]) / lstsq_A[0, 1] + out_x[1]
+        out_x = numpy.linalg.lstsq(lstsq_A,
+                                   numpy.matrix([[self.A[1, 2]],
+                                                 [self.A[3, 2]]]),
+                                   rcond=None)[0]
+        self.K[1, 2] = -lstsq_A[0, 0] * (self.K[0, 2] -
+                                         out_x[0]) / lstsq_A[0, 1] + out_x[1]
 
         glog.debug('K unaugmented')
         glog.debug(str(self.K))
@@ -181,8 +182,9 @@
         X_ss[2, 0] = 1 / (1 - A[2, 2]) * B[2, 0] * U[0, 0]
         #X_ss[3, 0] = X_ss[3, 0] * A[3, 3] + X_ss[2, 0] * A[3, 2] + B[3, 0] * U[0, 0] + B[3, 1] * U[1, 0]
         #X_ss[3, 0] * (1 - A[3, 3]) = X_ss[2, 0] * A[3, 2] + B[3, 0] * U[0, 0] + B[3, 1] * U[1, 0]
-        X_ss[3, 0] = 1 / (1 - A[3, 3]) * (
-            X_ss[2, 0] * A[3, 2] + B[3, 1] * U[1, 0] + B[3, 0] * U[0, 0])
+        X_ss[3,
+             0] = 1 / (1 - A[3, 3]) * (X_ss[2, 0] * A[3, 2] +
+                                       B[3, 1] * U[1, 0] + B[3, 0] * U[0, 0])
         #X_ss[3, 0] = 1 / (1 - A[3, 3]) / (1 - A[2, 2]) * B[2, 0] * U[0, 0] * A[3, 2] + B[3, 0] * U[0, 0] + B[3, 1] * U[1, 0]
         X_ss[0, 0] = A[0, 2] * X_ss[2, 0] + B[0, 0] * U[0, 0]
         X_ss[1, 0] = (A[1, 2] * X_ss[2, 0]) + (A[1, 3] * X_ss[3, 0]) + (
@@ -247,7 +249,8 @@
         self.rpl = .05
         self.ipl = 0.008
         self.PlaceObserverPoles([
-            self.rpl + 1j * self.ipl, 0.10, 0.09, self.rpl - 1j * self.ipl, 0.90
+            self.rpl + 1j * self.ipl, 0.10, 0.09, self.rpl - 1j * self.ipl,
+            0.90
         ])
         #print "A is"
         #print self.A
@@ -284,8 +287,8 @@
         numpy.matrix([[1, 0], [-1, 0], [0, 1], [0, -1]]),
         numpy.matrix([[12], [12], [12], [12]]))
 
-    if (bottom_u > claw.U_max[0, 0] or bottom_u < claw.U_min[0, 0] or
-            top_u > claw.U_max[0, 0] or top_u < claw.U_min[0, 0]):
+    if (bottom_u > claw.U_max[0, 0] or bottom_u < claw.U_min[0, 0]
+            or top_u > claw.U_max[0, 0] or top_u < claw.U_min[0, 0]):
 
         position_K = K[:, 0:2]
         velocity_K = K[:, 2:]
@@ -501,8 +504,8 @@
     else:
         namespaces = ['y2014', 'control_loops', 'claw']
         claw = Claw('Claw')
-        loop_writer = control_loop.ControlLoopWriter(
-            'Claw', [claw], namespaces=namespaces)
+        loop_writer = control_loop.ControlLoopWriter('Claw', [claw],
+                                                     namespaces=namespaces)
         loop_writer.AddConstant(
             control_loop.Constant('kClawMomentOfInertiaRatio', '%f',
                                   claw.J_top / claw.J_bottom))
diff --git a/y2014/control_loops/python/drivetrain.py b/y2014/control_loops/python/drivetrain.py
index cac236f..d72c222 100755
--- a/y2014/control_loops/python/drivetrain.py
+++ b/y2014/control_loops/python/drivetrain.py
@@ -11,30 +11,32 @@
 
 gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
 
-kDrivetrain = drivetrain.DrivetrainParams(J = 2.8,
-                                          mass = 68,
-                                          robot_radius = 0.647998644 / 2.0,
-                                          wheel_radius = .04445,
-                                          G_high = 28.0 / 50.0 * 18.0 / 50.0,
-                                          G_low = 18.0 / 60.0 * 18.0 / 50.0,
-                                          q_pos_low = 0.12,
-                                          q_pos_high = 0.12,
-                                          q_vel_low = 1.0,
-                                          q_vel_high = 1.0,
-                                          has_imu = False,
-                                          dt = 0.005,
-                                          controller_poles = [0.7, 0.7])
+kDrivetrain = drivetrain.DrivetrainParams(J=2.8,
+                                          mass=68,
+                                          robot_radius=0.647998644 / 2.0,
+                                          wheel_radius=.04445,
+                                          G_high=28.0 / 50.0 * 18.0 / 50.0,
+                                          G_low=18.0 / 60.0 * 18.0 / 50.0,
+                                          q_pos_low=0.12,
+                                          q_pos_high=0.12,
+                                          q_vel_low=1.0,
+                                          q_vel_high=1.0,
+                                          has_imu=False,
+                                          dt=0.005,
+                                          controller_poles=[0.7, 0.7])
+
 
 def main(argv):
-  argv = FLAGS(argv)
+    argv = FLAGS(argv)
 
-  if FLAGS.plot:
-    drivetrain.PlotDrivetrainMotions(kDrivetrain)
-  elif len(argv) != 5:
-    print("Expected .h file name and .cc file name")
-  else:
-    # Write the generated constants out to a file.
-    drivetrain.WriteDrivetrain(argv[1:3], argv[3:5], 'y2014', kDrivetrain)
+    if FLAGS.plot:
+        drivetrain.PlotDrivetrainMotions(kDrivetrain)
+    elif len(argv) != 5:
+        print("Expected .h file name and .cc file name")
+    else:
+        # Write the generated constants out to a file.
+        drivetrain.WriteDrivetrain(argv[1:3], argv[3:5], 'y2014', kDrivetrain)
+
 
 if __name__ == '__main__':
-  sys.exit(main(sys.argv))
+    sys.exit(main(sys.argv))
diff --git a/y2014/control_loops/python/dt_mpc.py b/y2014/control_loops/python/dt_mpc.py
index 0c229c1..2f13807 100755
--- a/y2014/control_loops/python/dt_mpc.py
+++ b/y2014/control_loops/python/dt_mpc.py
@@ -13,8 +13,9 @@
 #
 # Inital algorithm from http://www.ece.ufrgs.br/~fetter/icma05_608.pdf
 
+
 def cartesian_to_polar(X_cartesian):
-  """Converts a cartesian coordinate to polar.
+    """Converts a cartesian coordinate to polar.
 
   Args:
     X_cartesian, numpy.matrix[3, 1] with x, y, theta as rows.
@@ -22,13 +23,13 @@
   Returns:
     numpy.matrix[3, 1] with e, phi, alpha as rows.
   """
-  phi = numpy.arctan2(X_cartesian[1, 0], X_cartesian[0, 0])
-  return numpy.matrix([[numpy.hypot(X_cartesian[0, 0], X_cartesian[1, 0])],
-                       [phi],
-                       [X_cartesian[2, 0] - phi]])
+    phi = numpy.arctan2(X_cartesian[1, 0], X_cartesian[0, 0])
+    return numpy.matrix([[numpy.hypot(X_cartesian[0, 0], X_cartesian[1, 0])],
+                         [phi], [X_cartesian[2, 0] - phi]])
+
 
 def polar_to_cartesian(X_polar):
-  """Converts a polar coordinate to cartesian.
+    """Converts a polar coordinate to cartesian.
 
   Args:
     X_polar, numpy.matrix[3, 1] with e, phi, alpha as rows.
@@ -36,12 +37,13 @@
   Returns:
     numpy.matrix[3, 1] with x, y, theta as rows.
   """
-  return numpy.matrix([[X_polar[0, 0] * numpy.cos(X_polar[1, 0])],
-                       [X_polar[0, 0] * numpy.sin(X_polar[1, 0])],
-                       [X_polar[1, 0] + X_polar[2, 0]]])
+    return numpy.matrix([[X_polar[0, 0] * numpy.cos(X_polar[1, 0])],
+                         [X_polar[0, 0] * numpy.sin(X_polar[1, 0])],
+                         [X_polar[1, 0] + X_polar[2, 0]]])
+
 
 def simulate_dynamics(X_cartesian, U):
-  """Calculates the robot location after 1 timestep.
+    """Calculates the robot location after 1 timestep.
 
   Args:
     X_cartesian, numpy.matrix[3, 1] with the starting location in
@@ -51,15 +53,15 @@
   Returns:
     numpy.matrix[3, 1] with the cartesian coordinate.
   """
-  X_cartesian += numpy.matrix(
-      [[U[0, 0] * numpy.cos(X_cartesian[2, 0]) * dt],
-       [U[0, 0] * numpy.sin(X_cartesian[2, 0]) * dt],
-       [U[1, 0] * dt]])
+    X_cartesian += numpy.matrix([[U[0, 0] * numpy.cos(X_cartesian[2, 0]) * dt],
+                                 [U[0, 0] * numpy.sin(X_cartesian[2, 0]) * dt],
+                                 [U[1, 0] * dt]])
 
-  return X_cartesian
+    return X_cartesian
+
 
 def U_from_array(U_array):
-  """Converts the U array from the optimizer to a bunch of column vectors.
+    """Converts the U array from the optimizer to a bunch of column vectors.
 
   Args:
     U_array, numpy.array[N] The U coordinates in v, av, v, av, ...
@@ -67,10 +69,11 @@
   Returns:
     numpy.matrix[2, N/2] with [[v, v, v, ...], [av, av, av, ...]]
   """
-  return numpy.matrix(U_array).reshape((2, -1), order='F')
+    return numpy.matrix(U_array).reshape((2, -1), order='F')
+
 
 def U_to_array(U_matrix):
-  """Converts the U matrix to the U array for the optimizer.
+    """Converts the U matrix to the U array for the optimizer.
 
   Args:
     U_matrix, numpy.matrix[2, N/2] with [[v, v, v, ...], [av, av, av, ...]]
@@ -78,10 +81,11 @@
   Returns:
     numpy.array[N] The U coordinates in v, av, v, av, ...
   """
-  return numpy.array(U_matrix.reshape((1, -1), order='F'))
+    return numpy.array(U_matrix.reshape((1, -1), order='F'))
+
 
 def cost(U_array, X_cartesian):
-  """Computes the cost given the inital position and the U array.
+    """Computes the cost given the inital position and the U array.
 
   Args:
     U_array: numpy.array[N] The U coordinates.
@@ -91,91 +95,93 @@
   Returns:
     double, The quadratic cost of evaluating U.
   """
-  X_cartesian_mod = X_cartesian.copy()
-  U_matrix = U_from_array(U_array)
-  my_cost = 0
-  Q = numpy.matrix([[0.01, 0, 0],
-                    [0, 0.01, 0],
-                    [0, 0, 0.01]]) / dt / dt
-  R = numpy.matrix([[0.001, 0],
-                    [0, 0.001]]) / dt / dt
-  for U in U_matrix.T:
-    # TODO(austin): Let it go to the point from either side.
-    U = U.T
-    X_cartesian_mod = simulate_dynamics(X_cartesian_mod, U)
-    X_current_polar = cartesian_to_polar(X_cartesian_mod)
-    my_cost += U.T * R * U + X_current_polar.T * Q * X_current_polar
+    X_cartesian_mod = X_cartesian.copy()
+    U_matrix = U_from_array(U_array)
+    my_cost = 0
+    Q = numpy.matrix([[0.01, 0, 0], [0, 0.01, 0], [0, 0, 0.01]]) / dt / dt
+    R = numpy.matrix([[0.001, 0], [0, 0.001]]) / dt / dt
+    for U in U_matrix.T:
+        # TODO(austin): Let it go to the point from either side.
+        U = U.T
+        X_cartesian_mod = simulate_dynamics(X_cartesian_mod, U)
+        X_current_polar = cartesian_to_polar(X_cartesian_mod)
+        my_cost += U.T * R * U + X_current_polar.T * Q * X_current_polar
 
-  return my_cost
+    return my_cost
+
 
 if __name__ == '__main__':
-  X_cartesian = numpy.matrix([[-1.0],
-                              [-1.0],
-                              [0.0]])
-  x_array = []
-  y_array = []
-  theta_array = []
+    X_cartesian = numpy.matrix([[-1.0], [-1.0], [0.0]])
+    x_array = []
+    y_array = []
+    theta_array = []
 
-  e_array = []
-  phi_array = []
-  alpha_array = []
+    e_array = []
+    phi_array = []
+    alpha_array = []
 
-  cost_array = []
+    cost_array = []
 
-  time_array = []
-  u0_array = []
-  u1_array = []
+    time_array = []
+    u0_array = []
+    u1_array = []
 
-  num_steps = 20
+    num_steps = 20
 
-  U_array = numpy.zeros((num_steps * 2))
-  for i in range(400):
-    print('Iteration', i)
-    # Solve the NMPC
-    U_array, fx, _, _, _ = scipy.optimize.fmin_slsqp(
-        cost, U_array.copy(), bounds = [(-1, 1), (-7, 7)] * num_steps,
-        args=(X_cartesian,), iter=500, full_output=True)
-    U_matrix = U_from_array(U_array)
+    U_array = numpy.zeros((num_steps * 2))
+    for i in range(400):
+        print('Iteration', i)
+        # Solve the NMPC
+        U_array, fx, _, _, _ = scipy.optimize.fmin_slsqp(cost,
+                                                         U_array.copy(),
+                                                         bounds=[(-1, 1),
+                                                                 (-7, 7)] *
+                                                         num_steps,
+                                                         args=(X_cartesian, ),
+                                                         iter=500,
+                                                         full_output=True)
+        U_matrix = U_from_array(U_array)
 
-    # Simulate the dynamics
-    X_cartesian = simulate_dynamics(X_cartesian, U_matrix[:, 0])
+        # Simulate the dynamics
+        X_cartesian = simulate_dynamics(X_cartesian, U_matrix[:, 0])
 
-    # Save variables for plotting.
-    cost_array.append(fx[0, 0])
-    u0_array.append(U_matrix[0, 0])
-    u1_array.append(U_matrix[1, 0])
-    x_array.append(X_cartesian[0, 0])
-    y_array.append(X_cartesian[1, 0])
-    theta_array.append(X_cartesian[2, 0])
-    time_array.append(i * dt)
-    X_polar = cartesian_to_polar(X_cartesian)
-    e_array.append(X_polar[0, 0])
-    phi_array.append(X_polar[1, 0])
-    alpha_array.append(X_polar[2, 0])
+        # Save variables for plotting.
+        cost_array.append(fx[0, 0])
+        u0_array.append(U_matrix[0, 0])
+        u1_array.append(U_matrix[1, 0])
+        x_array.append(X_cartesian[0, 0])
+        y_array.append(X_cartesian[1, 0])
+        theta_array.append(X_cartesian[2, 0])
+        time_array.append(i * dt)
+        X_polar = cartesian_to_polar(X_cartesian)
+        e_array.append(X_polar[0, 0])
+        phi_array.append(X_polar[1, 0])
+        alpha_array.append(X_polar[2, 0])
 
-    U_array = U_to_array(numpy.hstack((U_matrix[:, 1:], numpy.matrix([[0], [0]]))))
+        U_array = U_to_array(
+            numpy.hstack((U_matrix[:, 1:], numpy.matrix([[0], [0]]))))
 
-    if fx < 1e-05:
-      print('Cost is', fx, 'after', i, 'cycles, finishing early')
-      break
+        if fx < 1e-05:
+            print('Cost is', fx, 'after', i, 'cycles, finishing early')
+            break
 
-  # Plot
-  pylab.figure('xy')
-  pylab.plot(x_array, y_array, label = 'trajectory')
+    # Plot
+    pylab.figure('xy')
+    pylab.plot(x_array, y_array, label='trajectory')
 
-  pylab.figure('time')
-  pylab.plot(time_array, x_array, label='x')
-  pylab.plot(time_array, y_array, label='y')
-  pylab.plot(time_array, theta_array, label = 'theta')
-  pylab.plot(time_array, e_array, label='e')
-  pylab.plot(time_array, phi_array, label='phi')
-  pylab.plot(time_array, alpha_array, label='alpha')
-  pylab.plot(time_array, cost_array, label='cost')
-  pylab.legend()
+    pylab.figure('time')
+    pylab.plot(time_array, x_array, label='x')
+    pylab.plot(time_array, y_array, label='y')
+    pylab.plot(time_array, theta_array, label='theta')
+    pylab.plot(time_array, e_array, label='e')
+    pylab.plot(time_array, phi_array, label='phi')
+    pylab.plot(time_array, alpha_array, label='alpha')
+    pylab.plot(time_array, cost_array, label='cost')
+    pylab.legend()
 
-  pylab.figure('u')
-  pylab.plot(time_array, u0_array, label='u0')
-  pylab.plot(time_array, u1_array, label='u1')
-  pylab.legend()
+    pylab.figure('u')
+    pylab.plot(time_array, u0_array, label='u0')
+    pylab.plot(time_array, u1_array, label='u1')
+    pylab.legend()
 
-  pylab.show()
+    pylab.show()
diff --git a/y2014/control_loops/python/extended_lqr.py b/y2014/control_loops/python/extended_lqr.py
index b3f2372..699dd30 100755
--- a/y2014/control_loops/python/extended_lqr.py
+++ b/y2014/control_loops/python/extended_lqr.py
@@ -17,8 +17,9 @@
 num_inputs = 2
 x_hat_initial = numpy.matrix([[0.10], [1.0], [0.0]])
 
+
 def dynamics(X, U):
-  """Calculates the dynamics for a 2 wheeled robot.
+    """Calculates the dynamics for a 2 wheeled robot.
 
   Args:
     X, numpy.matrix(3, 1), The state.  [x, y, theta]
@@ -27,29 +28,33 @@
   Returns:
     numpy.matrix(3, 1), The derivative of the dynamics.
   """
-  #return numpy.matrix([[X[1, 0]],
-  #                     [X[2, 0]],
-  #                     [U[0, 0]]])
-  return numpy.matrix([[(U[0, 0] + U[1, 0]) * numpy.cos(X[2, 0]) / 2.0],
-                       [(U[0, 0] + U[1, 0]) * numpy.sin(X[2, 0]) / 2.0],
-                       [(U[1, 0] - U[0, 0]) / width]])
+    #return numpy.matrix([[X[1, 0]],
+    #                     [X[2, 0]],
+    #                     [U[0, 0]]])
+    return numpy.matrix([[(U[0, 0] + U[1, 0]) * numpy.cos(X[2, 0]) / 2.0],
+                         [(U[0, 0] + U[1, 0]) * numpy.sin(X[2, 0]) / 2.0],
+                         [(U[1, 0] - U[0, 0]) / width]])
+
 
 def RungeKutta(f, x, dt):
-  """4th order RungeKutta integration of F starting at X."""
-  a = f(x)
-  b = f(x + dt / 2.0 * a)
-  c = f(x + dt / 2.0 * b)
-  d = f(x + dt * c)
-  return x + dt * (a + 2.0 * b + 2.0 * c + d) / 6.0
+    """4th order RungeKutta integration of F starting at X."""
+    a = f(x)
+    b = f(x + dt / 2.0 * a)
+    c = f(x + dt / 2.0 * b)
+    d = f(x + dt * c)
+    return x + dt * (a + 2.0 * b + 2.0 * c + d) / 6.0
+
 
 def discrete_dynamics(X, U):
-  return RungeKutta(lambda startingX: dynamics(startingX, U), X, dt)
+    return RungeKutta(lambda startingX: dynamics(startingX, U), X, dt)
+
 
 def inverse_discrete_dynamics(X, U):
-  return RungeKutta(lambda startingX: -dynamics(startingX, U), X, dt)
+    return RungeKutta(lambda startingX: -dynamics(startingX, U), X, dt)
+
 
 def numerical_jacobian_x(fn, X, U, epsilon=1e-4):
-  """Numerically estimates the jacobian around X, U in X.
+    """Numerically estimates the jacobian around X, U in X.
 
   Args:
     fn: A function of X, U.
@@ -62,20 +67,21 @@
     numpy.matrix(num_states, num_states), The jacobian of fn with X as the
       variable.
   """
-  num_states = X.shape[0]
-  nominal = fn(X, U)
-  answer = numpy.matrix(numpy.zeros((nominal.shape[0], num_states)))
-  # It's more expensive, but +- epsilon will be more reliable
-  for i in range(0, num_states):
-    dX_plus = X.copy()
-    dX_plus[i] += epsilon
-    dX_minus = X.copy()
-    dX_minus[i] -= epsilon
-    answer[:, i] = (fn(dX_plus, U) - fn(dX_minus, U)) / epsilon / 2.0
-  return answer
+    num_states = X.shape[0]
+    nominal = fn(X, U)
+    answer = numpy.matrix(numpy.zeros((nominal.shape[0], num_states)))
+    # It's more expensive, but +- epsilon will be more reliable
+    for i in range(0, num_states):
+        dX_plus = X.copy()
+        dX_plus[i] += epsilon
+        dX_minus = X.copy()
+        dX_minus[i] -= epsilon
+        answer[:, i] = (fn(dX_plus, U) - fn(dX_minus, U)) / epsilon / 2.0
+    return answer
+
 
 def numerical_jacobian_u(fn, X, U, epsilon=1e-4):
-  """Numerically estimates the jacobian around X, U in U.
+    """Numerically estimates the jacobian around X, U in U.
 
   Args:
     fn: A function of X, U.
@@ -88,48 +94,56 @@
     numpy.matrix(num_states, num_inputs), The jacobian of fn with U as the
       variable.
   """
-  num_states = X.shape[0]
-  num_inputs = U.shape[0]
-  nominal = fn(X, U)
-  answer = numpy.matrix(numpy.zeros((nominal.shape[0], num_inputs)))
-  for i in range(0, num_inputs):
-    dU_plus = U.copy()
-    dU_plus[i] += epsilon
-    dU_minus = U.copy()
-    dU_minus[i] -= epsilon
-    answer[:, i] = (fn(X, dU_plus) - fn(X, dU_minus)) / epsilon / 2.0
-  return answer
+    num_states = X.shape[0]
+    num_inputs = U.shape[0]
+    nominal = fn(X, U)
+    answer = numpy.matrix(numpy.zeros((nominal.shape[0], num_inputs)))
+    for i in range(0, num_inputs):
+        dU_plus = U.copy()
+        dU_plus[i] += epsilon
+        dU_minus = U.copy()
+        dU_minus[i] -= epsilon
+        answer[:, i] = (fn(X, dU_plus) - fn(X, dU_minus)) / epsilon / 2.0
+    return answer
+
 
 def numerical_jacobian_x_x(fn, X, U):
-  return numerical_jacobian_x(
-      lambda X_inner, U_inner: numerical_jacobian_x(fn, X_inner, U_inner).T, X, U)
+    return numerical_jacobian_x(
+        lambda X_inner, U_inner: numerical_jacobian_x(fn, X_inner, U_inner).T,
+        X, U)
+
 
 def numerical_jacobian_x_u(fn, X, U):
-  return numerical_jacobian_x(
-      lambda X_inner, U_inner: numerical_jacobian_u(fn, X_inner, U_inner).T, X, U)
+    return numerical_jacobian_x(
+        lambda X_inner, U_inner: numerical_jacobian_u(fn, X_inner, U_inner).T,
+        X, U)
+
 
 def numerical_jacobian_u_x(fn, X, U):
-  return numerical_jacobian_u(
-      lambda X_inner, U_inner: numerical_jacobian_x(fn, X_inner, U_inner).T, X, U)
+    return numerical_jacobian_u(
+        lambda X_inner, U_inner: numerical_jacobian_x(fn, X_inner, U_inner).T,
+        X, U)
+
 
 def numerical_jacobian_u_u(fn, X, U):
-  return numerical_jacobian_u(
-      lambda X_inner, U_inner: numerical_jacobian_u(fn, X_inner, U_inner).T, X, U)
+    return numerical_jacobian_u(
+        lambda X_inner, U_inner: numerical_jacobian_u(fn, X_inner, U_inner).T,
+        X, U)
+
 
 # Simple implementation for a quadratic cost function.
 class CostFunction:
-  def __init__(self):
-    self.num_states = num_states
-    self.num_inputs = num_inputs
-    self.dt = dt
-    self.Q = numpy.matrix([[0.1, 0, 0],
-                           [0, 0.6, 0],
-                           [0, 0, 0.1]]) / dt / dt
-    self.R = numpy.matrix([[0.40, 0],
-                           [0, 0.40]]) / dt / dt
 
-  def estimate_Q_final(self, X_hat):
-    """Returns the quadraticized final Q around X_hat.
+    def __init__(self):
+        self.num_states = num_states
+        self.num_inputs = num_inputs
+        self.dt = dt
+        self.Q = numpy.matrix([[0.1, 0, 0], [0, 0.6, 0], [0, 0, 0.1]
+                               ]) / dt / dt
+        self.R = numpy.matrix([[0.40, 0], [0, 0.40]]) / dt / dt
+
+    def estimate_Q_final(self, X_hat):
+        """Returns the quadraticized final Q around X_hat.
 
     This is calculated by evaluating partial^2 cost(X_hat) / (partial X * partial X)
 
@@ -139,11 +153,11 @@
     Result:
       numpy.matrix(self.num_states, self.num_states)
     """
-    zero_U = numpy.matrix(numpy.zeros((self.num_inputs, 1)))
-    return numerical_jacobian_x_x(self.final_cost, X_hat, zero_U)
+        zero_U = numpy.matrix(numpy.zeros((self.num_inputs, 1)))
+        return numerical_jacobian_x_x(self.final_cost, X_hat, zero_U)
 
-  def estimate_partial_cost_partial_x_final(self, X_hat):
-    """Returns \frac{\partial cost}{\partial X}(X_hat) for the final cost.
+    def estimate_partial_cost_partial_x_final(self, X_hat):
+        """Returns \frac{\partial cost}{\partial X}(X_hat) for the final cost.
 
     Args:
       X_hat: numpy.matrix(self.num_states, 1), The state to quadraticize around.
@@ -151,14 +165,17 @@
     Result:
       numpy.matrix(self.num_states, 1)
     """
-    return numerical_jacobian_x(self.final_cost, X_hat, numpy.matrix(numpy.zeros((self.num_inputs, 1)))).T
+        return numerical_jacobian_x(
+            self.final_cost, X_hat,
+            numpy.matrix(numpy.zeros((self.num_inputs, 1)))).T
 
-  def estimate_q_final(self, X_hat):
-    """Returns q evaluated at X_hat for the final cost function."""
-    return self.estimate_partial_cost_partial_x_final(X_hat) - self.estimate_Q_final(X_hat) * X_hat
+    def estimate_q_final(self, X_hat):
+        """Returns q evaluated at X_hat for the final cost function."""
+        return self.estimate_partial_cost_partial_x_final(
+            X_hat) - self.estimate_Q_final(X_hat) * X_hat
 
-  def final_cost(self, X, U):
-    """Computes the final cost of being at X
+    def final_cost(self, X, U):
+        """Computes the final cost of being at X
 
     Args:
       X: numpy.matrix(self.num_states, 1)
@@ -167,10 +184,10 @@
     Returns:
       numpy.matrix(1, 1), The quadratic cost of being at X
     """
-    return X.T * self.Q * X * 1000
+        return X.T * self.Q * X * 1000
 
-  def cost(self, X, U):
-    """Computes the incremental cost given a position and U.
+    def cost(self, X, U):
+        """Computes the incremental cost given a position and U.
 
     Args:
       X: numpy.matrix(self.num_states, 1)
@@ -179,250 +196,334 @@
     Returns:
       numpy.matrix(1, 1), The quadratic cost of evaluating U.
     """
-    return U.T * self.R * U + X.T * self.Q * X
+        return U.T * self.R * U + X.T * self.Q * X
+
 
 cost_fn_obj = CostFunction()
 
-S_bar_t = [numpy.matrix(numpy.zeros((num_states, num_states))) for _ in range(l + 1)]
+S_bar_t = [
+    numpy.matrix(numpy.zeros((num_states, num_states))) for _ in range(l + 1)
+]
 s_bar_t = [numpy.matrix(numpy.zeros((num_states, 1))) for _ in range(l + 1)]
 s_scalar_bar_t = [numpy.matrix(numpy.zeros((1, 1))) for _ in range(l + 1)]
 
-L_t = [numpy.matrix(numpy.zeros((num_inputs, num_states))) for _ in range(l + 1)]
+L_t = [
+    numpy.matrix(numpy.zeros((num_inputs, num_states))) for _ in range(l + 1)
+]
 l_t = [numpy.matrix(numpy.zeros((num_inputs, 1))) for _ in range(l + 1)]
-L_bar_t = [numpy.matrix(numpy.zeros((num_inputs, num_states))) for _ in range(l + 1)]
+L_bar_t = [
+    numpy.matrix(numpy.zeros((num_inputs, num_states))) for _ in range(l + 1)
+]
 l_bar_t = [numpy.matrix(numpy.zeros((num_inputs, 1))) for _ in range(l + 1)]
 
-S_t = [numpy.matrix(numpy.zeros((num_states, num_states))) for _ in range(l + 1)]
+S_t = [
+    numpy.matrix(numpy.zeros((num_states, num_states))) for _ in range(l + 1)
+]
 s_t = [numpy.matrix(numpy.zeros((num_states, 1))) for _ in range(l + 1)]
 s_scalar_t = [numpy.matrix(numpy.zeros((1, 1))) for _ in range(l + 1)]
 
-
-last_x_hat_t = [numpy.matrix(numpy.zeros((num_states, 1))) for _ in range(l + 1)]
+last_x_hat_t = [
+    numpy.matrix(numpy.zeros((num_states, 1))) for _ in range(l + 1)
+]
 
 for a in range(15):
-  x_hat = x_hat_initial
-  u_t = L_t[0] * x_hat + l_t[0]
-  S_bar_t[0] = numpy.matrix(numpy.zeros((num_states, num_states)))
-  s_bar_t[0] = numpy.matrix(numpy.zeros((num_states, 1)))
-  s_scalar_bar_t[0] = numpy.matrix([[0]])
+    x_hat = x_hat_initial
+    u_t = L_t[0] * x_hat + l_t[0]
+    S_bar_t[0] = numpy.matrix(numpy.zeros((num_states, num_states)))
+    s_bar_t[0] = numpy.matrix(numpy.zeros((num_states, 1)))
+    s_scalar_bar_t[0] = numpy.matrix([[0]])
 
-  last_x_hat_t[0] = x_hat_initial
+    last_x_hat_t[0] = x_hat_initial
 
-  Q_t = numerical_jacobian_x_x(cost_fn_obj.cost, x_hat_initial, u_t)
-  P_t = numerical_jacobian_x_u(cost_fn_obj.cost, x_hat_initial, u_t)
-  R_t = numerical_jacobian_u_u(cost_fn_obj.cost, x_hat_initial, u_t)
+    Q_t = numerical_jacobian_x_x(cost_fn_obj.cost, x_hat_initial, u_t)
+    P_t = numerical_jacobian_x_u(cost_fn_obj.cost, x_hat_initial, u_t)
+    R_t = numerical_jacobian_u_u(cost_fn_obj.cost, x_hat_initial, u_t)
 
-  q_t = numerical_jacobian_x(cost_fn_obj.cost, x_hat_initial, u_t).T - Q_t * x_hat_initial - P_t.T * u_t
-  r_t = numerical_jacobian_u(cost_fn_obj.cost, x_hat_initial, u_t).T - P_t * x_hat_initial - R_t * u_t
+    q_t = numerical_jacobian_x(cost_fn_obj.cost, x_hat_initial,
+                               u_t).T - Q_t * x_hat_initial - P_t.T * u_t
+    r_t = numerical_jacobian_u(cost_fn_obj.cost, x_hat_initial,
+                               u_t).T - P_t * x_hat_initial - R_t * u_t
 
-  q_scalar_t = cost_fn_obj.cost(x_hat_initial, u_t) - 0.5 * (x_hat_initial.T * (Q_t * x_hat_initial + P_t.T * u_t) + u_t.T * (P_t * x_hat_initial + R_t * u_t)) - x_hat_initial.T * q_t - u_t.T * r_t
+    q_scalar_t = cost_fn_obj.cost(
+        x_hat_initial,
+        u_t) - 0.5 * (x_hat_initial.T *
+                      (Q_t * x_hat_initial + P_t.T * u_t) + u_t.T *
+                      (P_t * x_hat_initial + R_t * u_t)
+                      ) - x_hat_initial.T * q_t - u_t.T * r_t
 
-  start_A_t = numerical_jacobian_x(discrete_dynamics, x_hat_initial, u_t)
-  start_B_t = numerical_jacobian_u(discrete_dynamics, x_hat_initial, u_t)
-  x_hat_next = discrete_dynamics(x_hat_initial, u_t)
-  start_c_t = x_hat_next - start_A_t * x_hat_initial - start_B_t * u_t
+    start_A_t = numerical_jacobian_x(discrete_dynamics, x_hat_initial, u_t)
+    start_B_t = numerical_jacobian_u(discrete_dynamics, x_hat_initial, u_t)
+    x_hat_next = discrete_dynamics(x_hat_initial, u_t)
+    start_c_t = x_hat_next - start_A_t * x_hat_initial - start_B_t * u_t
 
-  B_svd_u, B_svd_sigma_diag, B_svd_v = numpy.linalg.svd(start_B_t)
-  B_svd_sigma = numpy.matrix(numpy.zeros(start_B_t.shape))
-  B_svd_sigma[0:B_svd_sigma_diag.shape[0], 0:B_svd_sigma_diag.shape[0]] = numpy.diag(B_svd_sigma_diag)
+    B_svd_u, B_svd_sigma_diag, B_svd_v = numpy.linalg.svd(start_B_t)
+    B_svd_sigma = numpy.matrix(numpy.zeros(start_B_t.shape))
+    B_svd_sigma[0:B_svd_sigma_diag.shape[0],
+                0:B_svd_sigma_diag.shape[0]] = numpy.diag(B_svd_sigma_diag)
 
-  B_svd_sigma_inv = numpy.matrix(numpy.zeros(start_B_t.shape)).T
-  B_svd_sigma_inv[0:B_svd_sigma_diag.shape[0], 0:B_svd_sigma_diag.shape[0]] = numpy.linalg.inv(numpy.diag(B_svd_sigma_diag))
-  B_svd_inv = B_svd_v.T * B_svd_sigma_inv * B_svd_u.T
+    B_svd_sigma_inv = numpy.matrix(numpy.zeros(start_B_t.shape)).T
+    B_svd_sigma_inv[0:B_svd_sigma_diag.shape[0],
+                    0:B_svd_sigma_diag.shape[0]] = numpy.linalg.inv(
+                        numpy.diag(B_svd_sigma_diag))
+    B_svd_inv = B_svd_v.T * B_svd_sigma_inv * B_svd_u.T
 
-  L_bar_t[1] = B_svd_inv
-  l_bar_t[1] = -B_svd_inv * (start_A_t * x_hat_initial + start_c_t)
+    L_bar_t[1] = B_svd_inv
+    l_bar_t[1] = -B_svd_inv * (start_A_t * x_hat_initial + start_c_t)
 
-  S_bar_t[1] = L_bar_t[1].T * R_t * L_bar_t[1]
+    S_bar_t[1] = L_bar_t[1].T * R_t * L_bar_t[1]
 
-  TotalS_1 = start_B_t.T * S_t[1] * start_B_t + R_t
-  Totals_1 = start_B_t.T * S_t[1] * (start_c_t + start_A_t * x_hat_initial) + start_B_t.T * s_t[1] + P_t * x_hat_initial + r_t
-  Totals_scalar_1 = 0.5 * (start_c_t.T + x_hat_initial.T * start_A_t.T) * S_t[1] * (start_c_t + start_A_t * x_hat_initial) + s_scalar_t[1] + x_hat_initial.T * q_t + q_scalar_t + 0.5 * x_hat_initial.T * Q_t * x_hat_initial + (start_c_t.T + x_hat_initial.T * start_A_t.T) * s_t[1]
+    TotalS_1 = start_B_t.T * S_t[1] * start_B_t + R_t
+    Totals_1 = start_B_t.T * S_t[1] * (
+        start_c_t + start_A_t *
+        x_hat_initial) + start_B_t.T * s_t[1] + P_t * x_hat_initial + r_t
+    Totals_scalar_1 = 0.5 * (
+        start_c_t.T + x_hat_initial.T * start_A_t.T
+    ) * S_t[1] * (start_c_t + start_A_t * x_hat_initial) + s_scalar_t[
+        1] + x_hat_initial.T * q_t + q_scalar_t + 0.5 * x_hat_initial.T * Q_t * x_hat_initial + (
+            start_c_t.T + x_hat_initial.T * start_A_t.T) * s_t[1]
 
-  optimal_u_1 = -numpy.linalg.solve(TotalS_1, Totals_1)
-  optimal_x_1 = start_A_t * x_hat_initial + start_B_t * optimal_u_1 + start_c_t
+    optimal_u_1 = -numpy.linalg.solve(TotalS_1, Totals_1)
+    optimal_x_1 = start_A_t * x_hat_initial + start_B_t * optimal_u_1 + start_c_t
 
-  S_bar_1_eigh_eigenvalues, S_bar_1_eigh_eigenvectors = numpy.linalg.eigh(S_bar_t[1])
-  S_bar_1_eigh = numpy.matrix(numpy.diag(S_bar_1_eigh_eigenvalues))
-  S_bar_1_eigh_eigenvalues_stiff = S_bar_1_eigh_eigenvalues.copy()
-  for i in range(S_bar_1_eigh_eigenvalues_stiff.shape[0]):
-    if abs(S_bar_1_eigh_eigenvalues_stiff[i]) < 1e-8:
-      S_bar_1_eigh_eigenvalues_stiff[i] = max(S_bar_1_eigh_eigenvalues_stiff) * 1.0
+    S_bar_1_eigh_eigenvalues, S_bar_1_eigh_eigenvectors = numpy.linalg.eigh(
+        S_bar_t[1])
+    S_bar_1_eigh = numpy.matrix(numpy.diag(S_bar_1_eigh_eigenvalues))
+    S_bar_1_eigh_eigenvalues_stiff = S_bar_1_eigh_eigenvalues.copy()
+    for i in range(S_bar_1_eigh_eigenvalues_stiff.shape[0]):
+        if abs(S_bar_1_eigh_eigenvalues_stiff[i]) < 1e-8:
+            S_bar_1_eigh_eigenvalues_stiff[i] = max(
+                S_bar_1_eigh_eigenvalues_stiff) * 1.0
 
-  #print 'eigh eigenvalues of S bar', S_bar_1_eigh_eigenvalues
-  #print 'eigh eigenvectors of S bar', S_bar_1_eigh_eigenvectors.T
+    #print 'eigh eigenvalues of S bar', S_bar_1_eigh_eigenvalues
+    #print 'eigh eigenvectors of S bar', S_bar_1_eigh_eigenvectors.T
 
-  #print 'S bar eig recreate', S_bar_1_eigh_eigenvectors * numpy.matrix(numpy.diag(S_bar_1_eigh_eigenvalues)) * S_bar_1_eigh_eigenvectors.T
-  #print 'S bar eig recreate error', (S_bar_1_eigh_eigenvectors * numpy.matrix(numpy.diag(S_bar_1_eigh_eigenvalues)) * S_bar_1_eigh_eigenvectors.T - S_bar_t[1])
+    #print 'S bar eig recreate', S_bar_1_eigh_eigenvectors * numpy.matrix(numpy.diag(S_bar_1_eigh_eigenvalues)) * S_bar_1_eigh_eigenvectors.T
+    #print 'S bar eig recreate error', (S_bar_1_eigh_eigenvectors * numpy.matrix(numpy.diag(S_bar_1_eigh_eigenvalues)) * S_bar_1_eigh_eigenvectors.T - S_bar_t[1])
 
-  S_bar_stiff = S_bar_1_eigh_eigenvectors * numpy.matrix(numpy.diag(S_bar_1_eigh_eigenvalues_stiff)) * S_bar_1_eigh_eigenvectors.T
+    S_bar_stiff = S_bar_1_eigh_eigenvectors * numpy.matrix(
+        numpy.diag(
+            S_bar_1_eigh_eigenvalues_stiff)) * S_bar_1_eigh_eigenvectors.T
 
-  print 'Min u', -numpy.linalg.solve(TotalS_1, Totals_1)
-  print 'Min x_hat', optimal_x_1
-  s_bar_t[1] = -s_t[1] - (S_bar_stiff + S_t[1]) * optimal_x_1
-  s_scalar_bar_t[1] = 0.5 * (optimal_u_1.T * TotalS_1 * optimal_u_1 - optimal_x_1.T * (S_bar_stiff + S_t[1]) * optimal_x_1) + optimal_u_1.T * Totals_1 - optimal_x_1.T * (s_bar_t[1] + s_t[1]) - s_scalar_t[1] + Totals_scalar_1
+    print 'Min u', -numpy.linalg.solve(TotalS_1, Totals_1)
+    print 'Min x_hat', optimal_x_1
+    s_bar_t[1] = -s_t[1] - (S_bar_stiff + S_t[1]) * optimal_x_1
+    s_scalar_bar_t[1] = 0.5 * (
+        optimal_u_1.T * TotalS_1 * optimal_u_1 - optimal_x_1.T *
+        (S_bar_stiff + S_t[1]) *
+        optimal_x_1) + optimal_u_1.T * Totals_1 - optimal_x_1.T * (
+            s_bar_t[1] + s_t[1]) - s_scalar_t[1] + Totals_scalar_1
 
-  print 'optimal_u_1', optimal_u_1
-  print 'TotalS_1', TotalS_1
-  print 'Totals_1', Totals_1
-  print 'Totals_scalar_1', Totals_scalar_1
-  print 'overall cost 1',  0.5 * (optimal_u_1.T * TotalS_1 * optimal_u_1) + optimal_u_1.T * Totals_1 + Totals_scalar_1
-  print 'overall cost 0',  0.5 * (x_hat_initial.T * S_t[0] * x_hat_initial) +  x_hat_initial.T * s_t[0] + s_scalar_t[0]
+    print 'optimal_u_1', optimal_u_1
+    print 'TotalS_1', TotalS_1
+    print 'Totals_1', Totals_1
+    print 'Totals_scalar_1', Totals_scalar_1
+    print 'overall cost 1', 0.5 * (
+        optimal_u_1.T * TotalS_1 *
+        optimal_u_1) + optimal_u_1.T * Totals_1 + Totals_scalar_1
+    print 'overall cost 0', 0.5 * (x_hat_initial.T * S_t[0] * x_hat_initial
+                                   ) + x_hat_initial.T * s_t[0] + s_scalar_t[0]
 
-  print 't forward 0'
-  print 'x_hat_initial[ 0]: %s' % (x_hat_initial)
-  print 'x_hat[%2d]: %s' % (0, x_hat.T)
-  print 'x_hat_next[%2d]: %s' % (0, x_hat_next.T)
-  print 'u[%2d]: %s' % (0, u_t.T)
-  print ('L[ 0]: %s' % (L_t[0],)).replace('\n', '\n        ')
-  print ('l[ 0]: %s' % (l_t[0],)).replace('\n', '\n        ')
+    print 't forward 0'
+    print 'x_hat_initial[ 0]: %s' % (x_hat_initial)
+    print 'x_hat[%2d]: %s' % (0, x_hat.T)
+    print 'x_hat_next[%2d]: %s' % (0, x_hat_next.T)
+    print 'u[%2d]: %s' % (0, u_t.T)
+    print('L[ 0]: %s' % (L_t[0], )).replace('\n', '\n        ')
+    print('l[ 0]: %s' % (l_t[0], )).replace('\n', '\n        ')
 
-  print ('A_t[%2d]: %s' % (0, start_A_t)).replace('\n', '\n         ')
-  print ('B_t[%2d]: %s' % (0, start_B_t)).replace('\n', '\n         ')
-  print ('c_t[%2d]: %s' % (0, start_c_t)).replace('\n', '\n         ')
+    print('A_t[%2d]: %s' % (0, start_A_t)).replace('\n', '\n         ')
+    print('B_t[%2d]: %s' % (0, start_B_t)).replace('\n', '\n         ')
+    print('c_t[%2d]: %s' % (0, start_c_t)).replace('\n', '\n         ')
 
-  # TODO(austin): optimal_x_1 is x_hat
-  x_hat = -numpy.linalg.solve((S_t[1] + S_bar_stiff), (s_t[1] + s_bar_t[1]))
-  print 'new xhat', x_hat
+    # TODO(austin): optimal_x_1 is x_hat
+    x_hat = -numpy.linalg.solve((S_t[1] + S_bar_stiff), (s_t[1] + s_bar_t[1]))
+    print 'new xhat', x_hat
 
-  S_bar_t[1] = S_bar_stiff
+    S_bar_t[1] = S_bar_stiff
 
-  last_x_hat_t[1] = x_hat
+    last_x_hat_t[1] = x_hat
 
-  for t in range(1, l):
-    print 't forward', t
-    u_t = L_t[t] * x_hat + l_t[t]
+    for t in range(1, l):
+        print 't forward', t
+        u_t = L_t[t] * x_hat + l_t[t]
 
-    x_hat_next = discrete_dynamics(x_hat, u_t)
-    A_bar_t = numerical_jacobian_x(inverse_discrete_dynamics, x_hat_next, u_t)
-    B_bar_t = numerical_jacobian_u(inverse_discrete_dynamics, x_hat_next, u_t)
-    c_bar_t = x_hat - A_bar_t * x_hat_next - B_bar_t * u_t
+        x_hat_next = discrete_dynamics(x_hat, u_t)
+        A_bar_t = numerical_jacobian_x(inverse_discrete_dynamics, x_hat_next,
+                                       u_t)
+        B_bar_t = numerical_jacobian_u(inverse_discrete_dynamics, x_hat_next,
+                                       u_t)
+        c_bar_t = x_hat - A_bar_t * x_hat_next - B_bar_t * u_t
 
-    print 'x_hat[%2d]: %s' % (t, x_hat.T)
-    print 'x_hat_next[%2d]: %s' % (t, x_hat_next.T)
-    print ('L[%2d]: %s' % (t, L_t[t],)).replace('\n', '\n        ')
-    print ('l[%2d]: %s' % (t, l_t[t],)).replace('\n', '\n        ')
-    print 'u[%2d]: %s' % (t, u_t.T)
+        print 'x_hat[%2d]: %s' % (t, x_hat.T)
+        print 'x_hat_next[%2d]: %s' % (t, x_hat_next.T)
+        print('L[%2d]: %s' % (
+            t,
+            L_t[t],
+        )).replace('\n', '\n        ')
+        print('l[%2d]: %s' % (
+            t,
+            l_t[t],
+        )).replace('\n', '\n        ')
+        print 'u[%2d]: %s' % (t, u_t.T)
 
-    print ('A_bar_t[%2d]: %s' % (t, A_bar_t)).replace('\n', '\n             ')
-    print ('B_bar_t[%2d]: %s' % (t, B_bar_t)).replace('\n', '\n             ')
-    print ('c_bar_t[%2d]: %s' % (t, c_bar_t)).replace('\n', '\n             ')
+        print('A_bar_t[%2d]: %s' % (t, A_bar_t)).replace(
+            '\n', '\n             ')
+        print('B_bar_t[%2d]: %s' % (t, B_bar_t)).replace(
+            '\n', '\n             ')
+        print('c_bar_t[%2d]: %s' % (t, c_bar_t)).replace(
+            '\n', '\n             ')
 
-    Q_t = numerical_jacobian_x_x(cost_fn_obj.cost, x_hat, u_t)
-    P_t = numerical_jacobian_x_u(cost_fn_obj.cost, x_hat, u_t)
-    R_t = numerical_jacobian_u_u(cost_fn_obj.cost, x_hat, u_t)
+        Q_t = numerical_jacobian_x_x(cost_fn_obj.cost, x_hat, u_t)
+        P_t = numerical_jacobian_x_u(cost_fn_obj.cost, x_hat, u_t)
+        R_t = numerical_jacobian_u_u(cost_fn_obj.cost, x_hat, u_t)
 
-    q_t = numerical_jacobian_x(cost_fn_obj.cost, x_hat, u_t).T - Q_t * x_hat - P_t.T * u_t
-    r_t = numerical_jacobian_u(cost_fn_obj.cost, x_hat, u_t).T - P_t * x_hat - R_t * u_t
+        q_t = numerical_jacobian_x(cost_fn_obj.cost, x_hat,
+                                   u_t).T - Q_t * x_hat - P_t.T * u_t
+        r_t = numerical_jacobian_u(cost_fn_obj.cost, x_hat,
+                                   u_t).T - P_t * x_hat - R_t * u_t
 
-    q_scalar_t = cost_fn_obj.cost(x_hat, u_t) - 0.5 * (x_hat.T * (Q_t * x_hat + P_t.T * u_t) + u_t.T * (P_t * x_hat + R_t * u_t)) - x_hat.T * q_t - u_t.T * r_t
+        q_scalar_t = cost_fn_obj.cost(x_hat, u_t) - 0.5 * (
+            x_hat.T * (Q_t * x_hat + P_t.T * u_t) + u_t.T *
+            (P_t * x_hat + R_t * u_t)) - x_hat.T * q_t - u_t.T * r_t
 
-    C_bar_t = B_bar_t.T * (S_bar_t[t] + Q_t) * A_bar_t + P_t * A_bar_t
-    D_bar_t = A_bar_t.T * (S_bar_t[t] + Q_t) * A_bar_t
-    E_bar_t = B_bar_t.T * (S_bar_t[t] + Q_t) * B_bar_t + R_t + P_t * B_bar_t + B_bar_t.T * P_t.T
-    d_bar_t = A_bar_t.T * (s_bar_t[t] + q_t) + A_bar_t.T * (S_bar_t[t] + Q_t) * c_bar_t
-    e_bar_t = r_t + P_t * c_bar_t + B_bar_t.T * s_bar_t[t] + B_bar_t.T * (S_bar_t[t] + Q_t) * c_bar_t
+        C_bar_t = B_bar_t.T * (S_bar_t[t] + Q_t) * A_bar_t + P_t * A_bar_t
+        D_bar_t = A_bar_t.T * (S_bar_t[t] + Q_t) * A_bar_t
+        E_bar_t = B_bar_t.T * (
+            S_bar_t[t] +
+            Q_t) * B_bar_t + R_t + P_t * B_bar_t + B_bar_t.T * P_t.T
+        d_bar_t = A_bar_t.T * (s_bar_t[t] + q_t) + A_bar_t.T * (S_bar_t[t] +
+                                                                Q_t) * c_bar_t
+        e_bar_t = r_t + P_t * c_bar_t + B_bar_t.T * s_bar_t[t] + B_bar_t.T * (
+            S_bar_t[t] + Q_t) * c_bar_t
 
-    L_bar_t[t + 1] = -numpy.linalg.inv(E_bar_t) * C_bar_t
-    l_bar_t[t + 1] = -numpy.linalg.inv(E_bar_t) * e_bar_t
+        L_bar_t[t + 1] = -numpy.linalg.inv(E_bar_t) * C_bar_t
+        l_bar_t[t + 1] = -numpy.linalg.inv(E_bar_t) * e_bar_t
 
-    S_bar_t[t + 1] = D_bar_t + C_bar_t.T * L_bar_t[t + 1]
-    s_bar_t[t + 1] = d_bar_t + C_bar_t.T * l_bar_t[t + 1]
-    s_scalar_bar_t[t + 1] = -0.5 * e_bar_t.T * numpy.linalg.inv(E_bar_t) * e_bar_t + 0.5 * c_bar_t.T * (S_bar_t[t] + Q_t) * c_bar_t + c_bar_t.T * s_bar_t[t] + c_bar_t.T * q_t + s_scalar_bar_t[t] + q_scalar_t
+        S_bar_t[t + 1] = D_bar_t + C_bar_t.T * L_bar_t[t + 1]
+        s_bar_t[t + 1] = d_bar_t + C_bar_t.T * l_bar_t[t + 1]
+        s_scalar_bar_t[t + 1] = -0.5 * e_bar_t.T * numpy.linalg.inv(
+            E_bar_t) * e_bar_t + 0.5 * c_bar_t.T * (
+                S_bar_t[t] + Q_t) * c_bar_t + c_bar_t.T * s_bar_t[
+                    t] + c_bar_t.T * q_t + s_scalar_bar_t[t] + q_scalar_t
 
-    x_hat = -numpy.linalg.solve((S_t[t + 1] + S_bar_t[t + 1]), (s_t[t + 1] + s_bar_t[t + 1]))
+        x_hat = -numpy.linalg.solve((S_t[t + 1] + S_bar_t[t + 1]),
+                                    (s_t[t + 1] + s_bar_t[t + 1]))
 
-  S_t[l] = cost_fn_obj.estimate_Q_final(x_hat)
-  s_t[l] = cost_fn_obj.estimate_q_final(x_hat)
-  x_hat = -numpy.linalg.inv(S_t[l] + S_bar_t[l]) * (s_t[l] + s_bar_t[l])
+    S_t[l] = cost_fn_obj.estimate_Q_final(x_hat)
+    s_t[l] = cost_fn_obj.estimate_q_final(x_hat)
+    x_hat = -numpy.linalg.inv(S_t[l] + S_bar_t[l]) * (s_t[l] + s_bar_t[l])
 
-  for t in reversed(range(l)):
-    print 't backward', t
-    # TODO(austin): I don't think we can use L_t like this here.
-    # I think we are off by 1 somewhere...
-    u_t = L_bar_t[t + 1] * x_hat + l_bar_t[t + 1]
+    for t in reversed(range(l)):
+        print 't backward', t
+        # TODO(austin): I don't think we can use L_t like this here.
+        # I think we are off by 1 somewhere...
+        u_t = L_bar_t[t + 1] * x_hat + l_bar_t[t + 1]
 
-    x_hat_prev = inverse_discrete_dynamics(x_hat, u_t)
-    print 'x_hat[%2d]: %s' % (t, x_hat.T)
-    print 'x_hat_prev[%2d]: %s' % (t, x_hat_prev.T)
-    print ('L_bar[%2d]: %s' % (t + 1, L_bar_t[t + 1])).replace('\n', '\n            ')
-    print ('l_bar[%2d]: %s' % (t + 1, l_bar_t[t + 1])).replace('\n', '\n            ')
-    print 'u[%2d]: %s' % (t, u_t.T)
-    # Now compute the linearized A, B, and C
-    # Start by doing it numerically, and then optimize.
-    A_t = numerical_jacobian_x(discrete_dynamics, x_hat_prev, u_t)
-    B_t = numerical_jacobian_u(discrete_dynamics, x_hat_prev, u_t)
-    c_t = x_hat - A_t * x_hat_prev - B_t * u_t
+        x_hat_prev = inverse_discrete_dynamics(x_hat, u_t)
+        print 'x_hat[%2d]: %s' % (t, x_hat.T)
+        print 'x_hat_prev[%2d]: %s' % (t, x_hat_prev.T)
+        print('L_bar[%2d]: %s' % (t + 1, L_bar_t[t + 1])).replace(
+            '\n', '\n            ')
+        print('l_bar[%2d]: %s' % (t + 1, l_bar_t[t + 1])).replace(
+            '\n', '\n            ')
+        print 'u[%2d]: %s' % (t, u_t.T)
+        # Now compute the linearized A, B, and C
+        # Start by doing it numerically, and then optimize.
+        A_t = numerical_jacobian_x(discrete_dynamics, x_hat_prev, u_t)
+        B_t = numerical_jacobian_u(discrete_dynamics, x_hat_prev, u_t)
+        c_t = x_hat - A_t * x_hat_prev - B_t * u_t
 
-    print ('A_t[%2d]: %s' % (t, A_t)).replace('\n', '\n         ')
-    print ('B_t[%2d]: %s' % (t, B_t)).replace('\n', '\n         ')
-    print ('c_t[%2d]: %s' % (t, c_t)).replace('\n', '\n         ')
+        print('A_t[%2d]: %s' % (t, A_t)).replace('\n', '\n         ')
+        print('B_t[%2d]: %s' % (t, B_t)).replace('\n', '\n         ')
+        print('c_t[%2d]: %s' % (t, c_t)).replace('\n', '\n         ')
 
-    Q_t = numerical_jacobian_x_x(cost_fn_obj.cost, x_hat_prev, u_t)
-    P_t = numerical_jacobian_x_u(cost_fn_obj.cost, x_hat_prev, u_t)
-    P_T_t = numerical_jacobian_u_x(cost_fn_obj.cost, x_hat_prev, u_t)
-    R_t = numerical_jacobian_u_u(cost_fn_obj.cost, x_hat_prev, u_t)
+        Q_t = numerical_jacobian_x_x(cost_fn_obj.cost, x_hat_prev, u_t)
+        P_t = numerical_jacobian_x_u(cost_fn_obj.cost, x_hat_prev, u_t)
+        P_T_t = numerical_jacobian_u_x(cost_fn_obj.cost, x_hat_prev, u_t)
+        R_t = numerical_jacobian_u_u(cost_fn_obj.cost, x_hat_prev, u_t)
 
-    q_t = numerical_jacobian_x(cost_fn_obj.cost, x_hat_prev, u_t).T - Q_t * x_hat_prev - P_T_t * u_t
-    r_t = numerical_jacobian_u(cost_fn_obj.cost, x_hat_prev, u_t).T - P_t * x_hat_prev - R_t * u_t
+        q_t = numerical_jacobian_x(cost_fn_obj.cost, x_hat_prev,
+                                   u_t).T - Q_t * x_hat_prev - P_T_t * u_t
+        r_t = numerical_jacobian_u(cost_fn_obj.cost, x_hat_prev,
+                                   u_t).T - P_t * x_hat_prev - R_t * u_t
 
-    q_scalar_t = cost_fn_obj.cost(x_hat_prev, u_t) - 0.5 * (x_hat_prev.T * (Q_t * x_hat_prev + P_t.T * u_t) + u_t.T * (P_t * x_hat_prev + R_t * u_t)) - x_hat_prev.T * q_t - u_t.T * r_t
+        q_scalar_t = cost_fn_obj.cost(x_hat_prev, u_t) - 0.5 * (
+            x_hat_prev.T * (Q_t * x_hat_prev + P_t.T * u_t) + u_t.T *
+            (P_t * x_hat_prev + R_t * u_t)) - x_hat_prev.T * q_t - u_t.T * r_t
 
-    C_t = P_t + B_t.T * S_t[t + 1] * A_t
-    D_t = Q_t + A_t.T * S_t[t + 1] * A_t
-    E_t = R_t + B_t.T * S_t[t + 1] * B_t
-    d_t = q_t + A_t.T * s_t[t + 1] + A_t.T * S_t[t + 1] * c_t
-    e_t = r_t + B_t.T * s_t[t + 1] + B_t.T * S_t[t + 1] * c_t
-    L_t[t] = -numpy.linalg.inv(E_t) * C_t
-    l_t[t] = -numpy.linalg.inv(E_t) * e_t
-    s_t[t] = d_t + C_t.T * l_t[t]
-    S_t[t] = D_t + C_t.T * L_t[t]
-    s_scalar_t[t] = q_scalar_t - 0.5 * e_t.T * numpy.linalg.inv(E_t) * e_t + 0.5 * c_t.T * S_t[t + 1] * c_t + c_t.T * s_t[t + 1] + s_scalar_t[t + 1]
+        C_t = P_t + B_t.T * S_t[t + 1] * A_t
+        D_t = Q_t + A_t.T * S_t[t + 1] * A_t
+        E_t = R_t + B_t.T * S_t[t + 1] * B_t
+        d_t = q_t + A_t.T * s_t[t + 1] + A_t.T * S_t[t + 1] * c_t
+        e_t = r_t + B_t.T * s_t[t + 1] + B_t.T * S_t[t + 1] * c_t
+        L_t[t] = -numpy.linalg.inv(E_t) * C_t
+        l_t[t] = -numpy.linalg.inv(E_t) * e_t
+        s_t[t] = d_t + C_t.T * l_t[t]
+        S_t[t] = D_t + C_t.T * L_t[t]
+        s_scalar_t[t] = q_scalar_t - 0.5 * e_t.T * numpy.linalg.inv(
+            E_t) * e_t + 0.5 * c_t.T * S_t[t + 1] * c_t + c_t.T * s_t[
+                t + 1] + s_scalar_t[t + 1]
 
-    x_hat = -numpy.linalg.solve((S_t[t] + S_bar_t[t]), (s_t[t] + s_bar_t[t]))
-    if t == 0:
-      last_x_hat_t[t] = x_hat_initial
-    else:
-      last_x_hat_t[t] = x_hat
+        x_hat = -numpy.linalg.solve((S_t[t] + S_bar_t[t]),
+                                    (s_t[t] + s_bar_t[t]))
+        if t == 0:
+            last_x_hat_t[t] = x_hat_initial
+        else:
+            last_x_hat_t[t] = x_hat
 
-  x_hat_t = [x_hat_initial]
+    x_hat_t = [x_hat_initial]
 
-  pylab.figure('states %d' % a)
-  pylab.ion()
-  for dim in range(num_states):
-    pylab.plot(numpy.arange(len(last_x_hat_t)),
-             [x_hat_loop[dim, 0] for x_hat_loop in last_x_hat_t], marker='o', label='Xhat[%d]'%dim)
-  pylab.legend()
-  pylab.draw()
+    pylab.figure('states %d' % a)
+    pylab.ion()
+    for dim in range(num_states):
+        pylab.plot(numpy.arange(len(last_x_hat_t)),
+                   [x_hat_loop[dim, 0] for x_hat_loop in last_x_hat_t],
+                   marker='o',
+                   label='Xhat[%d]' % dim)
+    pylab.legend()
+    pylab.draw()
 
-  pylab.figure('xy %d' % a)
-  pylab.ion()
-  pylab.plot([x_hat_loop[0, 0] for x_hat_loop in last_x_hat_t],
-             [x_hat_loop[1, 0] for x_hat_loop in last_x_hat_t], marker='o', label='trajectory')
-  pylab.legend()
-  pylab.draw()
+    pylab.figure('xy %d' % a)
+    pylab.ion()
+    pylab.plot([x_hat_loop[0, 0] for x_hat_loop in last_x_hat_t],
+               [x_hat_loop[1, 0] for x_hat_loop in last_x_hat_t],
+               marker='o',
+               label='trajectory')
+    pylab.legend()
+    pylab.draw()
 
 final_u_t = [numpy.matrix(numpy.zeros((num_inputs, 1))) for _ in range(l + 1)]
 cost_to_go = []
 cost_to_come = []
 cost = []
 for t in range(l):
-  cost_to_go.append((0.5 * last_x_hat_t[t].T * S_t[t] * last_x_hat_t[t] + last_x_hat_t[t].T * s_t[t] + s_scalar_t[t])[0, 0])
-  cost_to_come.append((0.5 * last_x_hat_t[t].T * S_bar_t[t] * last_x_hat_t[t] + last_x_hat_t[t].T * s_bar_t[t] + s_scalar_bar_t[t])[0, 0])
-  cost.append(cost_to_go[-1] + cost_to_come[-1])
-  final_u_t[t] = L_t[t] * last_x_hat_t[t] + l_t[t]
+    cost_to_go.append((0.5 * last_x_hat_t[t].T * S_t[t] * last_x_hat_t[t] +
+                       last_x_hat_t[t].T * s_t[t] + s_scalar_t[t])[0, 0])
+    cost_to_come.append(
+        (0.5 * last_x_hat_t[t].T * S_bar_t[t] * last_x_hat_t[t] +
+         last_x_hat_t[t].T * s_bar_t[t] + s_scalar_bar_t[t])[0, 0])
+    cost.append(cost_to_go[-1] + cost_to_come[-1])
+    final_u_t[t] = L_t[t] * last_x_hat_t[t] + l_t[t]
 
 for t in range(l):
-  A_t = numerical_jacobian_x(discrete_dynamics, last_x_hat_t[t], final_u_t[t])
-  B_t = numerical_jacobian_u(discrete_dynamics, last_x_hat_t[t], final_u_t[t])
-  c_t = discrete_dynamics(last_x_hat_t[t], final_u_t[t]) - A_t * last_x_hat_t[t] - B_t * final_u_t[t]
-  print("Infeasability at", t, "is", ((A_t * last_x_hat_t[t] + B_t * final_u_t[t] + c_t) - last_x_hat_t[t + 1]).T)
+    A_t = numerical_jacobian_x(discrete_dynamics, last_x_hat_t[t],
+                               final_u_t[t])
+    B_t = numerical_jacobian_u(discrete_dynamics, last_x_hat_t[t],
+                               final_u_t[t])
+    c_t = discrete_dynamics(
+        last_x_hat_t[t],
+        final_u_t[t]) - A_t * last_x_hat_t[t] - B_t * final_u_t[t]
+    print("Infeasability at", t, "is",
+          ((A_t * last_x_hat_t[t] + B_t * final_u_t[t] + c_t) -
+           last_x_hat_t[t + 1]).T)
 
 pylab.figure('u')
 samples = numpy.arange(len(final_u_t))
 for i in range(num_inputs):
-  pylab.plot(samples, [u[i, 0] for u in final_u_t], label='u[%d]' % i, marker='o')
-  pylab.legend()
+    pylab.plot(samples, [u[i, 0] for u in final_u_t],
+               label='u[%d]' % i,
+               marker='o')
+    pylab.legend()
 
 pylab.figure('cost')
 cost_samples = numpy.arange(len(cost))
diff --git a/y2014/control_loops/python/extended_lqr_derivation.py b/y2014/control_loops/python/extended_lqr_derivation.py
index 010c5de..6857654 100755
--- a/y2014/control_loops/python/extended_lqr_derivation.py
+++ b/y2014/control_loops/python/extended_lqr_derivation.py
@@ -6,7 +6,6 @@
 import random
 
 import sympy
-
 '''
 * `x_t1` means `x_{t + 1}`. Using `'x_t + 1'` as the symbol name makes the non-
   latex output really confusing, so not doing that.
@@ -57,351 +56,335 @@
 ub = sympy.MatrixSymbol('ubold', number_of_inputs, 1)
 
 CONSTANTS = set([
-    A_t, B_t, cb_t,
-    S_t1, sb_t1, s_t1,
-    A_tb, B_tb, cb_tb,
-    S_tb, sb_tb, s_tb,
-    P_t, Q_t, R_t, qb_t, rb_t, q_t,
-    ])
+    A_t,
+    B_t,
+    cb_t,
+    S_t1,
+    sb_t1,
+    s_t1,
+    A_tb,
+    B_tb,
+    cb_tb,
+    S_tb,
+    sb_tb,
+    s_tb,
+    P_t,
+    Q_t,
+    R_t,
+    qb_t,
+    rb_t,
+    q_t,
+])
 
 SYMMETRIC_CONSTANTS = set([
-    S_t1, S_tb,
-    Q_t, R_t,
-    ])
+    S_t1,
+    S_tb,
+    Q_t,
+    R_t,
+])
+
 
 def verify_equivalent(a, b, inverses={}):
-  def get_matrices(m):
-    matrices = m.atoms(sympy.MatrixSymbol)
-    new_matrices = set()
-    for matrix in matrices:
-      if matrix in inverses:
-        new_matrices.update(inverses[matrix].atoms(sympy.MatrixSymbol))
-    matrices.update(new_matrices)
-  a_matrices, b_matrices = get_matrices(a), get_matrices(b)
-  if a_matrices != b_matrices:
-    raise RuntimeError('matrices different: %s vs %s' % (a_matrices,
-                                                         b_matrices))
-  a_symbols, b_symbols = a.atoms(sympy.Symbol), b.atoms(sympy.Symbol)
-  if a_symbols != b_symbols:
-    raise RuntimeError('symbols different: %s vs %s' % (a_symbols, b_symbols))
-  if not a_symbols < DIMENSIONS:
-    raise RuntimeError('not sure what to do with %s' % (a_symbols - DIMENSIONS))
 
-  if a.shape != b.shape:
-    raise RuntimeError('Different shapes: %s and %s' % (a.shape, b.shape))
+    def get_matrices(m):
+        matrices = m.atoms(sympy.MatrixSymbol)
+        new_matrices = set()
+        for matrix in matrices:
+            if matrix in inverses:
+                new_matrices.update(inverses[matrix].atoms(sympy.MatrixSymbol))
+        matrices.update(new_matrices)
 
-  for _ in range(10):
-    subs_symbols = {s: random.randint(1, 5) for s in a_symbols}
+    a_matrices, b_matrices = get_matrices(a), get_matrices(b)
+    if a_matrices != b_matrices:
+        raise RuntimeError('matrices different: %s vs %s' %
+                           (a_matrices, b_matrices))
+    a_symbols, b_symbols = a.atoms(sympy.Symbol), b.atoms(sympy.Symbol)
+    if a_symbols != b_symbols:
+        raise RuntimeError('symbols different: %s vs %s' %
+                           (a_symbols, b_symbols))
+    if not a_symbols < DIMENSIONS:
+        raise RuntimeError('not sure what to do with %s' %
+                           (a_symbols - DIMENSIONS))
+
+    if a.shape != b.shape:
+        raise RuntimeError('Different shapes: %s and %s' % (a.shape, b.shape))
+
     for _ in range(10):
-      diff = a - b
-      subs_matrices = {}
-      def get_replacement(*args):
-        try:
-          m = sympy.MatrixSymbol(*args)
-          if m not in subs_matrices:
-            if m in inverses:
-              i = inverses[m].replace(sympy.MatrixSymbol, get_replacement, simultaneous=False)
-              i_evaled = sympy.ImmutableMatrix(i.rows, i.cols,
-                                               lambda x,y: i[x, y].evalf())
-              subs_matrices[m] = i_evaled.I
-            else:
-              rows = m.rows.subs(subs_symbols)
-              cols = m.cols.subs(subs_symbols)
-              new_m = sympy.ImmutableMatrix(rows, cols,
-                                            lambda i,j: random.uniform(-5, 5))
-              if m in SYMMETRIC_CONSTANTS:
-                if rows != cols:
-                  raise RuntimeError('Non-square symmetric matrix %s' % m)
-                def calculate_cell(i, j):
-                  if i > j:
-                    return new_m[i, j]
-                  else:
-                    return new_m[j, i]
-                new_m = sympy.ImmutableMatrix(rows, cols, calculate_cell)
-              subs_matrices[m] = new_m
-          return subs_matrices[m]
-        except AttributeError as e:
-          # Stupid sympy silently eats AttributeErrors and treats them as
-          # "no replacement"...
-          raise RuntimeError(e)
-      # subs fails when it tries doing matrix multiplies between fixed-size ones
-      # and the rest of the equation which still has the symbolic-sized ones.
-      # subs(simultaneous=True) wants to put dummies in for everything first,
-      # and Dummy().transpose() is broken.
-      # replace() has the same issue as subs with simultaneous being True.
-      # lambdify() has no idea what to do with the transposes if you replace all
-      # the matrices with ones of random sizes full of dummies.
-      diff = diff.replace(sympy.MatrixSymbol, get_replacement,
-                          simultaneous=False)
-      for row in range(diff.rows):
-        for col in range(diff.cols):
-          result = diff[row, col].evalf()
-          if abs(result) > 1e-7:
-            raise RuntimeError('difference at (%s, %s) is %s' % (row, col,
-                                                                 result))
+        subs_symbols = {s: random.randint(1, 5) for s in a_symbols}
+        for _ in range(10):
+            diff = a - b
+            subs_matrices = {}
+
+            def get_replacement(*args):
+                try:
+                    m = sympy.MatrixSymbol(*args)
+                    if m not in subs_matrices:
+                        if m in inverses:
+                            i = inverses[m].replace(sympy.MatrixSymbol,
+                                                    get_replacement,
+                                                    simultaneous=False)
+                            i_evaled = sympy.ImmutableMatrix(
+                                i.rows, i.cols, lambda x, y: i[x, y].evalf())
+                            subs_matrices[m] = i_evaled.I
+                        else:
+                            rows = m.rows.subs(subs_symbols)
+                            cols = m.cols.subs(subs_symbols)
+                            new_m = sympy.ImmutableMatrix(
+                                rows, cols, lambda i, j: random.uniform(-5, 5))
+                            if m in SYMMETRIC_CONSTANTS:
+                                if rows != cols:
+                                    raise RuntimeError(
+                                        'Non-square symmetric matrix %s' % m)
+
+                                def calculate_cell(i, j):
+                                    if i > j:
+                                        return new_m[i, j]
+                                    else:
+                                        return new_m[j, i]
+
+                                new_m = sympy.ImmutableMatrix(
+                                    rows, cols, calculate_cell)
+                            subs_matrices[m] = new_m
+                    return subs_matrices[m]
+                except AttributeError as e:
+                    # Stupid sympy silently eats AttributeErrors and treats them as
+                    # "no replacement"...
+                    raise RuntimeError(e)
+
+            # subs fails when it tries doing matrix multiplies between fixed-size ones
+            # and the rest of the equation which still has the symbolic-sized ones.
+            # subs(simultaneous=True) wants to put dummies in for everything first,
+            # and Dummy().transpose() is broken.
+            # replace() has the same issue as subs with simultaneous being True.
+            # lambdify() has no idea what to do with the transposes if you replace all
+            # the matrices with ones of random sizes full of dummies.
+            diff = diff.replace(sympy.MatrixSymbol,
+                                get_replacement,
+                                simultaneous=False)
+            for row in range(diff.rows):
+                for col in range(diff.cols):
+                    result = diff[row, col].evalf()
+                    if abs(result) > 1e-7:
+                        raise RuntimeError('difference at (%s, %s) is %s' %
+                                           (row, col, result))
+
 
 def verify_arguments(f, *args):
-  matrix_atoms = f.atoms(sympy.MatrixSymbol) - CONSTANTS
-  if matrix_atoms != set(args):
-    print('Arguments expected to be %s, but are %s, in:\n%s' % (
-        sorted(args), sorted(list(matrix_atoms)), f), file=sys.stderr)
-    raise RuntimeError
+    matrix_atoms = f.atoms(sympy.MatrixSymbol) - CONSTANTS
+    if matrix_atoms != set(args):
+        print('Arguments expected to be %s, but are %s, in:\n%s' %
+              (sorted(args), sorted(list(matrix_atoms)), f),
+              file=sys.stderr)
+        raise RuntimeError
+
 
 def make_c_t():
-  x_and_u = sympy.BlockMatrix(((xb,), (ub,)))
-  c_t = (half * x_and_u.transpose() *
-         sympy.BlockMatrix(((Q_t, P_t.T), (P_t, R_t))) * x_and_u +
-         x_and_u.transpose() * sympy.BlockMatrix(((qb_t,), (rb_t,))) +
-         q_t)
-  verify_arguments(c_t, xb, ub)
-  return c_t
+    x_and_u = sympy.BlockMatrix(((xb, ), (ub, )))
+    c_t = (half * x_and_u.transpose() * sympy.BlockMatrix(
+        ((Q_t, P_t.T),
+         (P_t, R_t))) * x_and_u + x_and_u.transpose() * sympy.BlockMatrix(
+             ((qb_t, ), (rb_t, ))) + q_t)
+    verify_arguments(c_t, xb, ub)
+    return c_t
+
 
 def check_backwards_cost():
-  g_t = A_t * xb_t + B_t * ub_t + cb_t
-  verify_arguments(g_t, xb_t, ub_t)
-  v_t1 = half * xb.transpose() * S_t1 * xb + xb.transpose() * sb_t1 + s_t1
-  verify_arguments(v_t1, xb)
-  v_t = (v_t1.subs(xb, g_t) + make_c_t()).subs({xb_t: xb, ub_t: ub})
-  verify_arguments(v_t, xb, ub)
+    g_t = A_t * xb_t + B_t * ub_t + cb_t
+    verify_arguments(g_t, xb_t, ub_t)
+    v_t1 = half * xb.transpose() * S_t1 * xb + xb.transpose() * sb_t1 + s_t1
+    verify_arguments(v_t1, xb)
+    v_t = (v_t1.subs(xb, g_t) + make_c_t()).subs({xb_t: xb, ub_t: ub})
+    verify_arguments(v_t, xb, ub)
 
-  v_t_for_cost = (
-      half * (
-          xb.transpose() * (A_t.transpose() * S_t1 * A_t + Q_t) * xb +
-          ub.transpose() * (B_t.transpose() * S_t1 * A_t + P_t) * xb +
-          xb.transpose() * (A_t.transpose() * S_t1 * B_t + P_t.T) * ub +
-          ub.transpose() * (B_t.transpose() * S_t1 * B_t + R_t) * ub) +
-      xb.transpose() * (A_t.transpose() * sb_t1 + qb_t) +
-      ub.transpose() * (B_t.transpose() * sb_t1 + rb_t) +
-      cb_t.transpose() * sb_t1 +
-      s_t1 + q_t +
-      half * (cb_t.transpose() * S_t1 * cb_t +
-    xb.transpose() * A_t.transpose() * S_t1 * cb_t +
-    ub.transpose() * B_t.transpose() * S_t1 * cb_t +
-    cb_t.transpose() * S_t1 * B_t * ub +
-              cb_t.transpose() * S_t1 * A_t * xb))
-  verify_equivalent(v_t, v_t_for_cost)
+    v_t_for_cost = (
+        half * (xb.transpose() *
+                (A_t.transpose() * S_t1 * A_t + Q_t) * xb + ub.transpose() *
+                (B_t.transpose() * S_t1 * A_t + P_t) * xb + xb.transpose() *
+                (A_t.transpose() * S_t1 * B_t + P_t.T) * ub + ub.transpose() *
+                (B_t.transpose() * S_t1 * B_t + R_t) * ub) + xb.transpose() *
+        (A_t.transpose() * sb_t1 + qb_t) + ub.transpose() *
+        (B_t.transpose() * sb_t1 + rb_t) + cb_t.transpose() * sb_t1 + s_t1 +
+        q_t + half *
+        (cb_t.transpose() * S_t1 * cb_t + xb.transpose() * A_t.transpose() *
+         S_t1 * cb_t + ub.transpose() * B_t.transpose() * S_t1 * cb_t +
+         cb_t.transpose() * S_t1 * B_t * ub +
+         cb_t.transpose() * S_t1 * A_t * xb))
+    verify_equivalent(v_t, v_t_for_cost)
 
-  v_t_now = (
-      half * (xb.T * (A_t.T * S_t1 * A_t + Q_t) * xb +
-              ub.T * (B_t.T * S_t1 * A_t + P_t) * xb +
-              xb.T * (A_t.T * S_t1 * B_t + P_t.T) * ub +
-              ub.T * (B_t.T * S_t1 * B_t + R_t) * ub) +
-      xb.T * (A_t.T * sb_t1 + qb_t) +
-      ub.T * (B_t.T * sb_t1 + rb_t) +
-      cb_t.T * sb_t1 + s_t1 + q_t +
-      half * (cb_t.T * S_t1 * cb_t +
-              xb.T * A_t.T * S_t1 * cb_t +
-              ub.T * B_t.T * S_t1 * cb_t +
-              cb_t.T * S_t1 * B_t * ub +
-              cb_t.T * S_t1 * A_t * xb))
+    v_t_now = (
+        half * (xb.T * (A_t.T * S_t1 * A_t + Q_t) * xb + ub.T *
+                (B_t.T * S_t1 * A_t + P_t) * xb + xb.T *
+                (A_t.T * S_t1 * B_t + P_t.T) * ub + ub.T *
+                (B_t.T * S_t1 * B_t + R_t) * ub) + xb.T *
+        (A_t.T * sb_t1 + qb_t) + ub.T * (B_t.T * sb_t1 + rb_t) +
+        cb_t.T * sb_t1 + s_t1 + q_t + half *
+        (cb_t.T * S_t1 * cb_t + xb.T * A_t.T * S_t1 * cb_t + ub.T * B_t.T *
+         S_t1 * cb_t + cb_t.T * S_t1 * B_t * ub + cb_t.T * S_t1 * A_t * xb))
 
-  v_t_now = (
-      half * (xb.T * (A_t.T * S_t1 * A_t + Q_t) * xb +
-              ub.T * (B_t.T * S_t1 * A_t + P_t) * xb +
-              xb.T * (A_t.T * S_t1 * B_t + P_t.T) * ub +
-              ub.T * (B_t.T * S_t1 * B_t + R_t) * ub) +
-      xb.T * (A_t.T * sb_t1 + qb_t + half * A_t.T * S_t1 * cb_t) +
-      ub.T * (B_t.T * sb_t1 + rb_t + half * B_t.T * S_t1 * cb_t) +
-      half * cb_t.T * S_t1 * (A_t * xb + B_t * ub + cb_t) +
-      cb_t.T * sb_t1 + s_t1 + q_t)
+    v_t_now = (half * (xb.T * (A_t.T * S_t1 * A_t + Q_t) * xb + ub.T *
+                       (B_t.T * S_t1 * A_t + P_t) * xb + xb.T *
+                       (A_t.T * S_t1 * B_t + P_t.T) * ub + ub.T *
+                       (B_t.T * S_t1 * B_t + R_t) * ub) + xb.T *
+               (A_t.T * sb_t1 + qb_t + half * A_t.T * S_t1 * cb_t) + ub.T *
+               (B_t.T * sb_t1 + rb_t + half * B_t.T * S_t1 * cb_t) +
+               half * cb_t.T * S_t1 * (A_t * xb + B_t * ub + cb_t) +
+               cb_t.T * sb_t1 + s_t1 + q_t)
 
-  v_t_now = (
-      half * (xb.T * (A_t.T * S_t1 * A_t + Q_t) * xb +
-              ub.T * (B_t.T * S_t1 * A_t + P_t) * xb +
-              xb.T * (A_t.T * S_t1 * B_t + P_t.T) * ub +
-              ub.T * (B_t.T * S_t1 * B_t + R_t) * ub) +
-      xb.T * (A_t.T * sb_t1 + qb_t + A_t.T * S_t1 * cb_t) +
-      ub.T * (B_t.T * sb_t1 + rb_t + B_t.T * S_t1 * cb_t) +
-      half * cb_t.T * S_t1 * cb_t +
-      cb_t.T * sb_t1 + s_t1 + q_t)
+    v_t_now = (half * (xb.T * (A_t.T * S_t1 * A_t + Q_t) * xb + ub.T *
+                       (B_t.T * S_t1 * A_t + P_t) * xb + xb.T *
+                       (A_t.T * S_t1 * B_t + P_t.T) * ub + ub.T *
+                       (B_t.T * S_t1 * B_t + R_t) * ub) + xb.T *
+               (A_t.T * sb_t1 + qb_t + A_t.T * S_t1 * cb_t) + ub.T *
+               (B_t.T * sb_t1 + rb_t + B_t.T * S_t1 * cb_t) +
+               half * cb_t.T * S_t1 * cb_t + cb_t.T * sb_t1 + s_t1 + q_t)
 
-  C_t = B_t.T * S_t1 * A_t + P_t
-  E_t = B_t.T * S_t1 * B_t + R_t
-  E_t_I = sympy.MatrixSymbol('E_t^-1', E_t.cols, E_t.rows)
-  L_t = -E_t_I * C_t
-  eb_t = B_t.T * S_t1 * cb_t + B_t.T * sb_t1 + rb_t
-  lb_t = -E_t_I * eb_t
-  D_t = A_t.T * S_t1 * A_t + Q_t
-  db_t = A_t.T * S_t1 * cb_t + A_t.T * sb_t1 + qb_t
+    C_t = B_t.T * S_t1 * A_t + P_t
+    E_t = B_t.T * S_t1 * B_t + R_t
+    E_t_I = sympy.MatrixSymbol('E_t^-1', E_t.cols, E_t.rows)
+    L_t = -E_t_I * C_t
+    eb_t = B_t.T * S_t1 * cb_t + B_t.T * sb_t1 + rb_t
+    lb_t = -E_t_I * eb_t
+    D_t = A_t.T * S_t1 * A_t + Q_t
+    db_t = A_t.T * S_t1 * cb_t + A_t.T * sb_t1 + qb_t
 
-  v_t_now = (
-      half * (xb.T * D_t * xb + ub.T * C_t * xb +
-              xb.T * C_t.T * ub + ub.T * E_t * ub) +
-      xb.T * db_t + ub.T * eb_t +
-      half * cb_t.T * S_t1 * cb_t +
-      cb_t.T * sb_t1 + s_t1 + q_t)
+    v_t_now = (half * (xb.T * D_t * xb + ub.T * C_t * xb + xb.T * C_t.T * ub +
+                       ub.T * E_t * ub) + xb.T * db_t + ub.T * eb_t +
+               half * cb_t.T * S_t1 * cb_t + cb_t.T * sb_t1 + s_t1 + q_t)
 
-  v_t_final = (
-      half * xb.T * (D_t + L_t.T * C_t + C_t.T * L_t + L_t.T * E_t * L_t) * xb +
-      xb.T * (C_t.T * lb_t + L_t.T * E_t * lb_t + db_t + L_t.T * eb_t) +
-      half * lb_t.T * E_t * lb_t +
-      lb_t.T * eb_t +
-      cb_t.T * sb_t1 + s_t1 + q_t + half * cb_t.T * S_t1 * cb_t
-      )
-  verify_arguments(v_t_final, xb, E_t_I)
-  verify_equivalent(v_t.subs(ub, L_t * xb + lb_t), v_t_final, {E_t_I: E_t})
+    v_t_final = (half * xb.T *
+                 (D_t + L_t.T * C_t + C_t.T * L_t + L_t.T * E_t * L_t) * xb +
+                 xb.T *
+                 (C_t.T * lb_t + L_t.T * E_t * lb_t + db_t + L_t.T * eb_t) +
+                 half * lb_t.T * E_t * lb_t + lb_t.T * eb_t + cb_t.T * sb_t1 +
+                 s_t1 + q_t + half * cb_t.T * S_t1 * cb_t)
+    verify_arguments(v_t_final, xb, E_t_I)
+    verify_equivalent(v_t.subs(ub, L_t * xb + lb_t), v_t_final, {E_t_I: E_t})
 
-  def v_t_from_s(this_S_t, this_sb_t, this_s_t):
-    return half * xb.T * this_S_t * xb + xb.T * this_sb_t + this_s_t
+    def v_t_from_s(this_S_t, this_sb_t, this_s_t):
+        return half * xb.T * this_S_t * xb + xb.T * this_sb_t + this_s_t
 
-  S_t_new_first = D_t + L_t.T * C_t + C_t.T * L_t + L_t.T * E_t * L_t
-  sb_t_new_first = db_t - C_t.T * E_t_I * eb_t
-  s_t_new_first = (half * lb_t.T * E_t * lb_t +
-                   lb_t.T * eb_t +
-                   cb_t.T * sb_t1 +
-                   s_t1 + q_t +
-                   half * cb_t.T * S_t1 * cb_t)
-  verify_equivalent(v_t_from_s(S_t_new_first, sb_t_new_first, s_t_new_first),
-                    v_t_final, {E_t_I: E_t})
+    S_t_new_first = D_t + L_t.T * C_t + C_t.T * L_t + L_t.T * E_t * L_t
+    sb_t_new_first = db_t - C_t.T * E_t_I * eb_t
+    s_t_new_first = (half * lb_t.T * E_t * lb_t + lb_t.T * eb_t +
+                     cb_t.T * sb_t1 + s_t1 + q_t + half * cb_t.T * S_t1 * cb_t)
+    verify_equivalent(v_t_from_s(S_t_new_first, sb_t_new_first, s_t_new_first),
+                      v_t_final, {E_t_I: E_t})
 
-  S_t_new_end = D_t - C_t.T * E_t_I * C_t
-  sb_t_new_end = db_t - C_t.T * E_t_I * eb_t
-  s_t_new_end = (q_t - half * eb_t.T * E_t_I * eb_t +
-                 half * cb_t.T * S_t1 * cb_t + cb_t.T * sb_t1 + s_t1)
-  verify_equivalent(v_t_from_s(S_t_new_end, sb_t_new_end, s_t_new_end),
-                    v_t_final, {E_t_I: E_t})
+    S_t_new_end = D_t - C_t.T * E_t_I * C_t
+    sb_t_new_end = db_t - C_t.T * E_t_I * eb_t
+    s_t_new_end = (q_t - half * eb_t.T * E_t_I * eb_t +
+                   half * cb_t.T * S_t1 * cb_t + cb_t.T * sb_t1 + s_t1)
+    verify_equivalent(v_t_from_s(S_t_new_end, sb_t_new_end, s_t_new_end),
+                      v_t_final, {E_t_I: E_t})
+
 
 def check_forwards_cost():
-  v_tb = half * xb.T * S_tb * xb + xb.T * sb_tb + s_tb
-  verify_arguments(v_tb, xb)
-  g_tb = A_tb * xb_t1 + B_tb * ub + cb_tb
-  verify_arguments(g_tb, xb_t1, ub)
-  c_t1b = make_c_t().subs(xb, g_tb)
-  verify_arguments(c_t1b, xb_t1, ub)
-  v_t1b = v_tb.subs(xb, g_tb) + c_t1b
-  verify_arguments(v_t1b, xb_t1, ub)
+    v_tb = half * xb.T * S_tb * xb + xb.T * sb_tb + s_tb
+    verify_arguments(v_tb, xb)
+    g_tb = A_tb * xb_t1 + B_tb * ub + cb_tb
+    verify_arguments(g_tb, xb_t1, ub)
+    c_t1b = make_c_t().subs(xb, g_tb)
+    verify_arguments(c_t1b, xb_t1, ub)
+    v_t1b = v_tb.subs(xb, g_tb) + c_t1b
+    verify_arguments(v_t1b, xb_t1, ub)
 
-  v_t1b_now = (
-      half * g_tb.T * S_tb * g_tb +
-      g_tb.T * sb_tb + s_tb +
-      half * (g_tb.T * Q_t * g_tb +
-              ub.T * P_t * g_tb +
-              g_tb.T * P_t.T * ub +
-              ub.T * R_t * ub) +
-      g_tb.T * qb_t + ub.T * rb_t + q_t)
+    v_t1b_now = (half * g_tb.T * S_tb * g_tb + g_tb.T * sb_tb + s_tb + half *
+                 (g_tb.T * Q_t * g_tb + ub.T * P_t * g_tb +
+                  g_tb.T * P_t.T * ub + ub.T * R_t * ub) + g_tb.T * qb_t +
+                 ub.T * rb_t + q_t)
 
-  v_t1b_for_cost = (
-      half * (xb_t1.T * A_tb.T * (S_tb + Q_t) * A_tb * xb_t1 +
-              xb_t1.T * A_tb.T * (S_tb + Q_t) * B_tb * ub +
-              xb_t1.T * A_tb.T * (S_tb + Q_t) * cb_tb +
-              ub.T * B_tb.T * (S_tb + Q_t) * A_tb * xb_t1 +
-              ub.T * B_tb.T * (S_tb + Q_t) * B_tb * ub +
-              ub.T * B_tb.T * (S_tb + Q_t) * cb_tb +
-              cb_tb.T * (S_tb + Q_t) * A_tb * xb_t1 +
-              cb_tb.T * (S_tb + Q_t) * B_tb * ub +
-              cb_tb.T * (S_tb + Q_t) * cb_tb) +
-      xb_t1.T * A_tb.T * sb_tb +
-      ub.T * B_tb.T * sb_tb +
-      cb_tb.T * sb_tb +
-      s_tb +
-      half * (ub.T * P_t * A_tb * xb_t1 +
-              ub.T * P_t * B_tb * ub +
-              ub.T * P_t * cb_tb) +
-      half * (xb_t1.T * A_tb.T * P_t.T * ub +
-              ub.T * B_tb.T * P_t.T * ub +
-              cb_tb.T * P_t.T * ub) +
-      half * ub.T * R_t * ub +
-      xb_t1.T * A_tb.T * qb_t + ub.T * B_tb.T * qb_t + cb_tb.T * qb_t +
-      ub.T * rb_t + q_t)
-  verify_equivalent(v_t1b, v_t1b_for_cost)
+    v_t1b_for_cost = (half * (xb_t1.T * A_tb.T *
+                              (S_tb + Q_t) * A_tb * xb_t1 + xb_t1.T * A_tb.T *
+                              (S_tb + Q_t) * B_tb * ub + xb_t1.T * A_tb.T *
+                              (S_tb + Q_t) * cb_tb + ub.T * B_tb.T *
+                              (S_tb + Q_t) * A_tb * xb_t1 + ub.T * B_tb.T *
+                              (S_tb + Q_t) * B_tb * ub + ub.T * B_tb.T *
+                              (S_tb + Q_t) * cb_tb + cb_tb.T *
+                              (S_tb + Q_t) * A_tb * xb_t1 + cb_tb.T *
+                              (S_tb + Q_t) * B_tb * ub + cb_tb.T *
+                              (S_tb + Q_t) * cb_tb) +
+                      xb_t1.T * A_tb.T * sb_tb + ub.T * B_tb.T * sb_tb +
+                      cb_tb.T * sb_tb + s_tb + half *
+                      (ub.T * P_t * A_tb * xb_t1 + ub.T * P_t * B_tb * ub +
+                       ub.T * P_t * cb_tb) + half *
+                      (xb_t1.T * A_tb.T * P_t.T * ub +
+                       ub.T * B_tb.T * P_t.T * ub + cb_tb.T * P_t.T * ub) +
+                      half * ub.T * R_t * ub + xb_t1.T * A_tb.T * qb_t +
+                      ub.T * B_tb.T * qb_t + cb_tb.T * qb_t + ub.T * rb_t +
+                      q_t)
+    verify_equivalent(v_t1b, v_t1b_for_cost)
 
-  S_and_Q = S_tb + Q_t
+    S_and_Q = S_tb + Q_t
 
-  v_t1b_now = (
-      half * (xb_t1.T * A_tb.T * S_and_Q * A_tb * xb_t1 +
-              xb_t1.T * A_tb.T * S_and_Q * B_tb * ub +
-              xb_t1.T * A_tb.T * S_and_Q * cb_tb +
-              ub.T * B_tb.T * S_and_Q * A_tb * xb_t1 +
-              ub.T * B_tb.T * S_and_Q * B_tb * ub +
-              ub.T * B_tb.T * S_and_Q * cb_tb +
-              cb_tb.T * S_and_Q * A_tb * xb_t1 +
-              cb_tb.T * S_and_Q * B_tb * ub +
-              cb_tb.T * S_and_Q * cb_tb) +
-      xb_t1.T * A_tb.T * sb_tb +
-      ub.T * B_tb.T * sb_tb +
-      cb_tb.T * sb_tb +
-      s_tb +
-      half * (ub.T * P_t * A_tb * xb_t1 +
-              ub.T * P_t * B_tb * ub +
-              ub.T * P_t * cb_tb) +
-      half * (xb_t1.T * A_tb.T * P_t.T * ub +
-              ub.T * B_tb.T * P_t.T * ub +
-              cb_tb.T * P_t.T * ub) +
-      half * ub.T * R_t * ub +
-      xb_t1.T * A_tb.T * qb_t +
-      ub.T * B_tb.T * qb_t +
-      cb_tb.T * qb_t +
-      ub.T * rb_t +
-      q_t)
+    v_t1b_now = (
+        half *
+        (xb_t1.T * A_tb.T * S_and_Q * A_tb * xb_t1 + xb_t1.T * A_tb.T *
+         S_and_Q * B_tb * ub + xb_t1.T * A_tb.T * S_and_Q * cb_tb + ub.T *
+         B_tb.T * S_and_Q * A_tb * xb_t1 + ub.T * B_tb.T * S_and_Q * B_tb * ub
+         + ub.T * B_tb.T * S_and_Q * cb_tb + cb_tb.T * S_and_Q * A_tb * xb_t1 +
+         cb_tb.T * S_and_Q * B_tb * ub + cb_tb.T * S_and_Q * cb_tb) +
+        xb_t1.T * A_tb.T * sb_tb + ub.T * B_tb.T * sb_tb + cb_tb.T * sb_tb +
+        s_tb + half * (ub.T * P_t * A_tb * xb_t1 + ub.T * P_t * B_tb * ub +
+                       ub.T * P_t * cb_tb) + half *
+        (xb_t1.T * A_tb.T * P_t.T * ub + ub.T * B_tb.T * P_t.T * ub +
+         cb_tb.T * P_t.T * ub) + half * ub.T * R_t * ub +
+        xb_t1.T * A_tb.T * qb_t + ub.T * B_tb.T * qb_t + cb_tb.T * qb_t +
+        ub.T * rb_t + q_t)
 
-  C_tb = B_tb.T * S_and_Q * A_tb + P_t * A_tb
-  E_tb = B_tb.T * S_and_Q * B_tb + B_tb.T * P_t.T + P_t * B_tb + R_t
-  E_tb_I = sympy.MatrixSymbol('Ebar_t^-1', E_tb.cols, E_tb.rows)
-  L_tb = -E_tb_I * C_tb
-  eb_tb = B_tb.T * S_and_Q * cb_tb + B_tb.T * sb_tb + P_t * cb_tb + B_tb.T * qb_t + rb_t
-  lb_tb = -E_tb_I * eb_tb
-  D_tb = A_tb.T * S_and_Q * A_tb
-  db_tb = A_tb.T * S_and_Q * cb_tb + A_tb.T * (sb_tb + qb_t)
+    C_tb = B_tb.T * S_and_Q * A_tb + P_t * A_tb
+    E_tb = B_tb.T * S_and_Q * B_tb + B_tb.T * P_t.T + P_t * B_tb + R_t
+    E_tb_I = sympy.MatrixSymbol('Ebar_t^-1', E_tb.cols, E_tb.rows)
+    L_tb = -E_tb_I * C_tb
+    eb_tb = B_tb.T * S_and_Q * cb_tb + B_tb.T * sb_tb + P_t * cb_tb + B_tb.T * qb_t + rb_t
+    lb_tb = -E_tb_I * eb_tb
+    D_tb = A_tb.T * S_and_Q * A_tb
+    db_tb = A_tb.T * S_and_Q * cb_tb + A_tb.T * (sb_tb + qb_t)
 
-  v_t1b_now = (
-      half * (xb_t1.T * D_tb * xb_t1 +
-              xb_t1.T * C_tb.T * ub +
-              ub.T * C_tb * xb_t1 +
-              ub.T * E_tb * ub) +
-      xb_t1.T * db_tb +
-      ub.T * eb_tb +
-      half * cb_tb.T * S_and_Q * cb_tb +
-      cb_tb.T * sb_tb +
-      cb_tb.T * qb_t +
-      s_tb + q_t)
+    v_t1b_now = (half * (xb_t1.T * D_tb * xb_t1 + xb_t1.T * C_tb.T * ub +
+                         ub.T * C_tb * xb_t1 + ub.T * E_tb * ub) +
+                 xb_t1.T * db_tb + ub.T * eb_tb +
+                 half * cb_tb.T * S_and_Q * cb_tb + cb_tb.T * sb_tb +
+                 cb_tb.T * qb_t + s_tb + q_t)
 
-  v_t1b_final = (
-      half * xb_t1.T * (D_tb - C_tb.T * E_tb_I * C_tb) * xb_t1 +
-      xb_t1.T * (db_tb - C_tb.T * E_tb_I * eb_tb) +
-      -half * eb_tb.T * E_tb_I * eb_tb +
-      half * cb_tb.T * S_and_Q * cb_tb +
-      cb_tb.T * sb_tb +
-      cb_tb.T * qb_t +
-      s_tb + q_t)
-  verify_arguments(v_t1b_final, xb_t1, E_tb_I)
-  verify_equivalent(v_t1b.subs(ub, -E_tb_I * C_tb * xb_t1 - E_tb_I * eb_tb),
-                    v_t1b_final, {E_tb_I: E_tb})
+    v_t1b_final = (half * xb_t1.T * (D_tb - C_tb.T * E_tb_I * C_tb) * xb_t1 +
+                   xb_t1.T * (db_tb - C_tb.T * E_tb_I * eb_tb) +
+                   -half * eb_tb.T * E_tb_I * eb_tb +
+                   half * cb_tb.T * S_and_Q * cb_tb + cb_tb.T * sb_tb +
+                   cb_tb.T * qb_t + s_tb + q_t)
+    verify_arguments(v_t1b_final, xb_t1, E_tb_I)
+    verify_equivalent(v_t1b.subs(ub, -E_tb_I * C_tb * xb_t1 - E_tb_I * eb_tb),
+                      v_t1b_final, {E_tb_I: E_tb})
+
 
 def check_forwards_u():
-  S_and_Q = S_tb + Q_t
+    S_and_Q = S_tb + Q_t
 
-  diff_start = (
-      half * (xb_t1.T * A_tb.T * S_and_Q * B_tb +
-              (B_tb.T * S_and_Q * A_tb * xb_t1).T +
-              2 * ub.T * B_tb.T * S_and_Q * B_tb +
-              (B_tb.T * S_and_Q * cb_tb).T +
-              cb_tb.T * S_and_Q * B_tb) +
-      sb_tb.T * B_tb +
-      half * (P_t * A_tb * xb_t1).T +
-      half * xb_t1.T * A_tb.T * P_t.T +
-      half * ub.T * (P_t * B_tb + B_tb.T * P_t.T) +
-      half * ub.T * (B_tb.T * P_t.T + P_t * B_tb) +
-      half * (P_t * cb_tb).T +
-      half * cb_tb.T * P_t.T +
-      ub.T * R_t +
-      (B_tb.T * qb_t).T + rb_t.T)
-  verify_arguments(diff_start, xb_t1, ub)
+    diff_start = (half *
+                  (xb_t1.T * A_tb.T * S_and_Q * B_tb +
+                   (B_tb.T * S_and_Q * A_tb * xb_t1).T +
+                   2 * ub.T * B_tb.T * S_and_Q * B_tb +
+                   (B_tb.T * S_and_Q * cb_tb).T + cb_tb.T * S_and_Q * B_tb) +
+                  sb_tb.T * B_tb + half * (P_t * A_tb * xb_t1).T +
+                  half * xb_t1.T * A_tb.T * P_t.T + half * ub.T *
+                  (P_t * B_tb + B_tb.T * P_t.T) + half * ub.T *
+                  (B_tb.T * P_t.T + P_t * B_tb) + half * (P_t * cb_tb).T +
+                  half * cb_tb.T * P_t.T + ub.T * R_t + (B_tb.T * qb_t).T +
+                  rb_t.T)
+    verify_arguments(diff_start, xb_t1, ub)
 
-  diff_end = (
-      xb_t1.T * (A_tb.T * S_and_Q * B_tb + A_tb.T * P_t.T) +
-      ub.T * (B_tb.T * S_and_Q * B_tb + B_tb.T * P_t.T + P_t * B_tb + R_t) +
-      cb_tb.T * S_and_Q * B_tb +
-      sb_tb.T * B_tb +
-      cb_tb.T * P_t.T +
-      qb_t.T * B_tb +
-      rb_t.T)
-  verify_equivalent(diff_start, diff_end)
+    diff_end = (xb_t1.T * (A_tb.T * S_and_Q * B_tb + A_tb.T * P_t.T) + ub.T *
+                (B_tb.T * S_and_Q * B_tb + B_tb.T * P_t.T + P_t * B_tb + R_t) +
+                cb_tb.T * S_and_Q * B_tb + sb_tb.T * B_tb + cb_tb.T * P_t.T +
+                qb_t.T * B_tb + rb_t.T)
+    verify_equivalent(diff_start, diff_end)
+
 
 def main():
-  sympy.init_printing(use_unicode=True)
-  check_backwards_cost()
-  check_forwards_cost()
-  check_forwards_u()
+    sympy.init_printing(use_unicode=True)
+    check_backwards_cost()
+    check_forwards_cost()
+    check_forwards_u()
+
 
 if __name__ == '__main__':
-  main()
+    main()
diff --git a/y2014/control_loops/python/polydrivetrain.py b/y2014/control_loops/python/polydrivetrain.py
index cdaee1b..05b6658 100755
--- a/y2014/control_loops/python/polydrivetrain.py
+++ b/y2014/control_loops/python/polydrivetrain.py
@@ -12,20 +12,22 @@
 FLAGS = gflags.FLAGS
 
 try:
-  gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
+    gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
 except gflags.DuplicateFlagError:
-  pass
+    pass
+
 
 def main(argv):
-  if FLAGS.plot:
-    polydrivetrain.PlotPolyDrivetrainMotions(drivetrain.kDrivetrain)
-  elif len(argv) != 7:
-    glog.fatal('Expected .h file name and .cc file name')
-  else:
-    polydrivetrain.WritePolyDrivetrain(argv[1:3], argv[3:5], argv[5:7], 'y2014',
-                                       drivetrain.kDrivetrain)
+    if FLAGS.plot:
+        polydrivetrain.PlotPolyDrivetrainMotions(drivetrain.kDrivetrain)
+    elif len(argv) != 7:
+        glog.fatal('Expected .h file name and .cc file name')
+    else:
+        polydrivetrain.WritePolyDrivetrain(argv[1:3], argv[3:5], argv[5:7],
+                                           'y2014', drivetrain.kDrivetrain)
+
 
 if __name__ == '__main__':
-  argv = FLAGS(sys.argv)
-  glog.init()
-  sys.exit(main(argv))
+    argv = FLAGS(sys.argv)
+    glog.init()
+    sys.exit(main(argv))
diff --git a/y2014/control_loops/python/polydrivetrain_test.py b/y2014/control_loops/python/polydrivetrain_test.py
index 8e0176e..a5bac4a 100755
--- a/y2014/control_loops/python/polydrivetrain_test.py
+++ b/y2014/control_loops/python/polydrivetrain_test.py
@@ -10,73 +10,72 @@
 
 
 class TestVelocityDrivetrain(unittest.TestCase):
-  def MakeBox(self, x1_min, x1_max, x2_min, x2_max):
-    H = numpy.matrix([[1, 0],
-                      [-1, 0],
-                      [0, 1],
-                      [0, -1]])
-    K = numpy.matrix([[x1_max],
-                      [-x1_min],
-                      [x2_max],
-                      [-x2_min]])
-    return polytope.HPolytope(H, K)
 
-  def test_coerce_inside(self):
-    """Tests coercion when the point is inside the box."""
-    box = self.MakeBox(1, 2, 1, 2)
+    def MakeBox(self, x1_min, x1_max, x2_min, x2_max):
+        H = numpy.matrix([[1, 0], [-1, 0], [0, 1], [0, -1]])
+        K = numpy.matrix([[x1_max], [-x1_min], [x2_max], [-x2_min]])
+        return polytope.HPolytope(H, K)
 
-    # x1 = x2
-    K = numpy.matrix([[1, -1]])
-    w = 0
+    def test_coerce_inside(self):
+        """Tests coercion when the point is inside the box."""
+        box = self.MakeBox(1, 2, 1, 2)
 
-    assert_array_equal(polydrivetrain.CoerceGoal(box, K, w,
-                                                 numpy.matrix([[1.5], [1.5]])),
-                       numpy.matrix([[1.5], [1.5]]))
+        # x1 = x2
+        K = numpy.matrix([[1, -1]])
+        w = 0
 
-  def test_coerce_outside_intersect(self):
-    """Tests coercion when the line intersects the box."""
-    box = self.MakeBox(1, 2, 1, 2)
+        assert_array_equal(
+            polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[1.5], [1.5]])),
+            numpy.matrix([[1.5], [1.5]]))
 
-    # x1 = x2
-    K = numpy.matrix([[1, -1]])
-    w = 0
+    def test_coerce_outside_intersect(self):
+        """Tests coercion when the line intersects the box."""
+        box = self.MakeBox(1, 2, 1, 2)
 
-    assert_array_equal(polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
-                       numpy.matrix([[2.0], [2.0]]))
+        # x1 = x2
+        K = numpy.matrix([[1, -1]])
+        w = 0
 
-  def test_coerce_outside_no_intersect(self):
-    """Tests coercion when the line does not intersect the box."""
-    box = self.MakeBox(3, 4, 1, 2)
+        assert_array_equal(
+            polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
+            numpy.matrix([[2.0], [2.0]]))
 
-    # x1 = x2
-    K = numpy.matrix([[1, -1]])
-    w = 0
+    def test_coerce_outside_no_intersect(self):
+        """Tests coercion when the line does not intersect the box."""
+        box = self.MakeBox(3, 4, 1, 2)
 
-    assert_array_equal(polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
-                       numpy.matrix([[3.0], [2.0]]))
+        # x1 = x2
+        K = numpy.matrix([[1, -1]])
+        w = 0
 
-  def test_coerce_middle_of_edge(self):
-    """Tests coercion when the line intersects the middle of an edge."""
-    box = self.MakeBox(0, 4, 1, 2)
+        assert_array_equal(
+            polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
+            numpy.matrix([[3.0], [2.0]]))
 
-    # x1 = x2
-    K = numpy.matrix([[-1, 1]])
-    w = 0
+    def test_coerce_middle_of_edge(self):
+        """Tests coercion when the line intersects the middle of an edge."""
+        box = self.MakeBox(0, 4, 1, 2)
 
-    assert_array_equal(polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
-                       numpy.matrix([[2.0], [2.0]]))
+        # x1 = x2
+        K = numpy.matrix([[-1, 1]])
+        w = 0
 
-  def test_coerce_perpendicular_line(self):
-    """Tests coercion when the line does not intersect and is in quadrant 2."""
-    box = self.MakeBox(1, 2, 1, 2)
+        assert_array_equal(
+            polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
+            numpy.matrix([[2.0], [2.0]]))
 
-    # x1 = -x2
-    K = numpy.matrix([[1, 1]])
-    w = 0
+    def test_coerce_perpendicular_line(self):
+        """Tests coercion when the line does not intersect and is in quadrant 2."""
+        box = self.MakeBox(1, 2, 1, 2)
 
-    assert_array_equal(polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
-                       numpy.matrix([[1.0], [1.0]]))
+        # x1 = -x2
+        K = numpy.matrix([[1, 1]])
+        w = 0
+
+        assert_array_equal(
+            polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
+            numpy.matrix([[1.0], [1.0]]))
 
 
 if __name__ == '__main__':
-  unittest.main()
+    unittest.main()
diff --git a/y2014/control_loops/python/shooter.py b/y2014/control_loops/python/shooter.py
index c3e8d86..e3ad903 100755
--- a/y2014/control_loops/python/shooter.py
+++ b/y2014/control_loops/python/shooter.py
@@ -264,8 +264,9 @@
 
     sprung_shooter = SprungShooterDeltaU()
     shooter = ShooterDeltaU()
-    loop_writer = control_loop.ControlLoopWriter(
-        "Shooter", [sprung_shooter, shooter], namespaces=namespaces)
+    loop_writer = control_loop.ControlLoopWriter("Shooter",
+                                                 [sprung_shooter, shooter],
+                                                 namespaces=namespaces)
 
     loop_writer.AddConstant(
         control_loop.Constant("kMaxExtension", "%f",
diff --git a/y2014_bot3/control_loops/python/drivetrain.py b/y2014_bot3/control_loops/python/drivetrain.py
index 2990773..955cf4c 100755
--- a/y2014_bot3/control_loops/python/drivetrain.py
+++ b/y2014_bot3/control_loops/python/drivetrain.py
@@ -11,31 +11,34 @@
 
 gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
 
-kDrivetrain = drivetrain.DrivetrainParams(J = 1.8,
-                                          mass = 37,
-                                          robot_radius = 0.45 / 2.0,
-                                          wheel_radius = 0.04445,
-                                          G_high = 28.0 / 48.0 * 19.0 / 50.0,
-                                          G_low = 28.0 / 60.0 * 19.0 / 50.0,
-                                          q_pos_low = 0.12,
-                                          q_pos_high = 0.14,
-                                          q_vel_low = 1.0,
-                                          q_vel_high = 0.95,
-                                          has_imu = False,
-                                          dt = 0.005,
-                                          controller_poles = [0.83, 0.83])
+kDrivetrain = drivetrain.DrivetrainParams(J=1.8,
+                                          mass=37,
+                                          robot_radius=0.45 / 2.0,
+                                          wheel_radius=0.04445,
+                                          G_high=28.0 / 48.0 * 19.0 / 50.0,
+                                          G_low=28.0 / 60.0 * 19.0 / 50.0,
+                                          q_pos_low=0.12,
+                                          q_pos_high=0.14,
+                                          q_vel_low=1.0,
+                                          q_vel_high=0.95,
+                                          has_imu=False,
+                                          dt=0.005,
+                                          controller_poles=[0.83, 0.83])
+
 
 def main(argv):
-  argv = FLAGS(argv)
-  glog.init()
+    argv = FLAGS(argv)
+    glog.init()
 
-  if FLAGS.plot:
-    drivetrain.PlotDrivetrainMotions(kDrivetrain)
-  elif len(argv) != 5:
-    print("Expected .h file name and .cc file name")
-  else:
-    # Write the generated constants out to a file.
-    drivetrain.WriteDrivetrain(argv[1:3], argv[3:5], 'y2014_bot3', kDrivetrain)
+    if FLAGS.plot:
+        drivetrain.PlotDrivetrainMotions(kDrivetrain)
+    elif len(argv) != 5:
+        print("Expected .h file name and .cc file name")
+    else:
+        # Write the generated constants out to a file.
+        drivetrain.WriteDrivetrain(argv[1:3], argv[3:5], 'y2014_bot3',
+                                   kDrivetrain)
+
 
 if __name__ == '__main__':
-  sys.exit(main(sys.argv))
+    sys.exit(main(sys.argv))
diff --git a/y2014_bot3/control_loops/python/polydrivetrain.py b/y2014_bot3/control_loops/python/polydrivetrain.py
index 08e5583..881b2b0 100755
--- a/y2014_bot3/control_loops/python/polydrivetrain.py
+++ b/y2014_bot3/control_loops/python/polydrivetrain.py
@@ -12,20 +12,23 @@
 FLAGS = gflags.FLAGS
 
 try:
-  gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
+    gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
 except gflags.DuplicateFlagError:
-  pass
+    pass
+
 
 def main(argv):
-  if FLAGS.plot:
-    polydrivetrain.PlotPolyDrivetrainMotions(drivetrain.kDrivetrain)
-  elif len(argv) != 7:
-    glog.fatal('Expected .h file name and .cc file name')
-  else:
-    polydrivetrain.WritePolyDrivetrain(argv[1:3], argv[3:5], argv[5:7],
-                                       'y2014_bot3', drivetrain.kDrivetrain)
+    if FLAGS.plot:
+        polydrivetrain.PlotPolyDrivetrainMotions(drivetrain.kDrivetrain)
+    elif len(argv) != 7:
+        glog.fatal('Expected .h file name and .cc file name')
+    else:
+        polydrivetrain.WritePolyDrivetrain(argv[1:3], argv[3:5], argv[5:7],
+                                           'y2014_bot3',
+                                           drivetrain.kDrivetrain)
+
 
 if __name__ == '__main__':
-  argv = FLAGS(sys.argv)
-  glog.init()
-  sys.exit(main(argv))
+    argv = FLAGS(sys.argv)
+    glog.init()
+    sys.exit(main(argv))
diff --git a/y2014_bot3/control_loops/python/polydrivetrain_test.py b/y2014_bot3/control_loops/python/polydrivetrain_test.py
index 8e0176e..a5bac4a 100755
--- a/y2014_bot3/control_loops/python/polydrivetrain_test.py
+++ b/y2014_bot3/control_loops/python/polydrivetrain_test.py
@@ -10,73 +10,72 @@
 
 
 class TestVelocityDrivetrain(unittest.TestCase):
-  def MakeBox(self, x1_min, x1_max, x2_min, x2_max):
-    H = numpy.matrix([[1, 0],
-                      [-1, 0],
-                      [0, 1],
-                      [0, -1]])
-    K = numpy.matrix([[x1_max],
-                      [-x1_min],
-                      [x2_max],
-                      [-x2_min]])
-    return polytope.HPolytope(H, K)
 
-  def test_coerce_inside(self):
-    """Tests coercion when the point is inside the box."""
-    box = self.MakeBox(1, 2, 1, 2)
+    def MakeBox(self, x1_min, x1_max, x2_min, x2_max):
+        H = numpy.matrix([[1, 0], [-1, 0], [0, 1], [0, -1]])
+        K = numpy.matrix([[x1_max], [-x1_min], [x2_max], [-x2_min]])
+        return polytope.HPolytope(H, K)
 
-    # x1 = x2
-    K = numpy.matrix([[1, -1]])
-    w = 0
+    def test_coerce_inside(self):
+        """Tests coercion when the point is inside the box."""
+        box = self.MakeBox(1, 2, 1, 2)
 
-    assert_array_equal(polydrivetrain.CoerceGoal(box, K, w,
-                                                 numpy.matrix([[1.5], [1.5]])),
-                       numpy.matrix([[1.5], [1.5]]))
+        # x1 = x2
+        K = numpy.matrix([[1, -1]])
+        w = 0
 
-  def test_coerce_outside_intersect(self):
-    """Tests coercion when the line intersects the box."""
-    box = self.MakeBox(1, 2, 1, 2)
+        assert_array_equal(
+            polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[1.5], [1.5]])),
+            numpy.matrix([[1.5], [1.5]]))
 
-    # x1 = x2
-    K = numpy.matrix([[1, -1]])
-    w = 0
+    def test_coerce_outside_intersect(self):
+        """Tests coercion when the line intersects the box."""
+        box = self.MakeBox(1, 2, 1, 2)
 
-    assert_array_equal(polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
-                       numpy.matrix([[2.0], [2.0]]))
+        # x1 = x2
+        K = numpy.matrix([[1, -1]])
+        w = 0
 
-  def test_coerce_outside_no_intersect(self):
-    """Tests coercion when the line does not intersect the box."""
-    box = self.MakeBox(3, 4, 1, 2)
+        assert_array_equal(
+            polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
+            numpy.matrix([[2.0], [2.0]]))
 
-    # x1 = x2
-    K = numpy.matrix([[1, -1]])
-    w = 0
+    def test_coerce_outside_no_intersect(self):
+        """Tests coercion when the line does not intersect the box."""
+        box = self.MakeBox(3, 4, 1, 2)
 
-    assert_array_equal(polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
-                       numpy.matrix([[3.0], [2.0]]))
+        # x1 = x2
+        K = numpy.matrix([[1, -1]])
+        w = 0
 
-  def test_coerce_middle_of_edge(self):
-    """Tests coercion when the line intersects the middle of an edge."""
-    box = self.MakeBox(0, 4, 1, 2)
+        assert_array_equal(
+            polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
+            numpy.matrix([[3.0], [2.0]]))
 
-    # x1 = x2
-    K = numpy.matrix([[-1, 1]])
-    w = 0
+    def test_coerce_middle_of_edge(self):
+        """Tests coercion when the line intersects the middle of an edge."""
+        box = self.MakeBox(0, 4, 1, 2)
 
-    assert_array_equal(polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
-                       numpy.matrix([[2.0], [2.0]]))
+        # x1 = x2
+        K = numpy.matrix([[-1, 1]])
+        w = 0
 
-  def test_coerce_perpendicular_line(self):
-    """Tests coercion when the line does not intersect and is in quadrant 2."""
-    box = self.MakeBox(1, 2, 1, 2)
+        assert_array_equal(
+            polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
+            numpy.matrix([[2.0], [2.0]]))
 
-    # x1 = -x2
-    K = numpy.matrix([[1, 1]])
-    w = 0
+    def test_coerce_perpendicular_line(self):
+        """Tests coercion when the line does not intersect and is in quadrant 2."""
+        box = self.MakeBox(1, 2, 1, 2)
 
-    assert_array_equal(polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
-                       numpy.matrix([[1.0], [1.0]]))
+        # x1 = -x2
+        K = numpy.matrix([[1, 1]])
+        w = 0
+
+        assert_array_equal(
+            polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
+            numpy.matrix([[1.0], [1.0]]))
 
 
 if __name__ == '__main__':
-  unittest.main()
+    unittest.main()
diff --git a/y2016/control_loops/python/arm.py b/y2016/control_loops/python/arm.py
index 3c47934..0b5d87c 100755
--- a/y2016/control_loops/python/arm.py
+++ b/y2016/control_loops/python/arm.py
@@ -44,8 +44,8 @@
 
         self.A_continuous = numpy.matrix(numpy.zeros((4, 4)))
         self.A_continuous[0:2, 0:2] = self._shoulder.A_continuous
-        self.A_continuous[2:4, 0:2] = (
-            self._shoulder.A_continuous - self._shooter.A_continuous)
+        self.A_continuous[2:4, 0:2] = (self._shoulder.A_continuous -
+                                       self._shooter.A_continuous)
         self.A_continuous[2:4, 2:4] = self._shooter.A_continuous
 
         self.B_continuous = numpy.matrix(numpy.zeros((4, 2)))
@@ -131,8 +131,11 @@
         self.R[0, 0] = r_voltage**2.0
         self.R[1, 1] = r_voltage**2.0
 
-        self.KalmanGain, self.Q_steady = controls.kalman(
-            A=self.A, B=self.B, C=self.C, Q=self.Q, R=self.R)
+        self.KalmanGain, self.Q_steady = controls.kalman(A=self.A,
+                                                         B=self.B,
+                                                         C=self.C,
+                                                         Q=self.Q,
+                                                         R=self.R)
         self.L = self.A * self.KalmanGain
 
         self.U_max = numpy.matrix([[12.0], [12.0]])
@@ -182,8 +185,11 @@
         self.R[0, 0] = r_pos**2.0
         self.R[1, 1] = r_pos**2.0
 
-        self.KalmanGain, self.Q_steady = controls.kalman(
-            A=self.A, B=self.B, C=self.C, Q=self.Q, R=self.R)
+        self.KalmanGain, self.Q_steady = controls.kalman(A=self.A,
+                                                         B=self.B,
+                                                         C=self.C,
+                                                         Q=self.Q,
+                                                         R=self.R)
         self.L = self.A * self.KalmanGain
 
         self.K_unaugmented = self.K
@@ -365,20 +371,19 @@
         pylab.plot(self.t, self.x_shooter, label='x shooter')
         pylab.plot(self.t, self.x_hat_shooter, label='x_hat shooter')
         pylab.plot(self.t, self.goal_x_shooter, label='goal x shooter')
-        pylab.plot(
-            self.t,
-            map(operator.add, self.x_shooter, self.x_shoulder),
-            label='x shooter ground')
-        pylab.plot(
-            self.t,
-            map(operator.add, self.x_hat_shooter, self.x_hat_shoulder),
-            label='x_hat shooter ground')
+        pylab.plot(self.t,
+                   map(operator.add, self.x_shooter, self.x_shoulder),
+                   label='x shooter ground')
+        pylab.plot(self.t,
+                   map(operator.add, self.x_hat_shooter, self.x_hat_shoulder),
+                   label='x_hat shooter ground')
         pylab.legend()
 
         pylab.subplot(3, 1, 2)
         pylab.plot(self.t, self.u_shoulder, label='u shoulder')
-        pylab.plot(
-            self.t, self.offset_shoulder, label='voltage_offset shoulder')
+        pylab.plot(self.t,
+                   self.offset_shoulder,
+                   label='voltage_offset shoulder')
         pylab.plot(self.t, self.u_shooter, label='u shooter')
         pylab.plot(self.t, self.offset_shooter, label='voltage_offset shooter')
         pylab.legend()
@@ -401,8 +406,8 @@
     J_decelerating = 7
 
     arm = Arm(name='AcceleratingArm', J=J_accelerating)
-    arm_integral_controller = IntegralArm(
-        name='AcceleratingIntegralArm', J=J_accelerating)
+    arm_integral_controller = IntegralArm(name='AcceleratingIntegralArm',
+                                          J=J_accelerating)
     arm_observer = IntegralArm()
 
     # Test moving the shoulder with constant separation.
@@ -418,12 +423,11 @@
     arm.X = initial_X[0:4, 0]
     arm_observer.X = initial_X
 
-    scenario_plotter.run_test(
-        arm=arm,
-        end_goal=R,
-        iterations=300,
-        controller=arm_integral_controller,
-        observer=arm_observer)
+    scenario_plotter.run_test(arm=arm,
+                              end_goal=R,
+                              iterations=300,
+                              controller=arm_integral_controller,
+                              observer=arm_observer)
 
     if len(argv) != 5:
         glog.fatal(
@@ -432,8 +436,9 @@
     else:
         namespaces = ['y2016', 'control_loops', 'superstructure']
         decelerating_arm = Arm(name='DeceleratingArm', J=J_decelerating)
-        loop_writer = control_loop.ControlLoopWriter(
-            'Arm', [arm, decelerating_arm], namespaces=namespaces)
+        loop_writer = control_loop.ControlLoopWriter('Arm',
+                                                     [arm, decelerating_arm],
+                                                     namespaces=namespaces)
         loop_writer.Write(argv[1], argv[2])
 
         decelerating_integral_arm_controller = IntegralArm(
diff --git a/y2016/control_loops/python/drivetrain.py b/y2016/control_loops/python/drivetrain.py
index 8abdd74..a0205e7 100755
--- a/y2016/control_loops/python/drivetrain.py
+++ b/y2016/control_loops/python/drivetrain.py
@@ -11,31 +11,34 @@
 
 gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
 
-kDrivetrain = drivetrain.DrivetrainParams(J = 2.0,
-                                          mass = 68,
-                                          robot_radius = 0.601 / 2.0,
-                                          wheel_radius = 0.097155 * 0.9811158901447808 / 118.0 * 115.75,
-                                          G_high = 14.0 / 48.0 * 28.0 / 50.0 * 18.0 / 36.0,
-                                          G_low = 14.0 / 48.0 * 18.0 / 60.0 * 18.0 / 36.0,
-                                          q_pos_low = 0.12,
-                                          q_pos_high = 0.14,
-                                          q_vel_low = 1.0,
-                                          q_vel_high = 0.95,
-                                          has_imu = False,
-                                          dt = 0.005,
-                                          controller_poles = [0.67, 0.67])
+kDrivetrain = drivetrain.DrivetrainParams(
+    J=2.0,
+    mass=68,
+    robot_radius=0.601 / 2.0,
+    wheel_radius=0.097155 * 0.9811158901447808 / 118.0 * 115.75,
+    G_high=14.0 / 48.0 * 28.0 / 50.0 * 18.0 / 36.0,
+    G_low=14.0 / 48.0 * 18.0 / 60.0 * 18.0 / 36.0,
+    q_pos_low=0.12,
+    q_pos_high=0.14,
+    q_vel_low=1.0,
+    q_vel_high=0.95,
+    has_imu=False,
+    dt=0.005,
+    controller_poles=[0.67, 0.67])
+
 
 def main(argv):
-  argv = FLAGS(argv)
-  glog.init()
+    argv = FLAGS(argv)
+    glog.init()
 
-  if FLAGS.plot:
-    drivetrain.PlotDrivetrainMotions(kDrivetrain)
-  elif len(argv) != 5:
-    print("Expected .h file name and .cc file name")
-  else:
-    # Write the generated constants out to a file.
-    drivetrain.WriteDrivetrain(argv[1:3], argv[3:5], 'y2016', kDrivetrain)
+    if FLAGS.plot:
+        drivetrain.PlotDrivetrainMotions(kDrivetrain)
+    elif len(argv) != 5:
+        print("Expected .h file name and .cc file name")
+    else:
+        # Write the generated constants out to a file.
+        drivetrain.WriteDrivetrain(argv[1:3], argv[3:5], 'y2016', kDrivetrain)
+
 
 if __name__ == '__main__':
-  sys.exit(main(sys.argv))
+    sys.exit(main(sys.argv))
diff --git a/y2016/control_loops/python/polydrivetrain.py b/y2016/control_loops/python/polydrivetrain.py
index 1b7a0ba..69a7be9 100755
--- a/y2016/control_loops/python/polydrivetrain.py
+++ b/y2016/control_loops/python/polydrivetrain.py
@@ -12,20 +12,22 @@
 FLAGS = gflags.FLAGS
 
 try:
-  gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
+    gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
 except gflags.DuplicateFlagError:
-  pass
+    pass
+
 
 def main(argv):
-  if FLAGS.plot:
-    polydrivetrain.PlotPolyDrivetrainMotions(drivetrain.kDrivetrain)
-  elif len(argv) != 7:
-    glog.fatal('Expected .h file name and .cc file name')
-  else:
-    polydrivetrain.WritePolyDrivetrain(argv[1:3], argv[3:5], argv[5:7], 'y2016',
-                                       drivetrain.kDrivetrain)
+    if FLAGS.plot:
+        polydrivetrain.PlotPolyDrivetrainMotions(drivetrain.kDrivetrain)
+    elif len(argv) != 7:
+        glog.fatal('Expected .h file name and .cc file name')
+    else:
+        polydrivetrain.WritePolyDrivetrain(argv[1:3], argv[3:5], argv[5:7],
+                                           'y2016', drivetrain.kDrivetrain)
+
 
 if __name__ == '__main__':
-  argv = FLAGS(sys.argv)
-  glog.init()
-  sys.exit(main(argv))
+    argv = FLAGS(sys.argv)
+    glog.init()
+    sys.exit(main(argv))
diff --git a/y2016/control_loops/python/polydrivetrain_test.py b/y2016/control_loops/python/polydrivetrain_test.py
index 8e0176e..a5bac4a 100755
--- a/y2016/control_loops/python/polydrivetrain_test.py
+++ b/y2016/control_loops/python/polydrivetrain_test.py
@@ -10,73 +10,72 @@
 
 
 class TestVelocityDrivetrain(unittest.TestCase):
-  def MakeBox(self, x1_min, x1_max, x2_min, x2_max):
-    H = numpy.matrix([[1, 0],
-                      [-1, 0],
-                      [0, 1],
-                      [0, -1]])
-    K = numpy.matrix([[x1_max],
-                      [-x1_min],
-                      [x2_max],
-                      [-x2_min]])
-    return polytope.HPolytope(H, K)
 
-  def test_coerce_inside(self):
-    """Tests coercion when the point is inside the box."""
-    box = self.MakeBox(1, 2, 1, 2)
+    def MakeBox(self, x1_min, x1_max, x2_min, x2_max):
+        H = numpy.matrix([[1, 0], [-1, 0], [0, 1], [0, -1]])
+        K = numpy.matrix([[x1_max], [-x1_min], [x2_max], [-x2_min]])
+        return polytope.HPolytope(H, K)
 
-    # x1 = x2
-    K = numpy.matrix([[1, -1]])
-    w = 0
+    def test_coerce_inside(self):
+        """Tests coercion when the point is inside the box."""
+        box = self.MakeBox(1, 2, 1, 2)
 
-    assert_array_equal(polydrivetrain.CoerceGoal(box, K, w,
-                                                 numpy.matrix([[1.5], [1.5]])),
-                       numpy.matrix([[1.5], [1.5]]))
+        # x1 = x2
+        K = numpy.matrix([[1, -1]])
+        w = 0
 
-  def test_coerce_outside_intersect(self):
-    """Tests coercion when the line intersects the box."""
-    box = self.MakeBox(1, 2, 1, 2)
+        assert_array_equal(
+            polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[1.5], [1.5]])),
+            numpy.matrix([[1.5], [1.5]]))
 
-    # x1 = x2
-    K = numpy.matrix([[1, -1]])
-    w = 0
+    def test_coerce_outside_intersect(self):
+        """Tests coercion when the line intersects the box."""
+        box = self.MakeBox(1, 2, 1, 2)
 
-    assert_array_equal(polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
-                       numpy.matrix([[2.0], [2.0]]))
+        # x1 = x2
+        K = numpy.matrix([[1, -1]])
+        w = 0
 
-  def test_coerce_outside_no_intersect(self):
-    """Tests coercion when the line does not intersect the box."""
-    box = self.MakeBox(3, 4, 1, 2)
+        assert_array_equal(
+            polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
+            numpy.matrix([[2.0], [2.0]]))
 
-    # x1 = x2
-    K = numpy.matrix([[1, -1]])
-    w = 0
+    def test_coerce_outside_no_intersect(self):
+        """Tests coercion when the line does not intersect the box."""
+        box = self.MakeBox(3, 4, 1, 2)
 
-    assert_array_equal(polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
-                       numpy.matrix([[3.0], [2.0]]))
+        # x1 = x2
+        K = numpy.matrix([[1, -1]])
+        w = 0
 
-  def test_coerce_middle_of_edge(self):
-    """Tests coercion when the line intersects the middle of an edge."""
-    box = self.MakeBox(0, 4, 1, 2)
+        assert_array_equal(
+            polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
+            numpy.matrix([[3.0], [2.0]]))
 
-    # x1 = x2
-    K = numpy.matrix([[-1, 1]])
-    w = 0
+    def test_coerce_middle_of_edge(self):
+        """Tests coercion when the line intersects the middle of an edge."""
+        box = self.MakeBox(0, 4, 1, 2)
 
-    assert_array_equal(polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
-                       numpy.matrix([[2.0], [2.0]]))
+        # x1 = x2
+        K = numpy.matrix([[-1, 1]])
+        w = 0
 
-  def test_coerce_perpendicular_line(self):
-    """Tests coercion when the line does not intersect and is in quadrant 2."""
-    box = self.MakeBox(1, 2, 1, 2)
+        assert_array_equal(
+            polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
+            numpy.matrix([[2.0], [2.0]]))
 
-    # x1 = -x2
-    K = numpy.matrix([[1, 1]])
-    w = 0
+    def test_coerce_perpendicular_line(self):
+        """Tests coercion when the line does not intersect and is in quadrant 2."""
+        box = self.MakeBox(1, 2, 1, 2)
 
-    assert_array_equal(polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
-                       numpy.matrix([[1.0], [1.0]]))
+        # x1 = -x2
+        K = numpy.matrix([[1, 1]])
+        w = 0
+
+        assert_array_equal(
+            polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
+            numpy.matrix([[1.0], [1.0]]))
 
 
 if __name__ == '__main__':
-  unittest.main()
+    unittest.main()
diff --git a/y2016/control_loops/python/shooter.py b/y2016/control_loops/python/shooter.py
index 70af1b0..cde9c8d 100755
--- a/y2016/control_loops/python/shooter.py
+++ b/y2016/control_loops/python/shooter.py
@@ -135,8 +135,11 @@
         r_pos = 0.05
         self.R = numpy.matrix([[(r_pos**2.0)]])
 
-        self.KalmanGain, self.Q_steady = controls.kalman(
-            A=self.A, B=self.B, C=self.C, Q=self.Q, R=self.R)
+        self.KalmanGain, self.Q_steady = controls.kalman(A=self.A,
+                                                         B=self.B,
+                                                         C=self.C,
+                                                         Q=self.Q,
+                                                         R=self.R)
         self.L = self.A * self.KalmanGain
 
         self.K_unaugmented = self.K
@@ -256,12 +259,11 @@
 
     initial_X = numpy.matrix([[0.0], [0.0]])
     R = numpy.matrix([[0.0], [100.0], [0.0]])
-    scenario_plotter.run_test(
-        shooter,
-        goal=R,
-        controller_shooter=shooter_controller,
-        observer_shooter=observer_shooter,
-        iterations=200)
+    scenario_plotter.run_test(shooter,
+                              goal=R,
+                              controller_shooter=shooter_controller,
+                              observer_shooter=observer_shooter,
+                              iterations=200)
 
     if FLAGS.plot:
         scenario_plotter.Plot()
@@ -271,8 +273,8 @@
     else:
         namespaces = ['y2016', 'control_loops', 'shooter']
         shooter = Shooter('Shooter')
-        loop_writer = control_loop.ControlLoopWriter(
-            'Shooter', [shooter], namespaces=namespaces)
+        loop_writer = control_loop.ControlLoopWriter('Shooter', [shooter],
+                                                     namespaces=namespaces)
         loop_writer.Write(argv[1], argv[2])
 
         integral_shooter = IntegralShooter('IntegralShooter')
diff --git a/y2016/control_loops/python/shoulder.py b/y2016/control_loops/python/shoulder.py
index 7f46b2c..d406089 100755
--- a/y2016/control_loops/python/shoulder.py
+++ b/y2016/control_loops/python/shoulder.py
@@ -12,155 +12,168 @@
 FLAGS = gflags.FLAGS
 
 try:
-  gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
+    gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
 except gflags.DuplicateFlagError:
-  pass
+    pass
+
 
 class Shoulder(control_loop.ControlLoop):
-  def __init__(self, name="Shoulder", J=None):
-    super(Shoulder, self).__init__(name)
-    # TODO(constants): Update all of these & retune poles.
-    # Stall Torque in N m
-    self.stall_torque = 0.71
-    # Stall Current in Amps
-    self.stall_current = 134
-    # Free Speed in RPM
-    self.free_speed = 18730
-    # Free Current in Amps
-    self.free_current = 0.7
 
-    # Resistance of the motor
-    self.R = 12.0 / self.stall_current
-    # Motor velocity constant
-    self.Kv = ((self.free_speed / 60.0 * 2.0 * numpy.pi) /
-               (12.0 - self.R * self.free_current))
-    # Torque constant
-    self.Kt = self.stall_torque / self.stall_current
-    # Gear ratio
-    self.G = (56.0 / 12.0) * (64.0 / 14.0) * (72.0 / 18.0) * (42.0 / 12.0)
+    def __init__(self, name="Shoulder", J=None):
+        super(Shoulder, self).__init__(name)
+        # TODO(constants): Update all of these & retune poles.
+        # Stall Torque in N m
+        self.stall_torque = 0.71
+        # Stall Current in Amps
+        self.stall_current = 134
+        # Free Speed in RPM
+        self.free_speed = 18730
+        # Free Current in Amps
+        self.free_current = 0.7
 
-    if J is None:
-      self.J = 10.0
-    else:
-      self.J = J
+        # Resistance of the motor
+        self.R = 12.0 / self.stall_current
+        # Motor velocity constant
+        self.Kv = ((self.free_speed / 60.0 * 2.0 * numpy.pi) /
+                   (12.0 - self.R * self.free_current))
+        # Torque constant
+        self.Kt = self.stall_torque / self.stall_current
+        # Gear ratio
+        self.G = (56.0 / 12.0) * (64.0 / 14.0) * (72.0 / 18.0) * (42.0 / 12.0)
 
-    # Control loop time step
-    self.dt = 0.005
+        if J is None:
+            self.J = 10.0
+        else:
+            self.J = J
 
-    # State is [position, velocity]
-    # Input is [Voltage]
+        # Control loop time step
+        self.dt = 0.005
 
-    C1 = self.G * self.G * self.Kt / (self.R  * self.J * self.Kv)
-    C2 = self.Kt * self.G / (self.J * self.R)
+        # State is [position, velocity]
+        # Input is [Voltage]
 
-    self.A_continuous = numpy.matrix(
-        [[0, 1],
-         [0, -C1]])
+        C1 = self.G * self.G * self.Kt / (self.R * self.J * self.Kv)
+        C2 = self.Kt * self.G / (self.J * self.R)
 
-    # Start with the unmodified input
-    self.B_continuous = numpy.matrix(
-        [[0],
-         [C2]])
+        self.A_continuous = numpy.matrix([[0, 1], [0, -C1]])
 
-    self.C = numpy.matrix([[1, 0]])
-    self.D = numpy.matrix([[0]])
+        # Start with the unmodified input
+        self.B_continuous = numpy.matrix([[0], [C2]])
 
-    self.A, self.B = self.ContinuousToDiscrete(
-        self.A_continuous, self.B_continuous, self.dt)
+        self.C = numpy.matrix([[1, 0]])
+        self.D = numpy.matrix([[0]])
 
-    controllability = controls.ctrb(self.A, self.B)
+        self.A, self.B = self.ContinuousToDiscrete(self.A_continuous,
+                                                   self.B_continuous, self.dt)
 
-    q_pos = 0.16
-    q_vel = 0.95
-    self.Q = numpy.matrix([[(1.0 / (q_pos ** 2.0)), 0.0],
-                           [0.0, (1.0 / (q_vel ** 2.0))]])
+        controllability = controls.ctrb(self.A, self.B)
 
-    self.R = numpy.matrix([[(1.0 / (12.0 ** 2.0))]])
-    self.K = controls.dlqr(self.A, self.B, self.Q, self.R)
+        q_pos = 0.16
+        q_vel = 0.95
+        self.Q = numpy.matrix([[(1.0 / (q_pos**2.0)), 0.0],
+                               [0.0, (1.0 / (q_vel**2.0))]])
 
-    self.Kff = controls.TwoStateFeedForwards(self.B, self.Q)
+        self.R = numpy.matrix([[(1.0 / (12.0**2.0))]])
+        self.K = controls.dlqr(self.A, self.B, self.Q, self.R)
 
-    glog.debug('Poles are %s for %s',
-               repr(numpy.linalg.eig(self.A - self.B * self.K)[0]), self._name)
+        self.Kff = controls.TwoStateFeedForwards(self.B, self.Q)
 
-    q_pos = 0.05
-    q_vel = 2.65
-    self.Q = numpy.matrix([[(q_pos ** 2.0), 0.0],
-                           [0.0, (q_vel ** 2.0)]])
+        glog.debug('Poles are %s for %s',
+                   repr(numpy.linalg.eig(self.A - self.B * self.K)[0]),
+                   self._name)
 
-    r_volts = 0.025
-    self.R = numpy.matrix([[(r_volts ** 2.0)]])
+        q_pos = 0.05
+        q_vel = 2.65
+        self.Q = numpy.matrix([[(q_pos**2.0), 0.0], [0.0, (q_vel**2.0)]])
 
-    self.KalmanGain, self.Q_steady = controls.kalman(
-        A=self.A, B=self.B, C=self.C, Q=self.Q, R=self.R)
+        r_volts = 0.025
+        self.R = numpy.matrix([[(r_volts**2.0)]])
 
-    self.L = self.A * self.KalmanGain
+        self.KalmanGain, self.Q_steady = controls.kalman(A=self.A,
+                                                         B=self.B,
+                                                         C=self.C,
+                                                         Q=self.Q,
+                                                         R=self.R)
 
-    # The box formed by U_min and U_max must encompass all possible values,
-    # or else Austin's code gets angry.
-    self.U_max = numpy.matrix([[12.0]])
-    self.U_min = numpy.matrix([[-12.0]])
+        self.L = self.A * self.KalmanGain
 
-    self.InitializeState()
+        # The box formed by U_min and U_max must encompass all possible values,
+        # or else Austin's code gets angry.
+        self.U_max = numpy.matrix([[12.0]])
+        self.U_min = numpy.matrix([[-12.0]])
+
+        self.InitializeState()
+
 
 class IntegralShoulder(Shoulder):
-  def __init__(self, name="IntegralShoulder"):
-    super(IntegralShoulder, self).__init__(name=name)
 
-    self.A_continuous_unaugmented = self.A_continuous
-    self.B_continuous_unaugmented = self.B_continuous
+    def __init__(self, name="IntegralShoulder"):
+        super(IntegralShoulder, self).__init__(name=name)
 
-    self.A_continuous = numpy.matrix(numpy.zeros((3, 3)))
-    self.A_continuous[0:2, 0:2] = self.A_continuous_unaugmented
-    self.A_continuous[0:2, 2] = self.B_continuous_unaugmented
+        self.A_continuous_unaugmented = self.A_continuous
+        self.B_continuous_unaugmented = self.B_continuous
 
-    self.B_continuous = numpy.matrix(numpy.zeros((3, 1)))
-    self.B_continuous[0:2, 0] = self.B_continuous_unaugmented
+        self.A_continuous = numpy.matrix(numpy.zeros((3, 3)))
+        self.A_continuous[0:2, 0:2] = self.A_continuous_unaugmented
+        self.A_continuous[0:2, 2] = self.B_continuous_unaugmented
 
-    self.C_unaugmented = self.C
-    self.C = numpy.matrix(numpy.zeros((1, 3)))
-    self.C[0:1, 0:2] = self.C_unaugmented
+        self.B_continuous = numpy.matrix(numpy.zeros((3, 1)))
+        self.B_continuous[0:2, 0] = self.B_continuous_unaugmented
 
-    self.A, self.B = self.ContinuousToDiscrete(self.A_continuous, self.B_continuous, self.dt)
+        self.C_unaugmented = self.C
+        self.C = numpy.matrix(numpy.zeros((1, 3)))
+        self.C[0:1, 0:2] = self.C_unaugmented
 
-    q_pos = 0.08
-    q_vel = 4.00
-    q_voltage = 6.0
-    self.Q = numpy.matrix([[(q_pos ** 2.0), 0.0, 0.0],
-                           [0.0, (q_vel ** 2.0), 0.0],
-                           [0.0, 0.0, (q_voltage ** 2.0)]])
+        self.A, self.B = self.ContinuousToDiscrete(self.A_continuous,
+                                                   self.B_continuous, self.dt)
 
-    r_pos = 0.05
-    self.R = numpy.matrix([[(r_pos ** 2.0)]])
+        q_pos = 0.08
+        q_vel = 4.00
+        q_voltage = 6.0
+        self.Q = numpy.matrix([[(q_pos**2.0), 0.0, 0.0],
+                               [0.0, (q_vel**2.0), 0.0],
+                               [0.0, 0.0, (q_voltage**2.0)]])
 
-    self.KalmanGain, self.Q_steady = controls.kalman(
-        A=self.A, B=self.B, C=self.C, Q=self.Q, R=self.R)
-    self.L = self.A * self.KalmanGain
+        r_pos = 0.05
+        self.R = numpy.matrix([[(r_pos**2.0)]])
 
-    self.K_unaugmented = self.K
-    self.K = numpy.matrix(numpy.zeros((1, 3)))
-    self.K[0, 0:2] = self.K_unaugmented
-    self.K[0, 2] = 1
-    self.Kff_unaugmented = self.Kff
-    self.Kff = numpy.matrix(numpy.zeros((1, 3)))
-    self.Kff[0, 0:2] = self.Kff_unaugmented
+        self.KalmanGain, self.Q_steady = controls.kalman(A=self.A,
+                                                         B=self.B,
+                                                         C=self.C,
+                                                         Q=self.Q,
+                                                         R=self.R)
+        self.L = self.A * self.KalmanGain
 
-    self.InitializeState()
+        self.K_unaugmented = self.K
+        self.K = numpy.matrix(numpy.zeros((1, 3)))
+        self.K[0, 0:2] = self.K_unaugmented
+        self.K[0, 2] = 1
+        self.Kff_unaugmented = self.Kff
+        self.Kff = numpy.matrix(numpy.zeros((1, 3)))
+        self.Kff[0, 0:2] = self.Kff_unaugmented
+
+        self.InitializeState()
+
 
 class ScenarioPlotter(object):
-  def __init__(self):
-    # Various lists for graphing things.
-    self.t = []
-    self.x = []
-    self.v = []
-    self.a = []
-    self.x_hat = []
-    self.u = []
-    self.offset = []
 
-  def run_test(self, shoulder, goal, iterations=200, controller_shoulder=None,
-             observer_shoulder=None):
-    """Runs the shoulder plant with an initial condition and goal.
+    def __init__(self):
+        # Various lists for graphing things.
+        self.t = []
+        self.x = []
+        self.v = []
+        self.a = []
+        self.x_hat = []
+        self.u = []
+        self.offset = []
+
+    def run_test(self,
+                 shoulder,
+                 goal,
+                 iterations=200,
+                 controller_shoulder=None,
+                 observer_shoulder=None):
+        """Runs the shoulder plant with an initial condition and goal.
 
       Test for whether the goal has been reached and whether the separation
       goes  outside of the initial and goal values by more than
@@ -178,100 +191,106 @@
             use the actual state.
     """
 
-    if controller_shoulder is None:
-      controller_shoulder = shoulder
+        if controller_shoulder is None:
+            controller_shoulder = shoulder
 
-    vbat = 12.0
+        vbat = 12.0
 
-    if self.t:
-      initial_t = self.t[-1] + shoulder.dt
-    else:
-      initial_t = 0
+        if self.t:
+            initial_t = self.t[-1] + shoulder.dt
+        else:
+            initial_t = 0
 
-    for i in range(iterations):
-      X_hat = shoulder.X
+        for i in range(iterations):
+            X_hat = shoulder.X
 
-      if observer_shoulder is not None:
-        X_hat = observer_shoulder.X_hat
-        self.x_hat.append(observer_shoulder.X_hat[0, 0])
+            if observer_shoulder is not None:
+                X_hat = observer_shoulder.X_hat
+                self.x_hat.append(observer_shoulder.X_hat[0, 0])
 
-      U = controller_shoulder.K * (goal - X_hat)
-      U[0, 0] = numpy.clip(U[0, 0], -vbat, vbat)
-      self.x.append(shoulder.X[0, 0])
+            U = controller_shoulder.K * (goal - X_hat)
+            U[0, 0] = numpy.clip(U[0, 0], -vbat, vbat)
+            self.x.append(shoulder.X[0, 0])
 
-      if self.v:
-        last_v = self.v[-1]
-      else:
-        last_v = 0
+            if self.v:
+                last_v = self.v[-1]
+            else:
+                last_v = 0
 
-      self.v.append(shoulder.X[1, 0])
-      self.a.append((self.v[-1] - last_v) / shoulder.dt)
+            self.v.append(shoulder.X[1, 0])
+            self.a.append((self.v[-1] - last_v) / shoulder.dt)
 
-      if observer_shoulder is not None:
-        observer_shoulder.Y = shoulder.Y
-        observer_shoulder.CorrectObserver(U)
-        self.offset.append(observer_shoulder.X_hat[2, 0])
+            if observer_shoulder is not None:
+                observer_shoulder.Y = shoulder.Y
+                observer_shoulder.CorrectObserver(U)
+                self.offset.append(observer_shoulder.X_hat[2, 0])
 
-      shoulder.Update(U + 2.0)
+            shoulder.Update(U + 2.0)
 
-      if observer_shoulder is not None:
-        observer_shoulder.PredictObserver(U)
+            if observer_shoulder is not None:
+                observer_shoulder.PredictObserver(U)
 
-      self.t.append(initial_t + i * shoulder.dt)
-      self.u.append(U[0, 0])
+            self.t.append(initial_t + i * shoulder.dt)
+            self.u.append(U[0, 0])
 
-      glog.debug('Time: %f', self.t[-1])
+            glog.debug('Time: %f', self.t[-1])
 
-  def Plot(self):
-    pylab.subplot(3, 1, 1)
-    pylab.plot(self.t, self.x, label='x')
-    pylab.plot(self.t, self.x_hat, label='x_hat')
-    pylab.legend()
+    def Plot(self):
+        pylab.subplot(3, 1, 1)
+        pylab.plot(self.t, self.x, label='x')
+        pylab.plot(self.t, self.x_hat, label='x_hat')
+        pylab.legend()
 
-    pylab.subplot(3, 1, 2)
-    pylab.plot(self.t, self.u, label='u')
-    pylab.plot(self.t, self.offset, label='voltage_offset')
-    pylab.legend()
+        pylab.subplot(3, 1, 2)
+        pylab.plot(self.t, self.u, label='u')
+        pylab.plot(self.t, self.offset, label='voltage_offset')
+        pylab.legend()
 
-    pylab.subplot(3, 1, 3)
-    pylab.plot(self.t, self.a, label='a')
-    pylab.legend()
+        pylab.subplot(3, 1, 3)
+        pylab.plot(self.t, self.a, label='a')
+        pylab.legend()
 
-    pylab.show()
+        pylab.show()
 
 
 def main(argv):
-  argv = FLAGS(argv)
+    argv = FLAGS(argv)
 
-  scenario_plotter = ScenarioPlotter()
+    scenario_plotter = ScenarioPlotter()
 
-  shoulder = Shoulder()
-  shoulder_controller = IntegralShoulder()
-  observer_shoulder = IntegralShoulder()
+    shoulder = Shoulder()
+    shoulder_controller = IntegralShoulder()
+    observer_shoulder = IntegralShoulder()
 
-  # Test moving the shoulder with constant separation.
-  initial_X = numpy.matrix([[0.0], [0.0]])
-  R = numpy.matrix([[numpy.pi / 2.0], [0.0], [0.0]])
-  scenario_plotter.run_test(shoulder, goal=R, controller_shoulder=shoulder_controller,
-                            observer_shoulder=observer_shoulder, iterations=200)
+    # Test moving the shoulder with constant separation.
+    initial_X = numpy.matrix([[0.0], [0.0]])
+    R = numpy.matrix([[numpy.pi / 2.0], [0.0], [0.0]])
+    scenario_plotter.run_test(shoulder,
+                              goal=R,
+                              controller_shoulder=shoulder_controller,
+                              observer_shoulder=observer_shoulder,
+                              iterations=200)
 
-  if FLAGS.plot:
-    scenario_plotter.Plot()
+    if FLAGS.plot:
+        scenario_plotter.Plot()
 
-  # Write the generated constants out to a file.
-  if len(argv) != 5:
-    glog.fatal('Expected .h file name and .cc file name for the shoulder and integral shoulder.')
-  else:
-    namespaces = ['y2016', 'control_loops', 'superstructure']
-    shoulder = Shoulder("Shoulder")
-    loop_writer = control_loop.ControlLoopWriter('Shoulder', [shoulder],
-                                                 namespaces=namespaces)
-    loop_writer.Write(argv[1], argv[2])
+    # Write the generated constants out to a file.
+    if len(argv) != 5:
+        glog.fatal(
+            'Expected .h file name and .cc file name for the shoulder and integral shoulder.'
+        )
+    else:
+        namespaces = ['y2016', 'control_loops', 'superstructure']
+        shoulder = Shoulder("Shoulder")
+        loop_writer = control_loop.ControlLoopWriter('Shoulder', [shoulder],
+                                                     namespaces=namespaces)
+        loop_writer.Write(argv[1], argv[2])
 
-    integral_shoulder = IntegralShoulder("IntegralShoulder")
-    integral_loop_writer = control_loop.ControlLoopWriter("IntegralShoulder", [integral_shoulder],
-                                                          namespaces=namespaces)
-    integral_loop_writer.Write(argv[3], argv[4])
+        integral_shoulder = IntegralShoulder("IntegralShoulder")
+        integral_loop_writer = control_loop.ControlLoopWriter(
+            "IntegralShoulder", [integral_shoulder], namespaces=namespaces)
+        integral_loop_writer.Write(argv[3], argv[4])
+
 
 if __name__ == '__main__':
-  sys.exit(main(sys.argv))
+    sys.exit(main(sys.argv))
diff --git a/y2016/control_loops/python/wrist.py b/y2016/control_loops/python/wrist.py
index ca6874b..12dcc39 100755
--- a/y2016/control_loops/python/wrist.py
+++ b/y2016/control_loops/python/wrist.py
@@ -11,152 +11,165 @@
 FLAGS = gflags.FLAGS
 
 try:
-  gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
+    gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
 except gflags.DuplicateFlagError:
-  pass
+    pass
+
 
 class Wrist(control_loop.ControlLoop):
-  def __init__(self, name="Wrist"):
-    super(Wrist, self).__init__(name)
-    # TODO(constants): Update all of these & retune poles.
-    # Stall Torque in N m
-    self.stall_torque = 0.71
-    # Stall Current in Amps
-    self.stall_current = 134
-    # Free Speed in RPM
-    self.free_speed = 18730
-    # Free Current in Amps
-    self.free_current = 0.7
 
-    # Resistance of the motor
-    self.R = 12.0 / self.stall_current
-    # Motor velocity constant
-    self.Kv = ((self.free_speed / 60.0 * 2.0 * numpy.pi) /
-               (12.0 - self.R * self.free_current))
-    # Torque constant
-    self.Kt = self.stall_torque / self.stall_current
-    # Gear ratio
-    self.G = (56.0 / 12.0) * (54.0 / 14.0) * (64.0 / 18.0) * (48.0 / 16.0)
+    def __init__(self, name="Wrist"):
+        super(Wrist, self).__init__(name)
+        # TODO(constants): Update all of these & retune poles.
+        # Stall Torque in N m
+        self.stall_torque = 0.71
+        # Stall Current in Amps
+        self.stall_current = 134
+        # Free Speed in RPM
+        self.free_speed = 18730
+        # Free Current in Amps
+        self.free_current = 0.7
 
-    self.J = 0.35
+        # Resistance of the motor
+        self.R = 12.0 / self.stall_current
+        # Motor velocity constant
+        self.Kv = ((self.free_speed / 60.0 * 2.0 * numpy.pi) /
+                   (12.0 - self.R * self.free_current))
+        # Torque constant
+        self.Kt = self.stall_torque / self.stall_current
+        # Gear ratio
+        self.G = (56.0 / 12.0) * (54.0 / 14.0) * (64.0 / 18.0) * (48.0 / 16.0)
 
-    # Control loop time step
-    self.dt = 0.005
+        self.J = 0.35
 
-    # State is [position, velocity]
-    # Input is [Voltage]
+        # Control loop time step
+        self.dt = 0.005
 
-    C1 = self.G * self.G * self.Kt / (self.R  * self.J * self.Kv)
-    C2 = self.Kt * self.G / (self.J * self.R)
+        # State is [position, velocity]
+        # Input is [Voltage]
 
-    self.A_continuous = numpy.matrix(
-        [[0, 1],
-         [0, -C1]])
+        C1 = self.G * self.G * self.Kt / (self.R * self.J * self.Kv)
+        C2 = self.Kt * self.G / (self.J * self.R)
 
-    # Start with the unmodified input
-    self.B_continuous = numpy.matrix(
-        [[0],
-         [C2]])
+        self.A_continuous = numpy.matrix([[0, 1], [0, -C1]])
 
-    self.C = numpy.matrix([[1, 0]])
-    self.D = numpy.matrix([[0]])
+        # Start with the unmodified input
+        self.B_continuous = numpy.matrix([[0], [C2]])
 
-    self.A, self.B = self.ContinuousToDiscrete(
-        self.A_continuous, self.B_continuous, self.dt)
+        self.C = numpy.matrix([[1, 0]])
+        self.D = numpy.matrix([[0]])
 
-    controllability = controls.ctrb(self.A, self.B)
+        self.A, self.B = self.ContinuousToDiscrete(self.A_continuous,
+                                                   self.B_continuous, self.dt)
 
-    q_pos = 0.20
-    q_vel = 8.0
-    self.Q = numpy.matrix([[(1.0 / (q_pos ** 2.0)), 0.0],
-                           [0.0, (1.0 / (q_vel ** 2.0))]])
+        controllability = controls.ctrb(self.A, self.B)
 
-    self.R = numpy.matrix([[(1.0 / (12.0 ** 2.0))]])
-    self.K = controls.dlqr(self.A, self.B, self.Q, self.R)
+        q_pos = 0.20
+        q_vel = 8.0
+        self.Q = numpy.matrix([[(1.0 / (q_pos**2.0)), 0.0],
+                               [0.0, (1.0 / (q_vel**2.0))]])
 
-    glog.debug('Poles are %s for %s',
-               repr(numpy.linalg.eig(self.A - self.B * self.K)[0]), self._name)
+        self.R = numpy.matrix([[(1.0 / (12.0**2.0))]])
+        self.K = controls.dlqr(self.A, self.B, self.Q, self.R)
 
-    q_pos = 0.05
-    q_vel = 2.65
-    self.Q = numpy.matrix([[(q_pos ** 2.0), 0.0],
-                           [0.0, (q_vel ** 2.0)]])
+        glog.debug('Poles are %s for %s',
+                   repr(numpy.linalg.eig(self.A - self.B * self.K)[0]),
+                   self._name)
 
-    r_volts = 0.025
-    self.R = numpy.matrix([[(r_volts ** 2.0)]])
+        q_pos = 0.05
+        q_vel = 2.65
+        self.Q = numpy.matrix([[(q_pos**2.0), 0.0], [0.0, (q_vel**2.0)]])
 
-    self.KalmanGain, self.Q_steady = controls.kalman(
-        A=self.A, B=self.B, C=self.C, Q=self.Q, R=self.R)
+        r_volts = 0.025
+        self.R = numpy.matrix([[(r_volts**2.0)]])
 
-    self.L = self.A * self.KalmanGain
+        self.KalmanGain, self.Q_steady = controls.kalman(A=self.A,
+                                                         B=self.B,
+                                                         C=self.C,
+                                                         Q=self.Q,
+                                                         R=self.R)
 
-    # The box formed by U_min and U_max must encompass all possible values,
-    # or else Austin's code gets angry.
-    self.U_max = numpy.matrix([[12.0]])
-    self.U_min = numpy.matrix([[-12.0]])
+        self.L = self.A * self.KalmanGain
 
-    self.Kff = controls.TwoStateFeedForwards(self.B, self.Q)
+        # The box formed by U_min and U_max must encompass all possible values,
+        # or else Austin's code gets angry.
+        self.U_max = numpy.matrix([[12.0]])
+        self.U_min = numpy.matrix([[-12.0]])
 
-    self.InitializeState()
+        self.Kff = controls.TwoStateFeedForwards(self.B, self.Q)
+
+        self.InitializeState()
+
 
 class IntegralWrist(Wrist):
-  def __init__(self, name="IntegralWrist"):
-    super(IntegralWrist, self).__init__(name=name)
 
-    self.A_continuous_unaugmented = self.A_continuous
-    self.B_continuous_unaugmented = self.B_continuous
+    def __init__(self, name="IntegralWrist"):
+        super(IntegralWrist, self).__init__(name=name)
 
-    self.A_continuous = numpy.matrix(numpy.zeros((3, 3)))
-    self.A_continuous[0:2, 0:2] = self.A_continuous_unaugmented
-    self.A_continuous[0:2, 2] = self.B_continuous_unaugmented
+        self.A_continuous_unaugmented = self.A_continuous
+        self.B_continuous_unaugmented = self.B_continuous
 
-    self.B_continuous = numpy.matrix(numpy.zeros((3, 1)))
-    self.B_continuous[0:2, 0] = self.B_continuous_unaugmented
+        self.A_continuous = numpy.matrix(numpy.zeros((3, 3)))
+        self.A_continuous[0:2, 0:2] = self.A_continuous_unaugmented
+        self.A_continuous[0:2, 2] = self.B_continuous_unaugmented
 
-    self.C_unaugmented = self.C
-    self.C = numpy.matrix(numpy.zeros((1, 3)))
-    self.C[0:1, 0:2] = self.C_unaugmented
+        self.B_continuous = numpy.matrix(numpy.zeros((3, 1)))
+        self.B_continuous[0:2, 0] = self.B_continuous_unaugmented
 
-    self.A, self.B = self.ContinuousToDiscrete(self.A_continuous, self.B_continuous, self.dt)
+        self.C_unaugmented = self.C
+        self.C = numpy.matrix(numpy.zeros((1, 3)))
+        self.C[0:1, 0:2] = self.C_unaugmented
 
-    q_pos = 0.08
-    q_vel = 4.00
-    q_voltage = 1.5
-    self.Q = numpy.matrix([[(q_pos ** 2.0), 0.0, 0.0],
-                           [0.0, (q_vel ** 2.0), 0.0],
-                           [0.0, 0.0, (q_voltage ** 2.0)]])
+        self.A, self.B = self.ContinuousToDiscrete(self.A_continuous,
+                                                   self.B_continuous, self.dt)
 
-    r_pos = 0.05
-    self.R = numpy.matrix([[(r_pos ** 2.0)]])
+        q_pos = 0.08
+        q_vel = 4.00
+        q_voltage = 1.5
+        self.Q = numpy.matrix([[(q_pos**2.0), 0.0, 0.0],
+                               [0.0, (q_vel**2.0), 0.0],
+                               [0.0, 0.0, (q_voltage**2.0)]])
 
-    self.KalmanGain, self.Q_steady = controls.kalman(
-        A=self.A, B=self.B, C=self.C, Q=self.Q, R=self.R)
-    self.L = self.A * self.KalmanGain
+        r_pos = 0.05
+        self.R = numpy.matrix([[(r_pos**2.0)]])
 
-    self.K_unaugmented = self.K
-    self.K = numpy.matrix(numpy.zeros((1, 3)))
-    self.K[0, 0:2] = self.K_unaugmented
-    self.K[0, 2] = 1
-    self.Kff_unaugmented = self.Kff
-    self.Kff = numpy.matrix(numpy.zeros((1, 3)))
-    self.Kff[0, 0:2] = self.Kff_unaugmented
+        self.KalmanGain, self.Q_steady = controls.kalman(A=self.A,
+                                                         B=self.B,
+                                                         C=self.C,
+                                                         Q=self.Q,
+                                                         R=self.R)
+        self.L = self.A * self.KalmanGain
 
-    self.InitializeState()
+        self.K_unaugmented = self.K
+        self.K = numpy.matrix(numpy.zeros((1, 3)))
+        self.K[0, 0:2] = self.K_unaugmented
+        self.K[0, 2] = 1
+        self.Kff_unaugmented = self.Kff
+        self.Kff = numpy.matrix(numpy.zeros((1, 3)))
+        self.Kff[0, 0:2] = self.Kff_unaugmented
+
+        self.InitializeState()
+
 
 class ScenarioPlotter(object):
-  def __init__(self):
-    # Various lists for graphing things.
-    self.t = []
-    self.x = []
-    self.v = []
-    self.a = []
-    self.x_hat = []
-    self.u = []
-    self.offset = []
 
-  def run_test(self, wrist, goal, iterations=200, controller_wrist=None,
-             observer_wrist=None):
-    """Runs the wrist plant with an initial condition and goal.
+    def __init__(self):
+        # Various lists for graphing things.
+        self.t = []
+        self.x = []
+        self.v = []
+        self.a = []
+        self.x_hat = []
+        self.u = []
+        self.offset = []
+
+    def run_test(self,
+                 wrist,
+                 goal,
+                 iterations=200,
+                 controller_wrist=None,
+                 observer_wrist=None):
+        """Runs the wrist plant with an initial condition and goal.
 
       Test for whether the goal has been reached and whether the separation
       goes  outside of the initial and goal values by more than
@@ -174,99 +187,105 @@
             use the actual state.
     """
 
-    if controller_wrist is None:
-      controller_wrist = wrist
+        if controller_wrist is None:
+            controller_wrist = wrist
 
-    vbat = 12.0
+        vbat = 12.0
 
-    if self.t:
-      initial_t = self.t[-1] + wrist.dt
-    else:
-      initial_t = 0
+        if self.t:
+            initial_t = self.t[-1] + wrist.dt
+        else:
+            initial_t = 0
 
-    for i in range(iterations):
-      X_hat = wrist.X
+        for i in range(iterations):
+            X_hat = wrist.X
 
-      if observer_wrist is not None:
-        X_hat = observer_wrist.X_hat
-        self.x_hat.append(observer_wrist.X_hat[0, 0])
+            if observer_wrist is not None:
+                X_hat = observer_wrist.X_hat
+                self.x_hat.append(observer_wrist.X_hat[0, 0])
 
-      U = controller_wrist.K * (goal - X_hat)
-      U[0, 0] = numpy.clip(U[0, 0], -vbat, vbat)
-      self.x.append(wrist.X[0, 0])
+            U = controller_wrist.K * (goal - X_hat)
+            U[0, 0] = numpy.clip(U[0, 0], -vbat, vbat)
+            self.x.append(wrist.X[0, 0])
 
-      if self.v:
-        last_v = self.v[-1]
-      else:
-        last_v = 0
+            if self.v:
+                last_v = self.v[-1]
+            else:
+                last_v = 0
 
-      self.v.append(wrist.X[1, 0])
-      self.a.append((self.v[-1] - last_v) / wrist.dt)
+            self.v.append(wrist.X[1, 0])
+            self.a.append((self.v[-1] - last_v) / wrist.dt)
 
-      if observer_wrist is not None:
-        observer_wrist.Y = wrist.Y
-        observer_wrist.CorrectObserver(U)
-        self.offset.append(observer_wrist.X_hat[2, 0])
+            if observer_wrist is not None:
+                observer_wrist.Y = wrist.Y
+                observer_wrist.CorrectObserver(U)
+                self.offset.append(observer_wrist.X_hat[2, 0])
 
-      wrist.Update(U + 2.0)
+            wrist.Update(U + 2.0)
 
-      if observer_wrist is not None:
-        observer_wrist.PredictObserver(U)
+            if observer_wrist is not None:
+                observer_wrist.PredictObserver(U)
 
-      self.t.append(initial_t + i * wrist.dt)
-      self.u.append(U[0, 0])
+            self.t.append(initial_t + i * wrist.dt)
+            self.u.append(U[0, 0])
 
-      glog.debug('Time: %f', self.t[-1])
+            glog.debug('Time: %f', self.t[-1])
 
-  def Plot(self):
-    pylab.subplot(3, 1, 1)
-    pylab.plot(self.t, self.x, label='x')
-    pylab.plot(self.t, self.x_hat, label='x_hat')
-    pylab.legend()
+    def Plot(self):
+        pylab.subplot(3, 1, 1)
+        pylab.plot(self.t, self.x, label='x')
+        pylab.plot(self.t, self.x_hat, label='x_hat')
+        pylab.legend()
 
-    pylab.subplot(3, 1, 2)
-    pylab.plot(self.t, self.u, label='u')
-    pylab.plot(self.t, self.offset, label='voltage_offset')
-    pylab.legend()
+        pylab.subplot(3, 1, 2)
+        pylab.plot(self.t, self.u, label='u')
+        pylab.plot(self.t, self.offset, label='voltage_offset')
+        pylab.legend()
 
-    pylab.subplot(3, 1, 3)
-    pylab.plot(self.t, self.a, label='a')
-    pylab.legend()
+        pylab.subplot(3, 1, 3)
+        pylab.plot(self.t, self.a, label='a')
+        pylab.legend()
 
-    pylab.show()
+        pylab.show()
 
 
 def main(argv):
-  scenario_plotter = ScenarioPlotter()
+    scenario_plotter = ScenarioPlotter()
 
-  wrist = Wrist()
-  wrist_controller = IntegralWrist()
-  observer_wrist = IntegralWrist()
+    wrist = Wrist()
+    wrist_controller = IntegralWrist()
+    observer_wrist = IntegralWrist()
 
-  # Test moving the wrist with constant separation.
-  initial_X = numpy.matrix([[0.0], [0.0]])
-  R = numpy.matrix([[1.0], [0.0], [0.0]])
-  scenario_plotter.run_test(wrist, goal=R, controller_wrist=wrist_controller,
-                            observer_wrist=observer_wrist, iterations=200)
+    # Test moving the wrist with constant separation.
+    initial_X = numpy.matrix([[0.0], [0.0]])
+    R = numpy.matrix([[1.0], [0.0], [0.0]])
+    scenario_plotter.run_test(wrist,
+                              goal=R,
+                              controller_wrist=wrist_controller,
+                              observer_wrist=observer_wrist,
+                              iterations=200)
 
-  if FLAGS.plot:
-    scenario_plotter.Plot()
+    if FLAGS.plot:
+        scenario_plotter.Plot()
 
-  # Write the generated constants out to a file.
-  if len(argv) != 5:
-    glog.fatal('Expected .h file name and .cc file name for the wrist and integral wrist.')
-  else:
-    namespaces = ['y2016', 'control_loops', 'superstructure']
-    wrist = Wrist('Wrist')
-    loop_writer = control_loop.ControlLoopWriter(
-        'Wrist', [wrist], namespaces=namespaces)
-    loop_writer.Write(argv[1], argv[2])
+    # Write the generated constants out to a file.
+    if len(argv) != 5:
+        glog.fatal(
+            'Expected .h file name and .cc file name for the wrist and integral wrist.'
+        )
+    else:
+        namespaces = ['y2016', 'control_loops', 'superstructure']
+        wrist = Wrist('Wrist')
+        loop_writer = control_loop.ControlLoopWriter('Wrist', [wrist],
+                                                     namespaces=namespaces)
+        loop_writer.Write(argv[1], argv[2])
 
-    integral_wrist = IntegralWrist('IntegralWrist')
-    integral_loop_writer = control_loop.ControlLoopWriter(
-        'IntegralWrist', [integral_wrist], namespaces=namespaces)
-    integral_loop_writer.Write(argv[3], argv[4])
+        integral_wrist = IntegralWrist('IntegralWrist')
+        integral_loop_writer = control_loop.ControlLoopWriter(
+            'IntegralWrist', [integral_wrist], namespaces=namespaces)
+        integral_loop_writer.Write(argv[3], argv[4])
+
 
 if __name__ == '__main__':
-  argv = FLAGS(sys.argv)
-  sys.exit(main(argv))
+    argv = FLAGS(sys.argv)
+    sys.exit(main(argv))
diff --git a/y2017/control_loops/python/column.py b/y2017/control_loops/python/column.py
index 0122b58..7573c16 100755
--- a/y2017/control_loops/python/column.py
+++ b/y2017/control_loops/python/column.py
@@ -37,12 +37,12 @@
         self.A_continuous = numpy.matrix(numpy.zeros((3, 3)))
         self.B_continuous = numpy.matrix(numpy.zeros((3, 2)))
 
-        self.A_continuous[0, 0] = -(
-            self.indexer.Kt / self.indexer.Kv /
-            (self.indexer.J * self.indexer.resistance * self.indexer.G *
-             self.indexer.G) + self.turret.Kt / self.turret.Kv /
-            (self.indexer.J * self.turret.resistance * self.turret.G *
-             self.turret.G))
+        self.A_continuous[0, 0] = -(self.indexer.Kt / self.indexer.Kv /
+                                    (self.indexer.J * self.indexer.resistance *
+                                     self.indexer.G * self.indexer.G) +
+                                    self.turret.Kt / self.turret.Kv /
+                                    (self.indexer.J * self.turret.resistance *
+                                     self.turret.G * self.turret.G))
         self.A_continuous[0, 2] = self.turret.Kt / self.turret.Kv / (
             self.indexer.J * self.turret.resistance * self.turret.G *
             self.turret.G)
@@ -142,8 +142,11 @@
         r_pos = 0.05
         self.R = numpy.matrix([[(r_pos**2.0), 0.0], [0.0, (r_pos**2.0)]])
 
-        self.KalmanGain, self.Q_steady = controls.kalman(
-            A=self.A, B=self.B, C=self.C, Q=self.Q, R=self.R)
+        self.KalmanGain, self.Q_steady = controls.kalman(A=self.A,
+                                                         B=self.B,
+                                                         C=self.C,
+                                                         Q=self.Q,
+                                                         R=self.R)
         self.L = self.A * self.KalmanGain
 
         self.InitializeState()
@@ -209,8 +212,11 @@
         r_pos = 0.05
         self.R = numpy.matrix([[(r_pos**2.0), 0.0], [0.0, (r_pos**2.0)]])
 
-        self.KalmanGain, self.Q_steady = controls.kalman(
-            A=self.A, B=self.B, C=self.C, Q=self.Q, R=self.R)
+        self.KalmanGain, self.Q_steady = controls.kalman(A=self.A,
+                                                         B=self.B,
+                                                         C=self.C,
+                                                         Q=self.Q,
+                                                         R=self.R)
         self.L = self.A * self.KalmanGain
 
         self.InitializeState()
@@ -282,12 +288,13 @@
             self.x_hat.append(observer_column.X_hat[0, 0])
 
             next_goal = numpy.concatenate(
-                (end_goal[0:2, :], profile.Update(
-                    end_goal[2, 0], end_goal[3, 0]), end_goal[4:6, :]),
+                (end_goal[0:2, :],
+                 profile.Update(end_goal[2, 0],
+                                end_goal[3, 0]), end_goal[4:6, :]),
                 axis=0)
 
-            ff_U = controller_column.Kff * (
-                next_goal - observer_column.A * goal)
+            ff_U = controller_column.Kff * (next_goal -
+                                            observer_column.A * goal)
             fb_U = controller_column.K * (goal - observer_column.X_hat)
             self.turret_error.append((goal[2, 0] - column.X[2, 0]) * 100.0)
             self.ui_fb.append(fb_U[0, 0])
@@ -373,12 +380,11 @@
 
     initial_X = numpy.matrix([[0.0], [0.0], [0.0], [0.0]])
     R = numpy.matrix([[0.0], [10.0], [5.0], [0.0], [0.0], [0.0]])
-    scenario_plotter.run_test(
-        column,
-        end_goal=R,
-        controller_column=column_controller,
-        observer_column=observer_column,
-        iterations=400)
+    scenario_plotter.run_test(column,
+                              end_goal=R,
+                              controller_column=column_controller,
+                              observer_column=observer_column,
+                              iterations=400)
 
     if FLAGS.plot:
         scenario_plotter.Plot()
@@ -388,8 +394,8 @@
     else:
         namespaces = ['y2017', 'control_loops', 'superstructure', 'column']
         column = Column('Column')
-        loop_writer = control_loop.ControlLoopWriter(
-            'Column', [column], namespaces=namespaces)
+        loop_writer = control_loop.ControlLoopWriter('Column', [column],
+                                                     namespaces=namespaces)
         loop_writer.AddConstant(
             control_loop.Constant('kIndexerFreeSpeed', '%f',
                                   column.indexer.free_speed))
@@ -405,15 +411,15 @@
 
         # IntegralColumn controller 1 will disable the indexer.
         integral_column = IntegralColumn('IntegralColumn')
-        disabled_integral_column = IntegralColumn(
-            'DisabledIntegralColumn', disable_indexer=True)
+        disabled_integral_column = IntegralColumn('DisabledIntegralColumn',
+                                                  disable_indexer=True)
         integral_loop_writer = control_loop.ControlLoopWriter(
             'IntegralColumn', [integral_column, disabled_integral_column],
             namespaces=namespaces)
         integral_loop_writer.Write(argv[3], argv[4])
 
-        stuck_integral_column = IntegralColumn(
-            'StuckIntegralColumn', voltage_error_noise=8.0)
+        stuck_integral_column = IntegralColumn('StuckIntegralColumn',
+                                               voltage_error_noise=8.0)
         stuck_integral_loop_writer = control_loop.ControlLoopWriter(
             'StuckIntegralColumn', [stuck_integral_column],
             namespaces=namespaces)
diff --git a/y2017/control_loops/python/drivetrain.py b/y2017/control_loops/python/drivetrain.py
index 652fcf4..b1ebcd8 100755
--- a/y2017/control_loops/python/drivetrain.py
+++ b/y2017/control_loops/python/drivetrain.py
@@ -11,17 +11,17 @@
 
 gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
 
-kDrivetrain = drivetrain.DrivetrainParams(
-    J=6.0,
-    mass=52,
-    robot_radius=0.59055 / 2.0,
-    wheel_radius=0.08255 / 2.0,
-    G=11.0 / 60.0,
-    q_pos_low=0.12,
-    q_pos_high=0.14,
-    q_vel_low=1.0,
-    q_vel_high=0.95,
-    has_imu=False)
+kDrivetrain = drivetrain.DrivetrainParams(J=6.0,
+                                          mass=52,
+                                          robot_radius=0.59055 / 2.0,
+                                          wheel_radius=0.08255 / 2.0,
+                                          G=11.0 / 60.0,
+                                          q_pos_low=0.12,
+                                          q_pos_high=0.14,
+                                          q_vel_low=1.0,
+                                          q_vel_high=0.95,
+                                          has_imu=False)
+
 
 def main(argv):
     argv = FLAGS(argv)
@@ -35,5 +35,6 @@
         # Write the generated constants out to a file.
         drivetrain.WriteDrivetrain(argv[1:3], argv[3:5], 'y2017', kDrivetrain)
 
+
 if __name__ == '__main__':
     sys.exit(main(sys.argv))
diff --git a/y2017/control_loops/python/hood.py b/y2017/control_loops/python/hood.py
index c405bb2..c77d134 100755
--- a/y2017/control_loops/python/hood.py
+++ b/y2017/control_loops/python/hood.py
@@ -118,8 +118,11 @@
         r_volts = 0.025
         self.R = numpy.matrix([[(r_volts**2.0)]])
 
-        self.KalmanGain, self.Q_steady = controls.kalman(
-            A=self.A, B=self.B, C=self.C, Q=self.Q, R=self.R)
+        self.KalmanGain, self.Q_steady = controls.kalman(A=self.A,
+                                                         B=self.B,
+                                                         C=self.C,
+                                                         Q=self.Q,
+                                                         R=self.R)
 
         glog.debug('Kal %s', repr(self.KalmanGain))
         self.L = self.A * self.KalmanGain
@@ -165,8 +168,11 @@
         r_pos = 0.001
         self.R = numpy.matrix([[(r_pos**2.0)]])
 
-        self.KalmanGain, self.Q_steady = controls.kalman(
-            A=self.A, B=self.B, C=self.C, Q=self.Q, R=self.R)
+        self.KalmanGain, self.Q_steady = controls.kalman(A=self.A,
+                                                         B=self.B,
+                                                         C=self.C,
+                                                         Q=self.Q,
+                                                         R=self.R)
         self.L = self.A * self.KalmanGain
 
         self.K_unaugmented = self.K
@@ -244,7 +250,8 @@
 
             ff_U = controller_hood.Kff * (next_goal - observer_hood.A * goal)
 
-            U_uncapped = controller_hood.K * (goal - observer_hood.X_hat) + ff_U
+            U_uncapped = controller_hood.K * (goal -
+                                              observer_hood.X_hat) + ff_U
             U = U_uncapped.copy()
             U[0, 0] = numpy.clip(U[0, 0], -vbat, vbat)
             self.x.append(hood.X[0, 0])
@@ -310,12 +317,11 @@
     # Test moving the hood with constant separation.
     initial_X = numpy.matrix([[0.0], [0.0]])
     R = numpy.matrix([[numpy.pi / 4.0], [0.0], [0.0]])
-    scenario_plotter.run_test(
-        hood,
-        end_goal=R,
-        controller_hood=hood_controller,
-        observer_hood=observer_hood,
-        iterations=200)
+    scenario_plotter.run_test(hood,
+                              end_goal=R,
+                              controller_hood=hood_controller,
+                              observer_hood=observer_hood,
+                              iterations=200)
 
     if FLAGS.plot:
         scenario_plotter.Plot()
@@ -328,8 +334,8 @@
     else:
         namespaces = ['y2017', 'control_loops', 'superstructure', 'hood']
         hood = Hood('Hood')
-        loop_writer = control_loop.ControlLoopWriter(
-            'Hood', [hood], namespaces=namespaces)
+        loop_writer = control_loop.ControlLoopWriter('Hood', [hood],
+                                                     namespaces=namespaces)
         loop_writer.AddConstant(
             control_loop.Constant('kFreeSpeed', '%f', hood.free_speed))
         loop_writer.AddConstant(
@@ -340,7 +346,8 @@
         integral_loop_writer = control_loop.ControlLoopWriter(
             'IntegralHood', [integral_hood], namespaces=namespaces)
         integral_loop_writer.AddConstant(
-            control_loop.Constant('kLastReduction', '%f', integral_hood.last_G))
+            control_loop.Constant('kLastReduction', '%f',
+                                  integral_hood.last_G))
         integral_loop_writer.Write(argv[3], argv[4])
 
 
diff --git a/y2017/control_loops/python/indexer.py b/y2017/control_loops/python/indexer.py
index 7312e57..d4c3fe6 100755
--- a/y2017/control_loops/python/indexer.py
+++ b/y2017/control_loops/python/indexer.py
@@ -12,188 +12,203 @@
 FLAGS = gflags.FLAGS
 
 try:
-  gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
+    gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
 except gflags.DuplicateFlagError:
-  pass
+    pass
 
 gflags.DEFINE_bool('stall', False, 'If true, stall the indexer.')
 
+
 class VelocityIndexer(control_loop.ControlLoop):
-  def __init__(self, name='VelocityIndexer'):
-    super(VelocityIndexer, self).__init__(name)
-    # Stall Torque in N m
-    self.stall_torque = 0.71
-    # Stall Current in Amps
-    self.stall_current = 134
-    self.free_speed_rpm = 18730.0
-    # Free Speed in rotations/second.
-    self.free_speed = self.free_speed_rpm / 60.0
-    # Free Current in Amps
-    self.free_current = 0.7
-    # Moment of inertia of the indexer halves in kg m^2
-    # This is measured as Iyy in CAD (the moment of inertia around the Y axis).
-    # Inner part of indexer -> Iyy = 59500 lb * mm * mm
-    # Inner spins with 12 / 48 * 18 / 48 * 24 / 36 * 16 / 72
-    # Outer part of indexer -> Iyy = 210000 lb * mm * mm
-    # 1 775 pro -> 12 / 48 * 18 / 48 * 30 / 422
 
-    self.J_inner = 0.0269
-    self.J_outer = 0.0952
-    # Gear ratios for the inner and outer parts.
-    self.G_inner = (12.0 / 48.0) * (20.0 / 34.0) * (18.0 / 36.0) * (12.0 / 84.0)
-    self.G_outer = (12.0 / 48.0) * (20.0 / 34.0) * (18.0 / 36.0) * (24.0 / 420.0)
+    def __init__(self, name='VelocityIndexer'):
+        super(VelocityIndexer, self).__init__(name)
+        # Stall Torque in N m
+        self.stall_torque = 0.71
+        # Stall Current in Amps
+        self.stall_current = 134
+        self.free_speed_rpm = 18730.0
+        # Free Speed in rotations/second.
+        self.free_speed = self.free_speed_rpm / 60.0
+        # Free Current in Amps
+        self.free_current = 0.7
+        # Moment of inertia of the indexer halves in kg m^2
+        # This is measured as Iyy in CAD (the moment of inertia around the Y axis).
+        # Inner part of indexer -> Iyy = 59500 lb * mm * mm
+        # Inner spins with 12 / 48 * 18 / 48 * 24 / 36 * 16 / 72
+        # Outer part of indexer -> Iyy = 210000 lb * mm * mm
+        # 1 775 pro -> 12 / 48 * 18 / 48 * 30 / 422
 
-    # Motor inertia in kg m^2
-    self.motor_inertia = 0.00001187
+        self.J_inner = 0.0269
+        self.J_outer = 0.0952
+        # Gear ratios for the inner and outer parts.
+        self.G_inner = (12.0 / 48.0) * (20.0 / 34.0) * (18.0 / 36.0) * (12.0 /
+                                                                        84.0)
+        self.G_outer = (12.0 / 48.0) * (20.0 / 34.0) * (18.0 / 36.0) * (24.0 /
+                                                                        420.0)
 
-    # The output coordinate system is in radians for the inner part of the
-    # indexer.
-    # Compute the effective moment of inertia assuming all the mass is in that
-    # coordinate system.
-    self.J = (
-        self.J_inner * self.G_inner * self.G_inner +
-        self.J_outer * self.G_outer * self.G_outer) / (self.G_inner * self.G_inner) + \
-        self.motor_inertia * ((1.0 / self.G_inner) ** 2.0)
-    glog.debug('Indexer J is %f', self.J)
-    self.G = self.G_inner
+        # Motor inertia in kg m^2
+        self.motor_inertia = 0.00001187
 
-    # Resistance of the motor, divided by 2 to account for the 2 motors
-    self.resistance = 12.0 / self.stall_current
-    # Motor velocity constant
-    self.Kv = ((self.free_speed * 2.0 * numpy.pi) /
-              (12.0 - self.resistance * self.free_current))
-    # Torque constant
-    self.Kt = self.stall_torque / self.stall_current
-    # Control loop time step
-    self.dt = 0.005
+        # The output coordinate system is in radians for the inner part of the
+        # indexer.
+        # Compute the effective moment of inertia assuming all the mass is in that
+        # coordinate system.
+        self.J = (
+            self.J_inner * self.G_inner * self.G_inner +
+            self.J_outer * self.G_outer * self.G_outer) / (self.G_inner * self.G_inner) + \
+            self.motor_inertia * ((1.0 / self.G_inner) ** 2.0)
+        glog.debug('Indexer J is %f', self.J)
+        self.G = self.G_inner
 
-    # State feedback matrices
-    # [angular velocity]
-    self.A_continuous = numpy.matrix(
-        [[-self.Kt / self.Kv / (self.J * self.G * self.G * self.resistance)]])
-    self.B_continuous = numpy.matrix(
-        [[self.Kt / (self.J * self.G * self.resistance)]])
-    self.C = numpy.matrix([[1]])
-    self.D = numpy.matrix([[0]])
+        # Resistance of the motor, divided by 2 to account for the 2 motors
+        self.resistance = 12.0 / self.stall_current
+        # Motor velocity constant
+        self.Kv = ((self.free_speed * 2.0 * numpy.pi) /
+                   (12.0 - self.resistance * self.free_current))
+        # Torque constant
+        self.Kt = self.stall_torque / self.stall_current
+        # Control loop time step
+        self.dt = 0.005
 
-    self.A, self.B = self.ContinuousToDiscrete(
-        self.A_continuous, self.B_continuous, self.dt)
+        # State feedback matrices
+        # [angular velocity]
+        self.A_continuous = numpy.matrix([[
+            -self.Kt / self.Kv / (self.J * self.G * self.G * self.resistance)
+        ]])
+        self.B_continuous = numpy.matrix(
+            [[self.Kt / (self.J * self.G * self.resistance)]])
+        self.C = numpy.matrix([[1]])
+        self.D = numpy.matrix([[0]])
 
-    self.PlaceControllerPoles([.75])
+        self.A, self.B = self.ContinuousToDiscrete(self.A_continuous,
+                                                   self.B_continuous, self.dt)
 
-    self.PlaceObserverPoles([0.3])
+        self.PlaceControllerPoles([.75])
 
-    self.U_max = numpy.matrix([[12.0]])
-    self.U_min = numpy.matrix([[-12.0]])
+        self.PlaceObserverPoles([0.3])
 
-    qff_vel = 8.0
-    self.Qff = numpy.matrix([[1.0 / (qff_vel ** 2.0)]])
+        self.U_max = numpy.matrix([[12.0]])
+        self.U_min = numpy.matrix([[-12.0]])
 
-    self.Kff = controls.TwoStateFeedForwards(self.B, self.Qff)
-    self.InitializeState()
+        qff_vel = 8.0
+        self.Qff = numpy.matrix([[1.0 / (qff_vel**2.0)]])
+
+        self.Kff = controls.TwoStateFeedForwards(self.B, self.Qff)
+        self.InitializeState()
 
 
 class Indexer(VelocityIndexer):
-  def __init__(self, name='Indexer'):
-    super(Indexer, self).__init__(name)
 
-    self.A_continuous_unaugmented = self.A_continuous
-    self.B_continuous_unaugmented = self.B_continuous
+    def __init__(self, name='Indexer'):
+        super(Indexer, self).__init__(name)
 
-    self.A_continuous = numpy.matrix(numpy.zeros((2, 2)))
-    self.A_continuous[1:2, 1:2] = self.A_continuous_unaugmented
-    self.A_continuous[0, 1] = 1
+        self.A_continuous_unaugmented = self.A_continuous
+        self.B_continuous_unaugmented = self.B_continuous
 
-    self.B_continuous = numpy.matrix(numpy.zeros((2, 1)))
-    self.B_continuous[1:2, 0] = self.B_continuous_unaugmented
+        self.A_continuous = numpy.matrix(numpy.zeros((2, 2)))
+        self.A_continuous[1:2, 1:2] = self.A_continuous_unaugmented
+        self.A_continuous[0, 1] = 1
 
-    # State feedback matrices
-    # [position, angular velocity]
-    self.C = numpy.matrix([[1, 0]])
-    self.D = numpy.matrix([[0]])
+        self.B_continuous = numpy.matrix(numpy.zeros((2, 1)))
+        self.B_continuous[1:2, 0] = self.B_continuous_unaugmented
 
-    self.A, self.B = self.ContinuousToDiscrete(
-        self.A_continuous, self.B_continuous, self.dt)
+        # State feedback matrices
+        # [position, angular velocity]
+        self.C = numpy.matrix([[1, 0]])
+        self.D = numpy.matrix([[0]])
 
-    self.rpl = .45
-    self.ipl = 0.07
-    self.PlaceObserverPoles([self.rpl + 1j * self.ipl,
-                             self.rpl - 1j * self.ipl])
+        self.A, self.B = self.ContinuousToDiscrete(self.A_continuous,
+                                                   self.B_continuous, self.dt)
 
-    self.K_unaugmented = self.K
-    self.K = numpy.matrix(numpy.zeros((1, 2)))
-    self.K[0, 1:2] = self.K_unaugmented
-    self.Kff_unaugmented = self.Kff
-    self.Kff = numpy.matrix(numpy.zeros((1, 2)))
-    self.Kff[0, 1:2] = self.Kff_unaugmented
+        self.rpl = .45
+        self.ipl = 0.07
+        self.PlaceObserverPoles(
+            [self.rpl + 1j * self.ipl, self.rpl - 1j * self.ipl])
 
-    self.InitializeState()
+        self.K_unaugmented = self.K
+        self.K = numpy.matrix(numpy.zeros((1, 2)))
+        self.K[0, 1:2] = self.K_unaugmented
+        self.Kff_unaugmented = self.Kff
+        self.Kff = numpy.matrix(numpy.zeros((1, 2)))
+        self.Kff[0, 1:2] = self.Kff_unaugmented
+
+        self.InitializeState()
 
 
 class IntegralIndexer(Indexer):
-  def __init__(self, name="IntegralIndexer", voltage_error_noise=None):
-    super(IntegralIndexer, self).__init__(name=name)
 
-    self.A_continuous_unaugmented = self.A_continuous
-    self.B_continuous_unaugmented = self.B_continuous
+    def __init__(self, name="IntegralIndexer", voltage_error_noise=None):
+        super(IntegralIndexer, self).__init__(name=name)
 
-    self.A_continuous = numpy.matrix(numpy.zeros((3, 3)))
-    self.A_continuous[0:2, 0:2] = self.A_continuous_unaugmented
-    self.A_continuous[0:2, 2] = self.B_continuous_unaugmented
+        self.A_continuous_unaugmented = self.A_continuous
+        self.B_continuous_unaugmented = self.B_continuous
 
-    self.B_continuous = numpy.matrix(numpy.zeros((3, 1)))
-    self.B_continuous[0:2, 0] = self.B_continuous_unaugmented
+        self.A_continuous = numpy.matrix(numpy.zeros((3, 3)))
+        self.A_continuous[0:2, 0:2] = self.A_continuous_unaugmented
+        self.A_continuous[0:2, 2] = self.B_continuous_unaugmented
 
-    self.C_unaugmented = self.C
-    self.C = numpy.matrix(numpy.zeros((1, 3)))
-    self.C[0:1, 0:2] = self.C_unaugmented
+        self.B_continuous = numpy.matrix(numpy.zeros((3, 1)))
+        self.B_continuous[0:2, 0] = self.B_continuous_unaugmented
 
-    self.A, self.B = self.ContinuousToDiscrete(
-        self.A_continuous, self.B_continuous, self.dt)
+        self.C_unaugmented = self.C
+        self.C = numpy.matrix(numpy.zeros((1, 3)))
+        self.C[0:1, 0:2] = self.C_unaugmented
 
-    q_pos = 0.01
-    q_vel = 2.0
-    q_voltage = 0.6
-    if voltage_error_noise is not None:
-      q_voltage = voltage_error_noise
+        self.A, self.B = self.ContinuousToDiscrete(self.A_continuous,
+                                                   self.B_continuous, self.dt)
 
-    self.Q = numpy.matrix([[(q_pos ** 2.0), 0.0, 0.0],
-                           [0.0, (q_vel ** 2.0), 0.0],
-                           [0.0, 0.0, (q_voltage ** 2.0)]])
+        q_pos = 0.01
+        q_vel = 2.0
+        q_voltage = 0.6
+        if voltage_error_noise is not None:
+            q_voltage = voltage_error_noise
 
-    r_pos = 0.001
-    self.R = numpy.matrix([[(r_pos ** 2.0)]])
+        self.Q = numpy.matrix([[(q_pos**2.0), 0.0, 0.0],
+                               [0.0, (q_vel**2.0), 0.0],
+                               [0.0, 0.0, (q_voltage**2.0)]])
 
-    self.KalmanGain, self.Q_steady = controls.kalman(
-        A=self.A, B=self.B, C=self.C, Q=self.Q, R=self.R)
-    self.L = self.A * self.KalmanGain
+        r_pos = 0.001
+        self.R = numpy.matrix([[(r_pos**2.0)]])
 
-    self.K_unaugmented = self.K
-    self.K = numpy.matrix(numpy.zeros((1, 3)))
-    self.K[0, 0:2] = self.K_unaugmented
-    self.K[0, 2] = 1
-    self.Kff_unaugmented = self.Kff
-    self.Kff = numpy.matrix(numpy.zeros((1, 3)))
-    self.Kff[0, 0:2] = self.Kff_unaugmented
+        self.KalmanGain, self.Q_steady = controls.kalman(A=self.A,
+                                                         B=self.B,
+                                                         C=self.C,
+                                                         Q=self.Q,
+                                                         R=self.R)
+        self.L = self.A * self.KalmanGain
 
-    self.InitializeState()
+        self.K_unaugmented = self.K
+        self.K = numpy.matrix(numpy.zeros((1, 3)))
+        self.K[0, 0:2] = self.K_unaugmented
+        self.K[0, 2] = 1
+        self.Kff_unaugmented = self.Kff
+        self.Kff = numpy.matrix(numpy.zeros((1, 3)))
+        self.Kff[0, 0:2] = self.Kff_unaugmented
+
+        self.InitializeState()
 
 
 class ScenarioPlotter(object):
-  def __init__(self):
-    # Various lists for graphing things.
-    self.t = []
-    self.x = []
-    self.v = []
-    self.a = []
-    self.stall_ratio = []
-    self.x_hat = []
-    self.u = []
-    self.offset = []
 
-  def run_test(self, indexer, goal, iterations=200, controller_indexer=None,
-             observer_indexer=None):
-    """Runs the indexer plant with an initial condition and goal.
+    def __init__(self):
+        # Various lists for graphing things.
+        self.t = []
+        self.x = []
+        self.v = []
+        self.a = []
+        self.stall_ratio = []
+        self.x_hat = []
+        self.u = []
+        self.offset = []
+
+    def run_test(self,
+                 indexer,
+                 goal,
+                 iterations=200,
+                 controller_indexer=None,
+                 observer_indexer=None):
+        """Runs the indexer plant with an initial condition and goal.
 
       Args:
         indexer: Indexer object to use.
@@ -205,141 +220,148 @@
             should use the actual state.
     """
 
-    if controller_indexer is None:
-      controller_indexer = indexer
+        if controller_indexer is None:
+            controller_indexer = indexer
 
-    vbat = 12.0
+        vbat = 12.0
 
-    if self.t:
-      initial_t = self.t[-1] + indexer.dt
-    else:
-      initial_t = 0
-
-    for i in range(iterations):
-      X_hat = indexer.X
-
-      if observer_indexer is not None:
-        X_hat = observer_indexer.X_hat
-        observer_indexer.Y = indexer.Y
-        observer_indexer.CorrectObserver(numpy.matrix([[0.0]]))
-        self.x_hat.append(observer_indexer.X_hat[1, 0])
-        self.offset.append(observer_indexer.X_hat[2, 0])
-
-      ff_U = controller_indexer.Kff * (goal - observer_indexer.A * goal)
-
-      U = controller_indexer.K * (goal - X_hat) + ff_U
-      U[0, 0] = numpy.clip(U[0, 0], -vbat, vbat)
-      self.x.append(indexer.X[0, 0])
-
-      if self.v:
-        last_v = self.v[-1]
-      else:
-        last_v = 0
-
-      self.v.append(indexer.X[1, 0])
-      self.a.append((self.v[-1] - last_v) / indexer.dt)
-
-      applied_U = U.copy()
-      if i >= 40:
-        applied_U -= 2
-
-      if FLAGS.stall and i >= 40:
-        indexer.X[1, 0] = 0.0
-      else:
-        indexer.Update(applied_U)
-
-      if observer_indexer is not None:
-        clipped_u = U[0, 0]
-        clip_u_value = 3.0
-        if clipped_u < 0:
-          clipped_u = min(clipped_u, -clip_u_value)
+        if self.t:
+            initial_t = self.t[-1] + indexer.dt
         else:
-          clipped_u = max(clipped_u, clip_u_value)
+            initial_t = 0
 
-        self.stall_ratio.append(10 * (-self.offset[-1] / clipped_u))
+        for i in range(iterations):
+            X_hat = indexer.X
 
-        observer_indexer.PredictObserver(U)
+            if observer_indexer is not None:
+                X_hat = observer_indexer.X_hat
+                observer_indexer.Y = indexer.Y
+                observer_indexer.CorrectObserver(numpy.matrix([[0.0]]))
+                self.x_hat.append(observer_indexer.X_hat[1, 0])
+                self.offset.append(observer_indexer.X_hat[2, 0])
 
-      self.t.append(initial_t + i * indexer.dt)
-      self.u.append(U[0, 0])
+            ff_U = controller_indexer.Kff * (goal - observer_indexer.A * goal)
 
-  def Plot(self):
-    pylab.subplot(3, 1, 1)
-    pylab.plot(self.t, self.v, label='x')
-    pylab.plot(self.t, self.x_hat, label='x_hat')
-    pylab.legend()
+            U = controller_indexer.K * (goal - X_hat) + ff_U
+            U[0, 0] = numpy.clip(U[0, 0], -vbat, vbat)
+            self.x.append(indexer.X[0, 0])
 
-    pylab.subplot(3, 1, 2)
-    pylab.plot(self.t, self.u, label='u')
-    pylab.plot(self.t, self.offset, label='voltage_offset')
-    pylab.plot(self.t, self.stall_ratio, label='stall_ratio')
-    pylab.plot(self.t,
-               [10.0 if x > 6.0 else 0.0 for x in self.stall_ratio],
-               label='is_stalled')
-    pylab.legend()
+            if self.v:
+                last_v = self.v[-1]
+            else:
+                last_v = 0
 
-    pylab.subplot(3, 1, 3)
-    pylab.plot(self.t, self.a, label='a')
-    pylab.legend()
+            self.v.append(indexer.X[1, 0])
+            self.a.append((self.v[-1] - last_v) / indexer.dt)
 
-    pylab.show()
+            applied_U = U.copy()
+            if i >= 40:
+                applied_U -= 2
+
+            if FLAGS.stall and i >= 40:
+                indexer.X[1, 0] = 0.0
+            else:
+                indexer.Update(applied_U)
+
+            if observer_indexer is not None:
+                clipped_u = U[0, 0]
+                clip_u_value = 3.0
+                if clipped_u < 0:
+                    clipped_u = min(clipped_u, -clip_u_value)
+                else:
+                    clipped_u = max(clipped_u, clip_u_value)
+
+                self.stall_ratio.append(10 * (-self.offset[-1] / clipped_u))
+
+                observer_indexer.PredictObserver(U)
+
+            self.t.append(initial_t + i * indexer.dt)
+            self.u.append(U[0, 0])
+
+    def Plot(self):
+        pylab.subplot(3, 1, 1)
+        pylab.plot(self.t, self.v, label='x')
+        pylab.plot(self.t, self.x_hat, label='x_hat')
+        pylab.legend()
+
+        pylab.subplot(3, 1, 2)
+        pylab.plot(self.t, self.u, label='u')
+        pylab.plot(self.t, self.offset, label='voltage_offset')
+        pylab.plot(self.t, self.stall_ratio, label='stall_ratio')
+        pylab.plot(self.t,
+                   [10.0 if x > 6.0 else 0.0 for x in self.stall_ratio],
+                   label='is_stalled')
+        pylab.legend()
+
+        pylab.subplot(3, 1, 3)
+        pylab.plot(self.t, self.a, label='a')
+        pylab.legend()
+
+        pylab.show()
 
 
 def main(argv):
-  scenario_plotter = ScenarioPlotter()
+    scenario_plotter = ScenarioPlotter()
 
-  indexer = Indexer()
-  indexer_controller = IntegralIndexer()
-  observer_indexer = IntegralIndexer()
+    indexer = Indexer()
+    indexer_controller = IntegralIndexer()
+    observer_indexer = IntegralIndexer()
 
-  initial_X = numpy.matrix([[0.0], [0.0]])
-  R = numpy.matrix([[0.0], [20.0], [0.0]])
-  scenario_plotter.run_test(indexer, goal=R, controller_indexer=indexer_controller,
-                            observer_indexer=observer_indexer, iterations=200)
+    initial_X = numpy.matrix([[0.0], [0.0]])
+    R = numpy.matrix([[0.0], [20.0], [0.0]])
+    scenario_plotter.run_test(indexer,
+                              goal=R,
+                              controller_indexer=indexer_controller,
+                              observer_indexer=observer_indexer,
+                              iterations=200)
 
-  if FLAGS.plot:
-    scenario_plotter.Plot()
+    if FLAGS.plot:
+        scenario_plotter.Plot()
 
-  scenario_plotter = ScenarioPlotter()
+    scenario_plotter = ScenarioPlotter()
 
-  indexer = Indexer()
-  indexer_controller = IntegralIndexer(voltage_error_noise=1.5)
-  observer_indexer = IntegralIndexer(voltage_error_noise=1.5)
+    indexer = Indexer()
+    indexer_controller = IntegralIndexer(voltage_error_noise=1.5)
+    observer_indexer = IntegralIndexer(voltage_error_noise=1.5)
 
-  initial_X = numpy.matrix([[0.0], [0.0]])
-  R = numpy.matrix([[0.0], [20.0], [0.0]])
-  scenario_plotter.run_test(indexer, goal=R, controller_indexer=indexer_controller,
-                            observer_indexer=observer_indexer, iterations=200)
+    initial_X = numpy.matrix([[0.0], [0.0]])
+    R = numpy.matrix([[0.0], [20.0], [0.0]])
+    scenario_plotter.run_test(indexer,
+                              goal=R,
+                              controller_indexer=indexer_controller,
+                              observer_indexer=observer_indexer,
+                              iterations=200)
 
-  if FLAGS.plot:
-    scenario_plotter.Plot()
+    if FLAGS.plot:
+        scenario_plotter.Plot()
 
-  if len(argv) != 7:
-    glog.fatal('Expected .h file name and .cc file names')
-  else:
-    namespaces = ['y2017', 'control_loops', 'superstructure', 'indexer']
-    indexer = Indexer('Indexer')
-    loop_writer = control_loop.ControlLoopWriter('Indexer', [indexer],
-                                                 namespaces=namespaces)
-    loop_writer.AddConstant(control_loop.Constant(
-        'kFreeSpeed', '%f', indexer.free_speed))
-    loop_writer.AddConstant(control_loop.Constant(
-        'kOutputRatio', '%f', indexer.G))
-    loop_writer.Write(argv[1], argv[2])
+    if len(argv) != 7:
+        glog.fatal('Expected .h file name and .cc file names')
+    else:
+        namespaces = ['y2017', 'control_loops', 'superstructure', 'indexer']
+        indexer = Indexer('Indexer')
+        loop_writer = control_loop.ControlLoopWriter('Indexer', [indexer],
+                                                     namespaces=namespaces)
+        loop_writer.AddConstant(
+            control_loop.Constant('kFreeSpeed', '%f', indexer.free_speed))
+        loop_writer.AddConstant(
+            control_loop.Constant('kOutputRatio', '%f', indexer.G))
+        loop_writer.Write(argv[1], argv[2])
 
-    integral_indexer = IntegralIndexer('IntegralIndexer')
-    integral_loop_writer = control_loop.ControlLoopWriter(
-        'IntegralIndexer', [integral_indexer], namespaces=namespaces)
-    integral_loop_writer.Write(argv[3], argv[4])
+        integral_indexer = IntegralIndexer('IntegralIndexer')
+        integral_loop_writer = control_loop.ControlLoopWriter(
+            'IntegralIndexer', [integral_indexer], namespaces=namespaces)
+        integral_loop_writer.Write(argv[3], argv[4])
 
-    stuck_integral_indexer = IntegralIndexer('StuckIntegralIndexer',
-                                             voltage_error_noise=1.5)
-    stuck_integral_loop_writer = control_loop.ControlLoopWriter(
-        'StuckIntegralIndexer', [stuck_integral_indexer], namespaces=namespaces)
-    stuck_integral_loop_writer.Write(argv[5], argv[6])
+        stuck_integral_indexer = IntegralIndexer('StuckIntegralIndexer',
+                                                 voltage_error_noise=1.5)
+        stuck_integral_loop_writer = control_loop.ControlLoopWriter(
+            'StuckIntegralIndexer', [stuck_integral_indexer],
+            namespaces=namespaces)
+        stuck_integral_loop_writer.Write(argv[5], argv[6])
 
 
 if __name__ == '__main__':
-  argv = FLAGS(sys.argv)
-  glog.init()
-  sys.exit(main(argv))
+    argv = FLAGS(sys.argv)
+    glog.init()
+    sys.exit(main(argv))
diff --git a/y2017/control_loops/python/polydrivetrain.py b/y2017/control_loops/python/polydrivetrain.py
index 498a2c3..e181ef5 100755
--- a/y2017/control_loops/python/polydrivetrain.py
+++ b/y2017/control_loops/python/polydrivetrain.py
@@ -12,20 +12,22 @@
 FLAGS = gflags.FLAGS
 
 try:
-  gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
+    gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
 except gflags.DuplicateFlagError:
-  pass
+    pass
+
 
 def main(argv):
-  if FLAGS.plot:
-    polydrivetrain.PlotPolyDrivetrainMotions(drivetrain.kDrivetrain)
-  elif len(argv) != 7:
-    glog.fatal('Expected .h file name and .cc file name')
-  else:
-    polydrivetrain.WritePolyDrivetrain(argv[1:3], argv[3:5], argv[5:7], 'y2017',
-                                       drivetrain.kDrivetrain)
+    if FLAGS.plot:
+        polydrivetrain.PlotPolyDrivetrainMotions(drivetrain.kDrivetrain)
+    elif len(argv) != 7:
+        glog.fatal('Expected .h file name and .cc file name')
+    else:
+        polydrivetrain.WritePolyDrivetrain(argv[1:3], argv[3:5], argv[5:7],
+                                           'y2017', drivetrain.kDrivetrain)
+
 
 if __name__ == '__main__':
-  argv = FLAGS(sys.argv)
-  glog.init()
-  sys.exit(main(argv))
+    argv = FLAGS(sys.argv)
+    glog.init()
+    sys.exit(main(argv))
diff --git a/y2017/control_loops/python/polydrivetrain_test.py b/y2017/control_loops/python/polydrivetrain_test.py
index 8e0176e..a5bac4a 100755
--- a/y2017/control_loops/python/polydrivetrain_test.py
+++ b/y2017/control_loops/python/polydrivetrain_test.py
@@ -10,73 +10,72 @@
 
 
 class TestVelocityDrivetrain(unittest.TestCase):
-  def MakeBox(self, x1_min, x1_max, x2_min, x2_max):
-    H = numpy.matrix([[1, 0],
-                      [-1, 0],
-                      [0, 1],
-                      [0, -1]])
-    K = numpy.matrix([[x1_max],
-                      [-x1_min],
-                      [x2_max],
-                      [-x2_min]])
-    return polytope.HPolytope(H, K)
 
-  def test_coerce_inside(self):
-    """Tests coercion when the point is inside the box."""
-    box = self.MakeBox(1, 2, 1, 2)
+    def MakeBox(self, x1_min, x1_max, x2_min, x2_max):
+        H = numpy.matrix([[1, 0], [-1, 0], [0, 1], [0, -1]])
+        K = numpy.matrix([[x1_max], [-x1_min], [x2_max], [-x2_min]])
+        return polytope.HPolytope(H, K)
 
-    # x1 = x2
-    K = numpy.matrix([[1, -1]])
-    w = 0
+    def test_coerce_inside(self):
+        """Tests coercion when the point is inside the box."""
+        box = self.MakeBox(1, 2, 1, 2)
 
-    assert_array_equal(polydrivetrain.CoerceGoal(box, K, w,
-                                                 numpy.matrix([[1.5], [1.5]])),
-                       numpy.matrix([[1.5], [1.5]]))
+        # x1 = x2
+        K = numpy.matrix([[1, -1]])
+        w = 0
 
-  def test_coerce_outside_intersect(self):
-    """Tests coercion when the line intersects the box."""
-    box = self.MakeBox(1, 2, 1, 2)
+        assert_array_equal(
+            polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[1.5], [1.5]])),
+            numpy.matrix([[1.5], [1.5]]))
 
-    # x1 = x2
-    K = numpy.matrix([[1, -1]])
-    w = 0
+    def test_coerce_outside_intersect(self):
+        """Tests coercion when the line intersects the box."""
+        box = self.MakeBox(1, 2, 1, 2)
 
-    assert_array_equal(polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
-                       numpy.matrix([[2.0], [2.0]]))
+        # x1 = x2
+        K = numpy.matrix([[1, -1]])
+        w = 0
 
-  def test_coerce_outside_no_intersect(self):
-    """Tests coercion when the line does not intersect the box."""
-    box = self.MakeBox(3, 4, 1, 2)
+        assert_array_equal(
+            polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
+            numpy.matrix([[2.0], [2.0]]))
 
-    # x1 = x2
-    K = numpy.matrix([[1, -1]])
-    w = 0
+    def test_coerce_outside_no_intersect(self):
+        """Tests coercion when the line does not intersect the box."""
+        box = self.MakeBox(3, 4, 1, 2)
 
-    assert_array_equal(polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
-                       numpy.matrix([[3.0], [2.0]]))
+        # x1 = x2
+        K = numpy.matrix([[1, -1]])
+        w = 0
 
-  def test_coerce_middle_of_edge(self):
-    """Tests coercion when the line intersects the middle of an edge."""
-    box = self.MakeBox(0, 4, 1, 2)
+        assert_array_equal(
+            polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
+            numpy.matrix([[3.0], [2.0]]))
 
-    # x1 = x2
-    K = numpy.matrix([[-1, 1]])
-    w = 0
+    def test_coerce_middle_of_edge(self):
+        """Tests coercion when the line intersects the middle of an edge."""
+        box = self.MakeBox(0, 4, 1, 2)
 
-    assert_array_equal(polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
-                       numpy.matrix([[2.0], [2.0]]))
+        # x1 = x2
+        K = numpy.matrix([[-1, 1]])
+        w = 0
 
-  def test_coerce_perpendicular_line(self):
-    """Tests coercion when the line does not intersect and is in quadrant 2."""
-    box = self.MakeBox(1, 2, 1, 2)
+        assert_array_equal(
+            polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
+            numpy.matrix([[2.0], [2.0]]))
 
-    # x1 = -x2
-    K = numpy.matrix([[1, 1]])
-    w = 0
+    def test_coerce_perpendicular_line(self):
+        """Tests coercion when the line does not intersect and is in quadrant 2."""
+        box = self.MakeBox(1, 2, 1, 2)
 
-    assert_array_equal(polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
-                       numpy.matrix([[1.0], [1.0]]))
+        # x1 = -x2
+        K = numpy.matrix([[1, 1]])
+        w = 0
+
+        assert_array_equal(
+            polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
+            numpy.matrix([[1.0], [1.0]]))
 
 
 if __name__ == '__main__':
-  unittest.main()
+    unittest.main()
diff --git a/y2017/control_loops/python/shooter.py b/y2017/control_loops/python/shooter.py
index be4fb81..47b7217 100755
--- a/y2017/control_loops/python/shooter.py
+++ b/y2017/control_loops/python/shooter.py
@@ -204,8 +204,8 @@
 
         glog.debug('A: \n%s', repr(self.A_continuous))
         glog.debug('eig(A): \n%s', repr(scipy.linalg.eig(self.A_continuous)))
-        glog.debug('schur(A): \n%s', repr(
-            scipy.linalg.schur(self.A_continuous)))
+        glog.debug('schur(A): \n%s',
+                   repr(scipy.linalg.schur(self.A_continuous)))
         glog.debug('A_dt(A): \n%s', repr(self.A))
 
         q_pos = 0.01
@@ -220,15 +220,17 @@
         r_pos = 0.0003
         self.R_continuous = numpy.matrix([[(r_pos**2.0)]])
 
-        _, _, self.Q, self.R = controls.kalmd(
-            A_continuous=self.A_continuous,
-            B_continuous=self.B_continuous,
-            Q_continuous=self.Q_continuous,
-            R_continuous=self.R_continuous,
-            dt=self.dt)
+        _, _, self.Q, self.R = controls.kalmd(A_continuous=self.A_continuous,
+                                              B_continuous=self.B_continuous,
+                                              Q_continuous=self.Q_continuous,
+                                              R_continuous=self.R_continuous,
+                                              dt=self.dt)
 
-        self.KalmanGain, self.P_steady_state = controls.kalman(
-            A=self.A, B=self.B, C=self.C, Q=self.Q, R=self.R)
+        self.KalmanGain, self.P_steady_state = controls.kalman(A=self.A,
+                                                               B=self.B,
+                                                               C=self.C,
+                                                               Q=self.Q,
+                                                               R=self.R)
         self.L = self.A * self.KalmanGain
 
         self.K_unaugmented = self.K
@@ -363,13 +365,12 @@
         shooter_controller = IntegralShooter()
         observer_shooter_hybrid = IntegralShooter()
 
-        scenario_plotter_int.run_test(
-            shooter,
-            goal=R,
-            controller_shooter=shooter_controller,
-            observer_shooter=observer_shooter_hybrid,
-            iterations=iterations,
-            hybrid_obs=True)
+        scenario_plotter_int.run_test(shooter,
+                                      goal=R,
+                                      controller_shooter=shooter_controller,
+                                      observer_shooter=observer_shooter_hybrid,
+                                      iterations=iterations,
+                                      hybrid_obs=True)
 
         scenario_plotter_int.Plot()
 
@@ -380,8 +381,8 @@
     else:
         namespaces = ['y2017', 'control_loops', 'superstructure', 'shooter']
         shooter = Shooter('Shooter')
-        loop_writer = control_loop.ControlLoopWriter(
-            'Shooter', [shooter], namespaces=namespaces)
+        loop_writer = control_loop.ControlLoopWriter('Shooter', [shooter],
+                                                     namespaces=namespaces)
         loop_writer.AddConstant(
             control_loop.Constant('kFreeSpeed', '%f', shooter.free_speed))
         loop_writer.AddConstant(
diff --git a/y2017/control_loops/python/turret.py b/y2017/control_loops/python/turret.py
index e67904d..6407133 100755
--- a/y2017/control_loops/python/turret.py
+++ b/y2017/control_loops/python/turret.py
@@ -12,166 +12,176 @@
 FLAGS = gflags.FLAGS
 
 try:
-  gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
+    gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
 except gflags.DuplicateFlagError:
-  pass
+    pass
+
 
 class Turret(control_loop.ControlLoop):
-  def __init__(self, name='Turret'):
-    super(Turret, self).__init__(name)
-    # Stall Torque in N m
-    self.stall_torque = 0.71
-    # Stall Current in Amps
-    self.stall_current = 134
-    self.free_speed_rpm = 18730.0
-    # Free Speed in rotations/second.
-    self.free_speed = self.free_speed_rpm / 60.0
-    # Free Current in Amps
-    self.free_current = 0.7
 
-    # Resistance of the motor
-    self.resistance = 12.0 / self.stall_current
-    # Motor velocity constant
-    self.Kv = ((self.free_speed * 2.0 * numpy.pi) /
-               (12.0 - self.resistance * self.free_current))
-    # Torque constant
-    self.Kt = self.stall_torque / self.stall_current
-    # Gear ratio
-    self.G = (12.0 / 60.0) * (11.0 / 94.0)
+    def __init__(self, name='Turret'):
+        super(Turret, self).__init__(name)
+        # Stall Torque in N m
+        self.stall_torque = 0.71
+        # Stall Current in Amps
+        self.stall_current = 134
+        self.free_speed_rpm = 18730.0
+        # Free Speed in rotations/second.
+        self.free_speed = self.free_speed_rpm / 60.0
+        # Free Current in Amps
+        self.free_current = 0.7
 
-    # Motor inertia in kg * m^2
-    self.motor_inertia = 0.00001187
+        # Resistance of the motor
+        self.resistance = 12.0 / self.stall_current
+        # Motor velocity constant
+        self.Kv = ((self.free_speed * 2.0 * numpy.pi) /
+                   (12.0 - self.resistance * self.free_current))
+        # Torque constant
+        self.Kt = self.stall_torque / self.stall_current
+        # Gear ratio
+        self.G = (12.0 / 60.0) * (11.0 / 94.0)
 
-    # Moment of inertia, measured in CAD.
-    # Extra mass to compensate for friction is added on.
-    self.J = 0.06 + self.motor_inertia * ((1.0 / self.G) ** 2.0)
-    glog.debug('Turret J is: %f', self.J)
+        # Motor inertia in kg * m^2
+        self.motor_inertia = 0.00001187
 
-    # Control loop time step
-    self.dt = 0.005
+        # Moment of inertia, measured in CAD.
+        # Extra mass to compensate for friction is added on.
+        self.J = 0.06 + self.motor_inertia * ((1.0 / self.G)**2.0)
+        glog.debug('Turret J is: %f', self.J)
 
-    # State is [position, velocity]
-    # Input is [Voltage]
+        # Control loop time step
+        self.dt = 0.005
 
-    C1 = self.Kt / (self.resistance * self.J * self.Kv * self.G * self.G)
-    C2 = self.Kt / (self.J * self.resistance * self.G)
+        # State is [position, velocity]
+        # Input is [Voltage]
 
-    self.A_continuous = numpy.matrix(
-        [[0, 1],
-         [0, -C1]])
+        C1 = self.Kt / (self.resistance * self.J * self.Kv * self.G * self.G)
+        C2 = self.Kt / (self.J * self.resistance * self.G)
 
-    # Start with the unmodified input
-    self.B_continuous = numpy.matrix(
-        [[0],
-         [C2]])
+        self.A_continuous = numpy.matrix([[0, 1], [0, -C1]])
 
-    self.C = numpy.matrix([[1, 0]])
-    self.D = numpy.matrix([[0]])
+        # Start with the unmodified input
+        self.B_continuous = numpy.matrix([[0], [C2]])
 
-    self.A, self.B = self.ContinuousToDiscrete(
-        self.A_continuous, self.B_continuous, self.dt)
+        self.C = numpy.matrix([[1, 0]])
+        self.D = numpy.matrix([[0]])
 
-    controllability = controls.ctrb(self.A, self.B)
+        self.A, self.B = self.ContinuousToDiscrete(self.A_continuous,
+                                                   self.B_continuous, self.dt)
 
-    glog.debug('Free speed is %f',
-               -self.B_continuous[1, 0] / self.A_continuous[1, 1] * 12.0)
+        controllability = controls.ctrb(self.A, self.B)
 
-    # Calculate the LQR controller gain
-    q_pos = 0.20
-    q_vel = 5.0
-    self.Q = numpy.matrix([[(1.0 / (q_pos ** 2.0)), 0.0],
-                           [0.0, (1.0 / (q_vel ** 2.0))]])
+        glog.debug('Free speed is %f',
+                   -self.B_continuous[1, 0] / self.A_continuous[1, 1] * 12.0)
 
-    self.R = numpy.matrix([[(1.0 / (12.0 ** 2.0))]])
-    self.K = controls.dlqr(self.A, self.B, self.Q, self.R)
+        # Calculate the LQR controller gain
+        q_pos = 0.20
+        q_vel = 5.0
+        self.Q = numpy.matrix([[(1.0 / (q_pos**2.0)), 0.0],
+                               [0.0, (1.0 / (q_vel**2.0))]])
 
-    # Calculate the feed forwards gain.
-    q_pos_ff = 0.005
-    q_vel_ff = 1.0
-    self.Qff = numpy.matrix([[(1.0 / (q_pos_ff ** 2.0)), 0.0],
-                             [0.0, (1.0 / (q_vel_ff ** 2.0))]])
+        self.R = numpy.matrix([[(1.0 / (12.0**2.0))]])
+        self.K = controls.dlqr(self.A, self.B, self.Q, self.R)
 
-    self.Kff = controls.TwoStateFeedForwards(self.B, self.Qff)
+        # Calculate the feed forwards gain.
+        q_pos_ff = 0.005
+        q_vel_ff = 1.0
+        self.Qff = numpy.matrix([[(1.0 / (q_pos_ff**2.0)), 0.0],
+                                 [0.0, (1.0 / (q_vel_ff**2.0))]])
 
-    q_pos = 0.10
-    q_vel = 1.65
-    self.Q = numpy.matrix([[(q_pos ** 2.0), 0.0],
-                           [0.0, (q_vel ** 2.0)]])
+        self.Kff = controls.TwoStateFeedForwards(self.B, self.Qff)
 
-    r_volts = 0.025
-    self.R = numpy.matrix([[(r_volts ** 2.0)]])
+        q_pos = 0.10
+        q_vel = 1.65
+        self.Q = numpy.matrix([[(q_pos**2.0), 0.0], [0.0, (q_vel**2.0)]])
 
-    self.KalmanGain, self.Q_steady = controls.kalman(
-        A=self.A, B=self.B, C=self.C, Q=self.Q, R=self.R)
-    self.L = self.A * self.KalmanGain
+        r_volts = 0.025
+        self.R = numpy.matrix([[(r_volts**2.0)]])
 
-    # The box formed by U_min and U_max must encompass all possible values,
-    # or else Austin's code gets angry.
-    self.U_max = numpy.matrix([[12.0]])
-    self.U_min = numpy.matrix([[-12.0]])
+        self.KalmanGain, self.Q_steady = controls.kalman(A=self.A,
+                                                         B=self.B,
+                                                         C=self.C,
+                                                         Q=self.Q,
+                                                         R=self.R)
+        self.L = self.A * self.KalmanGain
 
-    self.InitializeState()
+        # The box formed by U_min and U_max must encompass all possible values,
+        # or else Austin's code gets angry.
+        self.U_max = numpy.matrix([[12.0]])
+        self.U_min = numpy.matrix([[-12.0]])
+
+        self.InitializeState()
+
 
 class IntegralTurret(Turret):
-  def __init__(self, name='IntegralTurret'):
-    super(IntegralTurret, self).__init__(name=name)
 
-    self.A_continuous_unaugmented = self.A_continuous
-    self.B_continuous_unaugmented = self.B_continuous
+    def __init__(self, name='IntegralTurret'):
+        super(IntegralTurret, self).__init__(name=name)
 
-    self.A_continuous = numpy.matrix(numpy.zeros((3, 3)))
-    self.A_continuous[0:2, 0:2] = self.A_continuous_unaugmented
-    self.A_continuous[0:2, 2] = self.B_continuous_unaugmented
+        self.A_continuous_unaugmented = self.A_continuous
+        self.B_continuous_unaugmented = self.B_continuous
 
-    self.B_continuous = numpy.matrix(numpy.zeros((3, 1)))
-    self.B_continuous[0:2, 0] = self.B_continuous_unaugmented
+        self.A_continuous = numpy.matrix(numpy.zeros((3, 3)))
+        self.A_continuous[0:2, 0:2] = self.A_continuous_unaugmented
+        self.A_continuous[0:2, 2] = self.B_continuous_unaugmented
 
-    self.C_unaugmented = self.C
-    self.C = numpy.matrix(numpy.zeros((1, 3)))
-    self.C[0:1, 0:2] = self.C_unaugmented
+        self.B_continuous = numpy.matrix(numpy.zeros((3, 1)))
+        self.B_continuous[0:2, 0] = self.B_continuous_unaugmented
 
-    self.A, self.B = self.ContinuousToDiscrete(
-        self.A_continuous, self.B_continuous, self.dt)
+        self.C_unaugmented = self.C
+        self.C = numpy.matrix(numpy.zeros((1, 3)))
+        self.C[0:1, 0:2] = self.C_unaugmented
 
-    q_pos = 0.12
-    q_vel = 2.00
-    q_voltage = 3.0
-    self.Q = numpy.matrix([[(q_pos ** 2.0), 0.0, 0.0],
-                           [0.0, (q_vel ** 2.0), 0.0],
-                           [0.0, 0.0, (q_voltage ** 2.0)]])
+        self.A, self.B = self.ContinuousToDiscrete(self.A_continuous,
+                                                   self.B_continuous, self.dt)
 
-    r_pos = 0.05
-    self.R = numpy.matrix([[(r_pos ** 2.0)]])
+        q_pos = 0.12
+        q_vel = 2.00
+        q_voltage = 3.0
+        self.Q = numpy.matrix([[(q_pos**2.0), 0.0, 0.0],
+                               [0.0, (q_vel**2.0), 0.0],
+                               [0.0, 0.0, (q_voltage**2.0)]])
 
-    self.KalmanGain, self.Q_steady = controls.kalman(
-        A=self.A, B=self.B, C=self.C, Q=self.Q, R=self.R)
-    self.L = self.A * self.KalmanGain
+        r_pos = 0.05
+        self.R = numpy.matrix([[(r_pos**2.0)]])
 
-    self.K_unaugmented = self.K
-    self.K = numpy.matrix(numpy.zeros((1, 3)))
-    self.K[0, 0:2] = self.K_unaugmented
-    self.K[0, 2] = 1
+        self.KalmanGain, self.Q_steady = controls.kalman(A=self.A,
+                                                         B=self.B,
+                                                         C=self.C,
+                                                         Q=self.Q,
+                                                         R=self.R)
+        self.L = self.A * self.KalmanGain
 
-    self.Kff = numpy.concatenate((self.Kff, numpy.matrix(numpy.zeros((1, 1)))), axis=1)
+        self.K_unaugmented = self.K
+        self.K = numpy.matrix(numpy.zeros((1, 3)))
+        self.K[0, 0:2] = self.K_unaugmented
+        self.K[0, 2] = 1
 
-    self.InitializeState()
+        self.Kff = numpy.concatenate(
+            (self.Kff, numpy.matrix(numpy.zeros((1, 1)))), axis=1)
+
+        self.InitializeState()
+
 
 class ScenarioPlotter(object):
-  def __init__(self):
-    # Various lists for graphing things.
-    self.t = []
-    self.x = []
-    self.v = []
-    self.a = []
-    self.x_hat = []
-    self.u = []
-    self.offset = []
 
-  def run_test(self, turret, end_goal,
-             controller_turret,
-             observer_turret=None,
-             iterations=200):
-    """Runs the turret plant with an initial condition and goal.
+    def __init__(self):
+        # Various lists for graphing things.
+        self.t = []
+        self.x = []
+        self.v = []
+        self.a = []
+        self.x_hat = []
+        self.u = []
+        self.offset = []
+
+    def run_test(self,
+                 turret,
+                 end_goal,
+                 controller_turret,
+                 observer_turret=None,
+                 iterations=200):
+        """Runs the turret plant with an initial condition and goal.
 
       Args:
         turret: turret object to use.
@@ -183,130 +193,138 @@
         iterations: Number of timesteps to run the model for.
     """
 
-    if controller_turret is None:
-      controller_turret = turret
+        if controller_turret is None:
+            controller_turret = turret
 
-    vbat = 12.0
+        vbat = 12.0
 
-    if self.t:
-      initial_t = self.t[-1] + turret.dt
-    else:
-      initial_t = 0
+        if self.t:
+            initial_t = self.t[-1] + turret.dt
+        else:
+            initial_t = 0
 
-    goal = numpy.concatenate((turret.X, numpy.matrix(numpy.zeros((1, 1)))), axis=0)
+        goal = numpy.concatenate((turret.X, numpy.matrix(numpy.zeros((1, 1)))),
+                                 axis=0)
 
-    profile = TrapezoidProfile(turret.dt)
-    profile.set_maximum_acceleration(100.0)
-    profile.set_maximum_velocity(7.0)
-    profile.SetGoal(goal[0, 0])
+        profile = TrapezoidProfile(turret.dt)
+        profile.set_maximum_acceleration(100.0)
+        profile.set_maximum_velocity(7.0)
+        profile.SetGoal(goal[0, 0])
 
-    U_last = numpy.matrix(numpy.zeros((1, 1)))
-    for i in range(iterations):
-      observer_turret.Y = turret.Y
-      observer_turret.CorrectObserver(U_last)
+        U_last = numpy.matrix(numpy.zeros((1, 1)))
+        for i in range(iterations):
+            observer_turret.Y = turret.Y
+            observer_turret.CorrectObserver(U_last)
 
-      self.offset.append(observer_turret.X_hat[2, 0])
-      self.x_hat.append(observer_turret.X_hat[0, 0])
+            self.offset.append(observer_turret.X_hat[2, 0])
+            self.x_hat.append(observer_turret.X_hat[0, 0])
 
-      next_goal = numpy.concatenate(
-          (profile.Update(end_goal[0, 0], end_goal[1, 0]),
-           numpy.matrix(numpy.zeros((1, 1)))),
-          axis=0)
+            next_goal = numpy.concatenate(
+                (profile.Update(end_goal[0, 0], end_goal[1, 0]),
+                 numpy.matrix(numpy.zeros((1, 1)))),
+                axis=0)
 
-      ff_U = controller_turret.Kff * (next_goal - observer_turret.A * goal)
+            ff_U = controller_turret.Kff * (next_goal -
+                                            observer_turret.A * goal)
 
-      U_uncapped = controller_turret.K * (goal - observer_turret.X_hat) + ff_U
-      U_uncapped = controller_turret.K * (end_goal - observer_turret.X_hat)
-      U = U_uncapped.copy()
-      U[0, 0] = numpy.clip(U[0, 0], -vbat, vbat)
-      self.x.append(turret.X[0, 0])
+            U_uncapped = controller_turret.K * (goal -
+                                                observer_turret.X_hat) + ff_U
+            U_uncapped = controller_turret.K * (end_goal -
+                                                observer_turret.X_hat)
+            U = U_uncapped.copy()
+            U[0, 0] = numpy.clip(U[0, 0], -vbat, vbat)
+            self.x.append(turret.X[0, 0])
 
-      if self.v:
-        last_v = self.v[-1]
-      else:
-        last_v = 0
+            if self.v:
+                last_v = self.v[-1]
+            else:
+                last_v = 0
 
-      self.v.append(turret.X[1, 0])
-      self.a.append((self.v[-1] - last_v) / turret.dt)
+            self.v.append(turret.X[1, 0])
+            self.a.append((self.v[-1] - last_v) / turret.dt)
 
-      offset = 0.0
-      if i > 100:
-        offset = 2.0
-      turret.Update(U + offset)
+            offset = 0.0
+            if i > 100:
+                offset = 2.0
+            turret.Update(U + offset)
 
-      observer_turret.PredictObserver(U)
+            observer_turret.PredictObserver(U)
 
-      self.t.append(initial_t + i * turret.dt)
-      self.u.append(U[0, 0])
+            self.t.append(initial_t + i * turret.dt)
+            self.u.append(U[0, 0])
 
-      ff_U -= U_uncapped - U
-      goal = controller_turret.A * goal + controller_turret.B * ff_U
+            ff_U -= U_uncapped - U
+            goal = controller_turret.A * goal + controller_turret.B * ff_U
 
-      if U[0, 0] != U_uncapped[0, 0]:
-        profile.MoveCurrentState(
-            numpy.matrix([[goal[0, 0]], [goal[1, 0]]]))
+            if U[0, 0] != U_uncapped[0, 0]:
+                profile.MoveCurrentState(
+                    numpy.matrix([[goal[0, 0]], [goal[1, 0]]]))
 
-    glog.debug('Time: %f', self.t[-1])
-    glog.debug('goal_error %s', repr(end_goal - goal))
-    glog.debug('error %s', repr(observer_turret.X_hat - end_goal))
+        glog.debug('Time: %f', self.t[-1])
+        glog.debug('goal_error %s', repr(end_goal - goal))
+        glog.debug('error %s', repr(observer_turret.X_hat - end_goal))
 
-  def Plot(self):
-    pylab.subplot(3, 1, 1)
-    pylab.plot(self.t, self.x, label='x')
-    pylab.plot(self.t, self.x_hat, label='x_hat')
-    pylab.legend()
+    def Plot(self):
+        pylab.subplot(3, 1, 1)
+        pylab.plot(self.t, self.x, label='x')
+        pylab.plot(self.t, self.x_hat, label='x_hat')
+        pylab.legend()
 
-    pylab.subplot(3, 1, 2)
-    pylab.plot(self.t, self.u, label='u')
-    pylab.plot(self.t, self.offset, label='voltage_offset')
-    pylab.legend()
+        pylab.subplot(3, 1, 2)
+        pylab.plot(self.t, self.u, label='u')
+        pylab.plot(self.t, self.offset, label='voltage_offset')
+        pylab.legend()
 
-    pylab.subplot(3, 1, 3)
-    pylab.plot(self.t, self.a, label='a')
-    pylab.legend()
+        pylab.subplot(3, 1, 3)
+        pylab.plot(self.t, self.a, label='a')
+        pylab.legend()
 
-    pylab.show()
+        pylab.show()
 
 
 def main(argv):
-  argv = FLAGS(argv)
-  glog.init()
+    argv = FLAGS(argv)
+    glog.init()
 
-  scenario_plotter = ScenarioPlotter()
+    scenario_plotter = ScenarioPlotter()
 
-  turret = Turret()
-  turret_controller = IntegralTurret()
-  observer_turret = IntegralTurret()
+    turret = Turret()
+    turret_controller = IntegralTurret()
+    observer_turret = IntegralTurret()
 
-  # Test moving the turret with constant separation.
-  initial_X = numpy.matrix([[0.0], [0.0]])
-  R = numpy.matrix([[numpy.pi/2.0], [0.0], [0.0]])
-  scenario_plotter.run_test(turret, end_goal=R,
-                            controller_turret=turret_controller,
-                            observer_turret=observer_turret, iterations=200)
+    # Test moving the turret with constant separation.
+    initial_X = numpy.matrix([[0.0], [0.0]])
+    R = numpy.matrix([[numpy.pi / 2.0], [0.0], [0.0]])
+    scenario_plotter.run_test(turret,
+                              end_goal=R,
+                              controller_turret=turret_controller,
+                              observer_turret=observer_turret,
+                              iterations=200)
 
-  if FLAGS.plot:
-    scenario_plotter.Plot()
+    if FLAGS.plot:
+        scenario_plotter.Plot()
 
-  # Write the generated constants out to a file.
-  if len(argv) != 5:
-    glog.fatal('Expected .h file name and .cc file name for the turret and integral turret.')
-  else:
-    namespaces = ['y2017', 'control_loops', 'superstructure', 'turret']
-    turret = Turret('Turret')
-    loop_writer = control_loop.ControlLoopWriter('Turret', [turret],
-                                                 namespaces=namespaces)
-    loop_writer.AddConstant(control_loop.Constant(
-        'kFreeSpeed', '%f', turret.free_speed))
-    loop_writer.AddConstant(control_loop.Constant(
-        'kOutputRatio', '%f', turret.G))
-    loop_writer.Write(argv[1], argv[2])
+    # Write the generated constants out to a file.
+    if len(argv) != 5:
+        glog.fatal(
+            'Expected .h file name and .cc file name for the turret and integral turret.'
+        )
+    else:
+        namespaces = ['y2017', 'control_loops', 'superstructure', 'turret']
+        turret = Turret('Turret')
+        loop_writer = control_loop.ControlLoopWriter('Turret', [turret],
+                                                     namespaces=namespaces)
+        loop_writer.AddConstant(
+            control_loop.Constant('kFreeSpeed', '%f', turret.free_speed))
+        loop_writer.AddConstant(
+            control_loop.Constant('kOutputRatio', '%f', turret.G))
+        loop_writer.Write(argv[1], argv[2])
 
-    integral_turret = IntegralTurret('IntegralTurret')
-    integral_loop_writer = control_loop.ControlLoopWriter(
-        'IntegralTurret', [integral_turret],
-        namespaces=namespaces)
-    integral_loop_writer.Write(argv[3], argv[4])
+        integral_turret = IntegralTurret('IntegralTurret')
+        integral_loop_writer = control_loop.ControlLoopWriter(
+            'IntegralTurret', [integral_turret], namespaces=namespaces)
+        integral_loop_writer.Write(argv[3], argv[4])
+
 
 if __name__ == '__main__':
-  sys.exit(main(sys.argv))
+    sys.exit(main(sys.argv))
diff --git a/y2018/control_loops/python/arm_distal.py b/y2018/control_loops/python/arm_distal.py
index 2836c50..d443f8a 100755
--- a/y2018/control_loops/python/arm_distal.py
+++ b/y2018/control_loops/python/arm_distal.py
@@ -3,7 +3,7 @@
 # This code was used to select the gear ratio for the distal arm.
 # Run it from the command line and it displays the time required
 # to move the distal arm 60 degrees.
-# 
+#
 # Michael Schuh
 # January 20, 2018
 
@@ -14,206 +14,238 @@
 # apt-get install python-scipy python3-scipy python-numpy python3-numpy
 
 pi = math.pi
-pi2 = 2.0*pi
-rad_to_deg = 180.0/pi
+pi2 = 2.0 * pi
+rad_to_deg = 180.0 / pi
 inches_to_meters = 0.0254
-lbs_to_kg = 1.0/2.2
+lbs_to_kg = 1.0 / 2.2
 newton_to_lbf = 0.224809
 newton_meters_to_ft_lbs = 0.73756
 run_count = 0
 theta_travel = 0.0
 
+
 def to_deg(angle):
-  return (angle*rad_to_deg)
+    return (angle * rad_to_deg)
+
 
 def to_rad(angle):
-  return (angle/rad_to_deg)
+    return (angle / rad_to_deg)
+
 
 def to_rotations(angle):
-  return (angle/pi2)
+    return (angle / pi2)
+
 
 def time_derivative(x, t, voltage, c1, c2, c3):
-  global run_count
-  theta, omega = x
-  dxdt = [omega, -c1*omega + c3*math.sin(theta) + c2*voltage]
-  run_count = run_count + 1
+    global run_count
+    theta, omega = x
+    dxdt = [omega, -c1 * omega + c3 * math.sin(theta) + c2 * voltage]
+    run_count = run_count + 1
 
-  #print ('dxdt = ',dxdt,' repr(dxdt) = ', repr(dxdt))
-  return dxdt
-  
+    #print ('dxdt = ',dxdt,' repr(dxdt) = ', repr(dxdt))
+    return dxdt
 
-def get_180_degree_time(c1,c2,c3,voltage,gear_ratio,motor_free_speed):
-  #print ("# step     time    theta    angular_speed   angular_acceleration  theta   angular_speed  motor_speed motor_speed_fraction")
-  #print ("#          (sec)   (rad)      (rad/sec)        (rad/sec^2)      (rotations) (rotations/sec)    (rpm)   (fraction)")
-  global run_count
-  global theta_travel
 
-  if ( False ):
-    # Gravity is assisting the motion.
-    theta_start = 0.0
-    theta_target = pi
-  elif ( False ):
-    # Gravity is assisting the motion.
-    theta_start = 0.0
-    theta_target = -pi
-  elif ( False ):
-    # Gravity is slowing the motion.
-    theta_start = pi
-    theta_target = 0.0
-  elif ( True ):
-    # Gravity is slowing the motion.
-    theta_start = -pi
-    theta_target = 0.0
+def get_180_degree_time(c1, c2, c3, voltage, gear_ratio, motor_free_speed):
+    #print ("# step     time    theta    angular_speed   angular_acceleration  theta   angular_speed  motor_speed motor_speed_fraction")
+    #print ("#          (sec)   (rad)      (rad/sec)        (rad/sec^2)      (rotations) (rotations/sec)    (rpm)   (fraction)")
+    global run_count
+    global theta_travel
 
-  theta_half = 0.5*(theta_start + theta_target)
-  if (theta_start > theta_target):
-    voltage = -voltage
-  theta = theta_start
-  theta_travel = theta_start - theta_target 
-  if ( run_count == 0 ):
-    print ("# Theta Start = %.2f radians End = %.2f Theta travel %.2f Theta half = %.2f Voltage = %.2f" % (theta_start,theta_target,theta_travel,theta_half, voltage))
-    print ("# Theta Start = %.2f degrees End = %.2f Theta travel %.2f Theta half = %.2f Voltage = %.2f" % (to_deg(theta_start),to_deg(theta_target),to_deg(theta_travel),to_deg(theta_half), voltage))
-  omega = 0.0
-  time = 0.0
-  delta_time = 0.01 # time step in seconds
-  for step in range(1, 5000):
-     t = numpy.array([time, time + delta_time])
-     time = time + delta_time
-     x = [theta, omega]
-     angular_acceleration = -c1*omega + c2*voltage
-     x_n_plus_1 = scipy.integrate.odeint(time_derivative,x,t,args=(voltage,c1,c2,c3))
-     #print ('x_n_plus_1 = ',x_n_plus_1)
-     #print ('repr(x_n_plus_1) = ',repr(x_n_plus_1))
-     theta, omega = x_n_plus_1[1]
-     #theta= x_n_plus_1[0]
-     #omega = x_n_plus_1[1]
-     if ( False ):
-       print ("%4d  %8.4f %8.2f          %8.4f          %8.4f    %8.3f    %8.3f     %8.3f      %8.3f" % \
-         (step, time, theta, omega, angular_acceleration, to_rotations(theta), \
-         to_rotations(omega), omega*gear_ratio*60.0/pi2, omega*gear_ratio/motor_free_speed ))
-     if (theta_start < theta_target):
-       # Angle is increasing through the motion.
-       if (theta > theta_half):
-         break
-     else:
-       # Angle is decreasing through the motion.
-       if (theta < theta_half):
-         break
-       
-  #print ("# step     time    theta    angular_speed   angular_acceleration  theta   angular_speed  motor_speed motor_speed_fraction")
-  #print ("#          (sec)   (rad)      (rad/sec)        (rad/sec^2)      (rotations) (rotations/sec)    (rpm)   (fraction)")
-  #print ("# Total time for 1/2 rotation of arm is %0.2f" % (time*2))
-  return (2.0*time)
+    if (False):
+        # Gravity is assisting the motion.
+        theta_start = 0.0
+        theta_target = pi
+    elif (False):
+        # Gravity is assisting the motion.
+        theta_start = 0.0
+        theta_target = -pi
+    elif (False):
+        # Gravity is slowing the motion.
+        theta_start = pi
+        theta_target = 0.0
+    elif (True):
+        # Gravity is slowing the motion.
+        theta_start = -pi
+        theta_target = 0.0
+
+    theta_half = 0.5 * (theta_start + theta_target)
+    if (theta_start > theta_target):
+        voltage = -voltage
+    theta = theta_start
+    theta_travel = theta_start - theta_target
+    if (run_count == 0):
+        print(
+            "# Theta Start = %.2f radians End = %.2f Theta travel %.2f Theta half = %.2f Voltage = %.2f"
+            % (theta_start, theta_target, theta_travel, theta_half, voltage))
+        print(
+            "# Theta Start = %.2f degrees End = %.2f Theta travel %.2f Theta half = %.2f Voltage = %.2f"
+            % (to_deg(theta_start), to_deg(theta_target), to_deg(theta_travel),
+               to_deg(theta_half), voltage))
+    omega = 0.0
+    time = 0.0
+    delta_time = 0.01  # time step in seconds
+    for step in range(1, 5000):
+        t = numpy.array([time, time + delta_time])
+        time = time + delta_time
+        x = [theta, omega]
+        angular_acceleration = -c1 * omega + c2 * voltage
+        x_n_plus_1 = scipy.integrate.odeint(time_derivative,
+                                            x,
+                                            t,
+                                            args=(voltage, c1, c2, c3))
+        #print ('x_n_plus_1 = ',x_n_plus_1)
+        #print ('repr(x_n_plus_1) = ',repr(x_n_plus_1))
+        theta, omega = x_n_plus_1[1]
+        #theta= x_n_plus_1[0]
+        #omega = x_n_plus_1[1]
+        if (False):
+            print ("%4d  %8.4f %8.2f          %8.4f          %8.4f    %8.3f    %8.3f     %8.3f      %8.3f" % \
+              (step, time, theta, omega, angular_acceleration, to_rotations(theta), \
+              to_rotations(omega), omega*gear_ratio*60.0/pi2, omega*gear_ratio/motor_free_speed ))
+        if (theta_start < theta_target):
+            # Angle is increasing through the motion.
+            if (theta > theta_half):
+                break
+        else:
+            # Angle is decreasing through the motion.
+            if (theta < theta_half):
+                break
+
+    #print ("# step     time    theta    angular_speed   angular_acceleration  theta   angular_speed  motor_speed motor_speed_fraction")
+    #print ("#          (sec)   (rad)      (rad/sec)        (rad/sec^2)      (rotations) (rotations/sec)    (rpm)   (fraction)")
+    #print ("# Total time for 1/2 rotation of arm is %0.2f" % (time*2))
+    return (2.0 * time)
+
 
 def main():
-  gravity = 9.8 # m/sec^2 Gravity Constant
-  voltage_nominal = 12 # Volts
-  
-  # Vex 775 Pro motor specs from http://banebots.com/p/M2-RS550-120
-  motor_name = "Vex 775 Pro motor specs from http://banebots.com/p/M2-RS550-120"
-  current_stall = 134 # amps stall current
-  current_no_load = 0.7 # amps no load current
-  torque_stall = 710/1000.0 # N-m Stall Torque
-  speed_no_load_rpm = 18730 # RPM no load speed
-  
-  if ( False ):
-    # Bag motor from https://www.vexrobotics.com/217-3351.html
-    motor_name = "Bag motor from https://www.vexrobotics.com/217-3351.html"
-    current_stall = 53.0 # amps stall current
-    current_no_load = 1.8 # amps no load current
-    torque_stall = 0.4 # N-m Stall Torque
-    speed_no_load_rpm = 13180.0 # RPM no load speed
-  
-  if ( True ):
-    # Mini CIM motor from https://www.vexrobotics.com/217-3371.html
-    motor_name = "Mini CIM motor from https://www.vexrobotics.com/217-3371.html"
-    current_stall = 89.0 # amps stall current
-    current_no_load = 3.0 # amps no load current
-    torque_stall = 1.4 # N-m Stall Torque
-    speed_no_load_rpm = 5840.0 # RPM no load speed
+    gravity = 9.8  # m/sec^2 Gravity Constant
+    voltage_nominal = 12  # Volts
 
-  # How many motors are we using?
-  num_motors = 2
+    # Vex 775 Pro motor specs from http://banebots.com/p/M2-RS550-120
+    motor_name = "Vex 775 Pro motor specs from http://banebots.com/p/M2-RS550-120"
+    current_stall = 134  # amps stall current
+    current_no_load = 0.7  # amps no load current
+    torque_stall = 710 / 1000.0  # N-m Stall Torque
+    speed_no_load_rpm = 18730  # RPM no load speed
 
-  # Motor values
-  print ("# Motor: %s" % (motor_name))
-  print ("# Number of motors: %d" % (num_motors))
-  print ("# Stall torque: %.1f n-m" % (torque_stall))
-  print ("# Stall current: %.1f amps" % (current_stall))
-  print ("# No load current: %.1f amps" % (current_no_load))a
-  print ("# No load speed: %.0f rpm" % (speed_no_load_rpm))
-  
-  # Constants from motor values
-  resistance_motor = voltage_nominal/current_stall 
-  speed_no_load_rps = speed_no_load_rpm/60.0 # Revolutions per second no load speed
-  speed_no_load = speed_no_load_rps*2.0*pi
-  Kt = num_motors*torque_stall/current_stall # N-m/A torque constant
-  Kv_rpm = speed_no_load_rpm /(voltage_nominal - resistance_motor*current_no_load)  # rpm/V
-  Kv = Kv_rpm*2.0*pi/60.0 # rpm/V
-  
-  # Robot Geometry and physics
-  length_proximal_arm = inches_to_meters*47.34 # m Length of arm connected to the robot base
-  length_distal_arm = inches_to_meters*44.0 # m Length of arm that holds the cube
-  mass_cube = 6.0*lbs_to_kg  # Weight of the cube in Kgrams
-  mass_proximal_arm = 5.5*lbs_to_kg # Weight of proximal arm
-  mass_distal_arm = 3.5*lbs_to_kg # Weight of distal arm
-  mass_distal = mass_cube + mass_distal_arm
-  radius_to_proximal_arm_cg = 22.0*inches_to_meters # m Length from arm pivot point to arm CG
-  radius_to_distal_arm_cg = 10.0*inches_to_meters # m Length from arm pivot point to arm CG
+    if (False):
+        # Bag motor from https://www.vexrobotics.com/217-3351.html
+        motor_name = "Bag motor from https://www.vexrobotics.com/217-3351.html"
+        current_stall = 53.0  # amps stall current
+        current_no_load = 1.8  # amps no load current
+        torque_stall = 0.4  # N-m Stall Torque
+        speed_no_load_rpm = 13180.0  # RPM no load speed
 
-  radius_to_distal_cg = ( length_distal_arm*mass_cube + radius_to_distal_arm_cg*mass_distal_arm)/mass_distal
-  J_cube = length_distal_arm*length_distal_arm*mass_cube 
-  J_proximal_arm = radius_to_proximal_arm_cg*radius_to_proximal_arm_cg*mass_distal_arm # Kg m^2 Moment of inertia of the proximal arm
-  J_distal_arm = radius_to_distal_arm_cg*radius_to_distal_arm_cg*mass_distal_arm # Kg m^2 Moment of inertia of the distal arm
-  J = J_cube + J_distal_arm # Moment of inertia of the arm with the cube on the end
+    if (True):
+        # Mini CIM motor from https://www.vexrobotics.com/217-3371.html
+        motor_name = "Mini CIM motor from https://www.vexrobotics.com/217-3371.html"
+        current_stall = 89.0  # amps stall current
+        current_no_load = 3.0  # amps no load current
+        torque_stall = 1.4  # N-m Stall Torque
+        speed_no_load_rpm = 5840.0  # RPM no load speed
 
-  error_margine = 1.0
-  voltage = 10.0 # voltage for the motor.  Assuming a loaded robot so not using 12 V.
-  # It might make sense to use a lower motor frees peed when the voltage is not a full 12 Volts.
-  # motor_free_speed = Kv*voltage
-  motor_free_speed = speed_no_load
-  
-  
-  print ("# Kt = %f N-m/A\n# Kv_rpm = %f rpm/V\n# Kv = %f radians/V" % (Kt, Kv_rpm, Kv))
-  print ("# %.2f Ohms Resistance of the motor " % (resistance_motor))
-  print ("# %.2f kg Cube weight" % (mass_cube))
-  print ("# %.2f kg Proximal Arm mass" % (mass_proximal_arm))
-  print ("# %.2f kg Distal Arm mass" % (mass_distal_arm))
-  print ("# %.2f kg Distal Arm and Cube weight" % (mass_distal))
-  print ("# %.2f m Length from distal arm pivot point to arm CG" % (radius_to_distal_arm_cg))
-  print ("# %.2f m Length from distal arm pivot point to arm and cube cg" % (radius_to_distal_cg))
-  print ("# %.2f kg-m^2 Moment of inertia of the cube about the arm pivot point" % (J_cube))
-  print ("# %.2f kg-m^2 Moment of inertia of the arm with the cube on the end" % (J))
-  print ("# %.2f m  Proximal arm length" % (length_proximal_arm))
-  print ("# %.2f m  Distal arm length" % (length_distal_arm))
+    # How many motors are we using?
+    num_motors = 2
 
-  print ("# %.2f kg-m^2 Moment of inertia of the proximal arm about the arm pivot point" % (J_proximal_arm))
-  print ("# %.2f kg-m^2 Moment of inertia of the distal arm about the arm pivot point" % (J_distal_arm))
-  print ("# %.2f kg-m^2 Moment of inertia of the distal arm and cube about the arm pivot point" % (J))
-  print ("# %d Number of motors" % (num_motors))
-  
-  print ("# %.2f V Motor voltage" % (voltage))
-  for gear_ratio in range(30, 181, 10):
-    c1 = Kt*gear_ratio*gear_ratio/(Kv*resistance_motor*J)
-    c2 = gear_ratio*Kt/(J*resistance_motor)
-    c3 = radius_to_distal_cg*mass_distal*gravity/J
-  
-    if ( False ):
-      print ("# %.8f 1/sec C1 constant" % (c1))
-      print ("# %.2f 1/sec C2 constant" % (c2))
-      print ("# %.2f 1/(V sec^2) C3 constant" % (c3))
-      print ("# %.2f RPM Free speed at motor voltage" % (voltage*Kv_rpm))
-  
-    torque_90_degrees = radius_to_distal_cg*mass_distal*gravity
-    voltage_90_degrees = resistance_motor*torque_90_degrees/(gear_ratio*Kt)
-    torque_peak = gear_ratio*num_motors*torque_stall
-    torque_peak_ft_lbs = torque_peak * newton_meters_to_ft_lbs
-    normal_force = torque_peak/length_distal_arm
-    normal_force_lbf = newton_to_lbf*normal_force 
-    time_required = get_180_degree_time(c1,c2,c3,voltage,gear_ratio,motor_free_speed)
-    print ("Time for %.1f degrees for gear ratio %3.0f is %.2f seconds.  90 degree torque %.1f N-m and voltage %.1f Volts. Peak torque %3.0f nm %3.0f ft-lb Normal force at distal end %3.0f N %2.0f lbf" % \
-      (to_deg(theta_travel),gear_ratio,time_required,torque_90_degrees,voltage_90_degrees,
-       torque_peak,torque_peak_ft_lbs,normal_force,normal_force_lbf))
-  
+    # Motor values
+    print("# Motor: %s" % (motor_name))
+    print("# Number of motors: %d" % (num_motors))
+    print("# Stall torque: %.1f n-m" % (torque_stall))
+    print("# Stall current: %.1f amps" % (current_stall))
+    print("# No load current: %.1f amps" % (current_no_load))
+    print("# No load speed: %.0f rpm" % (speed_no_load_rpm))
+
+    # Constants from motor values
+    resistance_motor = voltage_nominal / current_stall
+    speed_no_load_rps = speed_no_load_rpm / 60.0  # Revolutions per second no load speed
+    speed_no_load = speed_no_load_rps * 2.0 * pi
+    Kt = num_motors * torque_stall / current_stall  # N-m/A torque constant
+    Kv_rpm = speed_no_load_rpm / (
+        voltage_nominal - resistance_motor * current_no_load)  # rpm/V
+    Kv = Kv_rpm * 2.0 * pi / 60.0  # rpm/V
+
+    # Robot Geometry and physics
+    length_proximal_arm = inches_to_meters * 47.34  # m Length of arm connected to the robot base
+    length_distal_arm = inches_to_meters * 44.0  # m Length of arm that holds the cube
+    mass_cube = 6.0 * lbs_to_kg  # Weight of the cube in Kgrams
+    mass_proximal_arm = 5.5 * lbs_to_kg  # Weight of proximal arm
+    mass_distal_arm = 3.5 * lbs_to_kg  # Weight of distal arm
+    mass_distal = mass_cube + mass_distal_arm
+    radius_to_proximal_arm_cg = 22.0 * inches_to_meters  # m Length from arm pivot point to arm CG
+    radius_to_distal_arm_cg = 10.0 * inches_to_meters  # m Length from arm pivot point to arm CG
+
+    radius_to_distal_cg = (
+        length_distal_arm * mass_cube +
+        radius_to_distal_arm_cg * mass_distal_arm) / mass_distal
+    J_cube = length_distal_arm * length_distal_arm * mass_cube
+    J_proximal_arm = radius_to_proximal_arm_cg * radius_to_proximal_arm_cg * mass_distal_arm  # Kg m^2 Moment of inertia of the proximal arm
+    J_distal_arm = radius_to_distal_arm_cg * radius_to_distal_arm_cg * mass_distal_arm  # Kg m^2 Moment of inertia of the distal arm
+    J = J_cube + J_distal_arm  # Moment of inertia of the arm with the cube on the end
+
+    error_margine = 1.0
+    voltage = 10.0  # voltage for the motor.  Assuming a loaded robot so not using 12 V.
+    # It might make sense to use a lower motor frees peed when the voltage is not a full 12 Volts.
+    # motor_free_speed = Kv*voltage
+    motor_free_speed = speed_no_load
+
+    print("# Kt = %f N-m/A\n# Kv_rpm = %f rpm/V\n# Kv = %f radians/V" %
+          (Kt, Kv_rpm, Kv))
+    print("# %.2f Ohms Resistance of the motor " % (resistance_motor))
+    print("# %.2f kg Cube weight" % (mass_cube))
+    print("# %.2f kg Proximal Arm mass" % (mass_proximal_arm))
+    print("# %.2f kg Distal Arm mass" % (mass_distal_arm))
+    print("# %.2f kg Distal Arm and Cube weight" % (mass_distal))
+    print("# %.2f m Length from distal arm pivot point to arm CG" %
+          (radius_to_distal_arm_cg))
+    print("# %.2f m Length from distal arm pivot point to arm and cube cg" %
+          (radius_to_distal_cg))
+    print(
+        "# %.2f kg-m^2 Moment of inertia of the cube about the arm pivot point"
+        % (J_cube))
+    print(
+        "# %.2f kg-m^2 Moment of inertia of the arm with the cube on the end" %
+        (J))
+    print("# %.2f m  Proximal arm length" % (length_proximal_arm))
+    print("# %.2f m  Distal arm length" % (length_distal_arm))
+
+    print(
+        "# %.2f kg-m^2 Moment of inertia of the proximal arm about the arm pivot point"
+        % (J_proximal_arm))
+    print(
+        "# %.2f kg-m^2 Moment of inertia of the distal arm about the arm pivot point"
+        % (J_distal_arm))
+    print(
+        "# %.2f kg-m^2 Moment of inertia of the distal arm and cube about the arm pivot point"
+        % (J))
+    print("# %d Number of motors" % (num_motors))
+
+    print("# %.2f V Motor voltage" % (voltage))
+    for gear_ratio in range(30, 181, 10):
+        c1 = Kt * gear_ratio * gear_ratio / (Kv * resistance_motor * J)
+        c2 = gear_ratio * Kt / (J * resistance_motor)
+        c3 = radius_to_distal_cg * mass_distal * gravity / J
+
+        if (False):
+            print("# %.8f 1/sec C1 constant" % (c1))
+            print("# %.2f 1/sec C2 constant" % (c2))
+            print("# %.2f 1/(V sec^2) C3 constant" % (c3))
+            print("# %.2f RPM Free speed at motor voltage" %
+                  (voltage * Kv_rpm))
+
+        torque_90_degrees = radius_to_distal_cg * mass_distal * gravity
+        voltage_90_degrees = resistance_motor * torque_90_degrees / (
+            gear_ratio * Kt)
+        torque_peak = gear_ratio * num_motors * torque_stall
+        torque_peak_ft_lbs = torque_peak * newton_meters_to_ft_lbs
+        normal_force = torque_peak / length_distal_arm
+        normal_force_lbf = newton_to_lbf * normal_force
+        time_required = get_180_degree_time(c1, c2, c3, voltage, gear_ratio,
+                                            motor_free_speed)
+        print ("Time for %.1f degrees for gear ratio %3.0f is %.2f seconds.  90 degree torque %.1f N-m and voltage %.1f Volts. Peak torque %3.0f nm %3.0f ft-lb Normal force at distal end %3.0f N %2.0f lbf" % \
+          (to_deg(theta_travel),gear_ratio,time_required,torque_90_degrees,voltage_90_degrees,
+           torque_peak,torque_peak_ft_lbs,normal_force,normal_force_lbf))
+
+
 if __name__ == '__main__':
-   main()
+    main()
diff --git a/y2018/control_loops/python/arm_proximal.py b/y2018/control_loops/python/arm_proximal.py
index d40ec8a..4abd1cf 100755
--- a/y2018/control_loops/python/arm_proximal.py
+++ b/y2018/control_loops/python/arm_proximal.py
@@ -3,7 +3,7 @@
 # This code was used to select the gear ratio for the proximal arm.
 # Run it from the command line and it displays the time required
 # to move the proximal arm 180 degrees from straight down to straight up.
-# 
+#
 # Michael Schuh
 # January 20, 2018
 
@@ -14,241 +14,291 @@
 # apt-get install python-scipy python3-scipy python-numpy python3-numpy
 
 pi = math.pi
-pi2 = 2.0*pi
-rad_to_deg = 180.0/pi
+pi2 = 2.0 * pi
+rad_to_deg = 180.0 / pi
 inches_to_meters = 0.0254
-lbs_to_kg = 1.0/2.2
+lbs_to_kg = 1.0 / 2.2
 newton_to_lbf = 0.224809
 newton_meters_to_ft_lbs = 0.73756
 run_count = 0
 theta_travel = 0.0
 
+
 def to_deg(angle):
-  return (angle*rad_to_deg)
+    return (angle * rad_to_deg)
+
 
 def to_rad(angle):
-  return (angle/rad_to_deg)
+    return (angle / rad_to_deg)
+
 
 def to_rotations(angle):
-  return (angle/pi2)
+    return (angle / pi2)
+
 
 def time_derivative(x, t, voltage, c1, c2, c3):
-  global run_count
-  theta, omega = x
-  dxdt = [omega, -c1*omega + c3*math.sin(theta) + c2*voltage]
-  run_count = run_count + 1
+    global run_count
+    theta, omega = x
+    dxdt = [omega, -c1 * omega + c3 * math.sin(theta) + c2 * voltage]
+    run_count = run_count + 1
 
-  #print ('dxdt = ',dxdt,' repr(dxdt) = ', repr(dxdt))
-  return dxdt
+    #print ('dxdt = ',dxdt,' repr(dxdt) = ', repr(dxdt))
+    return dxdt
+
 
 def get_distal_angle(theta_proximal):
-  # For the proximal angle = -50 degrees, the distal angle is -180 degrees
-  # For the proximal angle =  10 degrees, the distal angle is  -90 degrees
-  distal_angle = to_rad(-180.0 - (-50.0-to_deg(theta_proximal))*(180.0-90.0)/(50.0+10.0))
-  return distal_angle
+    # For the proximal angle = -50 degrees, the distal angle is -180 degrees
+    # For the proximal angle =  10 degrees, the distal angle is  -90 degrees
+    distal_angle = to_rad(-180.0 - (-50.0 - to_deg(theta_proximal)) *
+                          (180.0 - 90.0) / (50.0 + 10.0))
+    return distal_angle
+
 
 def get_distal_omega(omega_proximal):
-  # For the proximal angle = -50 degrees, the distal angle is -180 degrees
-  # For the proximal angle =  10 degrees, the distal angle is  -90 degrees
-  distal_angle = omega_proximal*( (180.0-90.0) / (50.0+10.0) )
-  return distal_angle
-  
+    # For the proximal angle = -50 degrees, the distal angle is -180 degrees
+    # For the proximal angle =  10 degrees, the distal angle is  -90 degrees
+    distal_angle = omega_proximal * ((180.0 - 90.0) / (50.0 + 10.0))
+    return distal_angle
 
-def get_180_degree_time(c1,c2,c3,voltage,gear_ratio,motor_free_speed):
-  #print ("# step     time    theta    angular_speed   angular_acceleration  theta   angular_speed  motor_speed motor_speed_fraction")
-  #print ("#          (sec)   (rad)      (rad/sec)        (rad/sec^2)      (rotations) (rotations/sec)    (rpm)   (fraction)")
-  global run_count
-  global theta_travel
 
-  if ( False ):
-    # Gravity is assisting the motion.
-    theta_start = 0.0
-    theta_target = pi
-  elif ( False ):
-    # Gravity is assisting the motion.
-    theta_start = 0.0
-    theta_target = -pi
-  elif ( False ):
-    # Gravity is slowing the motion.
-    theta_start = pi
-    theta_target = 0.0
-  elif ( False ):
-    # Gravity is slowing the motion.
-    theta_start = -pi
-    theta_target = 0.0
-  elif ( True ):
-    # This is for the proximal arm motion.
-    theta_start = to_rad(-50.0)
-    theta_target = to_rad(10.0)
+def get_180_degree_time(c1, c2, c3, voltage, gear_ratio, motor_free_speed):
+    #print ("# step     time    theta    angular_speed   angular_acceleration  theta   angular_speed  motor_speed motor_speed_fraction")
+    #print ("#          (sec)   (rad)      (rad/sec)        (rad/sec^2)      (rotations) (rotations/sec)    (rpm)   (fraction)")
+    global run_count
+    global theta_travel
 
-  theta_half = 0.5*(theta_start + theta_target)
-  if (theta_start > theta_target):
-    voltage = -voltage
-  theta = theta_start
-  theta_travel = theta_start - theta_target 
-  if ( run_count == 0 ):
-    print ("# Theta Start = %.2f radians End = %.2f Theta travel %.2f Theta half = %.2f Voltage = %.2f" % (theta_start,theta_target,theta_travel,theta_half, voltage))
-    print ("# Theta Start = %.2f degrees End = %.2f Theta travel %.2f Theta half = %.2f Voltage = %.2f" % (to_deg(theta_start),to_deg(theta_target),to_deg(theta_travel),to_deg(theta_half), voltage))
-  omega = 0.0
-  time = 0.0
-  delta_time = 0.01 # time step in seconds
-  for step in range(1, 5000):
-     t = numpy.array([time, time + delta_time])
-     time = time + delta_time
-     x = [theta, omega]
-     angular_acceleration = -c1*omega + c2*voltage
-     x_n_plus_1 = scipy.integrate.odeint(time_derivative,x,t,args=(voltage,c1,c2,c3))
-     #print ('x_n_plus_1 = ',x_n_plus_1)
-     #print ('repr(x_n_plus_1) = ',repr(x_n_plus_1))
-     theta, omega = x_n_plus_1[1]
-     #theta= x_n_plus_1[0]
-     #omega = x_n_plus_1[1]
-     if ( False ):
-       print ("%4d  %8.4f %8.2f          %8.4f          %8.4f    %8.3f    %8.3f     %8.3f      %8.3f" % \
-         (step, time, theta, omega, angular_acceleration, to_rotations(theta), \
-         to_rotations(omega), omega*gear_ratio*60.0/pi2, omega*gear_ratio/motor_free_speed ))
-     if (theta_start < theta_target):
-       # Angle is increasing through the motion.
-       if (theta > theta_half):
-         break
-     else:
-       # Angle is decreasing through the motion.
-       if (theta < theta_half):
-         break
-       
-  #print ("# step     time    theta    angular_speed   angular_acceleration  theta   angular_speed  motor_speed motor_speed_fraction")
-  #print ("#          (sec)   (rad)      (rad/sec)        (rad/sec^2)      (rotations) (rotations/sec)    (rpm)   (fraction)")
-  #print ("# Total time for 1/2 rotation of arm is %0.2f" % (time*2))
-  return (2.0*time)
+    if (False):
+        # Gravity is assisting the motion.
+        theta_start = 0.0
+        theta_target = pi
+    elif (False):
+        # Gravity is assisting the motion.
+        theta_start = 0.0
+        theta_target = -pi
+    elif (False):
+        # Gravity is slowing the motion.
+        theta_start = pi
+        theta_target = 0.0
+    elif (False):
+        # Gravity is slowing the motion.
+        theta_start = -pi
+        theta_target = 0.0
+    elif (True):
+        # This is for the proximal arm motion.
+        theta_start = to_rad(-50.0)
+        theta_target = to_rad(10.0)
+
+    theta_half = 0.5 * (theta_start + theta_target)
+    if (theta_start > theta_target):
+        voltage = -voltage
+    theta = theta_start
+    theta_travel = theta_start - theta_target
+    if (run_count == 0):
+        print(
+            "# Theta Start = %.2f radians End = %.2f Theta travel %.2f Theta half = %.2f Voltage = %.2f"
+            % (theta_start, theta_target, theta_travel, theta_half, voltage))
+        print(
+            "# Theta Start = %.2f degrees End = %.2f Theta travel %.2f Theta half = %.2f Voltage = %.2f"
+            % (to_deg(theta_start), to_deg(theta_target), to_deg(theta_travel),
+               to_deg(theta_half), voltage))
+    omega = 0.0
+    time = 0.0
+    delta_time = 0.01  # time step in seconds
+    for step in range(1, 5000):
+        t = numpy.array([time, time + delta_time])
+        time = time + delta_time
+        x = [theta, omega]
+        angular_acceleration = -c1 * omega + c2 * voltage
+        x_n_plus_1 = scipy.integrate.odeint(time_derivative,
+                                            x,
+                                            t,
+                                            args=(voltage, c1, c2, c3))
+        #print ('x_n_plus_1 = ',x_n_plus_1)
+        #print ('repr(x_n_plus_1) = ',repr(x_n_plus_1))
+        theta, omega = x_n_plus_1[1]
+        #theta= x_n_plus_1[0]
+        #omega = x_n_plus_1[1]
+        if (False):
+            print ("%4d  %8.4f %8.2f          %8.4f          %8.4f    %8.3f    %8.3f     %8.3f      %8.3f" % \
+              (step, time, theta, omega, angular_acceleration, to_rotations(theta), \
+              to_rotations(omega), omega*gear_ratio*60.0/pi2, omega*gear_ratio/motor_free_speed ))
+        if (theta_start < theta_target):
+            # Angle is increasing through the motion.
+            if (theta > theta_half):
+                break
+        else:
+            # Angle is decreasing through the motion.
+            if (theta < theta_half):
+                break
+
+    #print ("# step     time    theta    angular_speed   angular_acceleration  theta   angular_speed  motor_speed motor_speed_fraction")
+    #print ("#          (sec)   (rad)      (rad/sec)        (rad/sec^2)      (rotations) (rotations/sec)    (rpm)   (fraction)")
+    #print ("# Total time for 1/2 rotation of arm is %0.2f" % (time*2))
+    return (2.0 * time)
+
 
 def main():
-  global run_count
-  gravity = 9.8 # m/sec^2 Gravity Constant
-  voltage_nominal = 12 # Volts
-  
-  # Vex 775 Pro motor specs from http://banebots.com/p/M2-RS550-120
-  motor_name = "Vex 775 Pro motor specs from http://banebots.com/p/M2-RS550-120"
-  current_stall = 134 # amps stall current
-  current_no_load = 0.7 # amps no load current
-  torque_stall = 710/1000.0 # N-m Stall Torque
-  speed_no_load_rpm = 18730 # RPM no load speed
-  
-  if ( False ):
-    # Bag motor from https://www.vexrobotics.com/217-3351.html
-    motor_name = "Bag motor from https://www.vexrobotics.com/217-3351.html"
-    current_stall = 53.0 # amps stall current
-    current_no_load = 1.8 # amps no load current
-    torque_stall = 0.4 # N-m Stall Torque
-    speed_no_load_rpm = 13180.0 # RPM no load speed
-  
-  if ( True ):
-    # Mini CIM motor from https://www.vexrobotics.com/217-3371.html
-    motor_name = "Mini CIM motor from https://www.vexrobotics.com/217-3371.html"
-    current_stall = 89.0 # amps stall current
-    current_no_load = 3.0 # amps no load current
-    torque_stall = 1.4 # N-m Stall Torque
-    speed_no_load_rpm = 5840.0 # RPM no load speed
+    global run_count
+    gravity = 9.8  # m/sec^2 Gravity Constant
+    voltage_nominal = 12  # Volts
 
-  # How many motors are we using?
-  num_motors = 1
+    # Vex 775 Pro motor specs from http://banebots.com/p/M2-RS550-120
+    motor_name = "Vex 775 Pro motor specs from http://banebots.com/p/M2-RS550-120"
+    current_stall = 134  # amps stall current
+    current_no_load = 0.7  # amps no load current
+    torque_stall = 710 / 1000.0  # N-m Stall Torque
+    speed_no_load_rpm = 18730  # RPM no load speed
 
-  # Motor values
-  print ("# Motor: %s" % (motor_name))
-  print ("# Number of motors: %d" % (num_motors))
-  print ("# Stall torque: %.1f n-m" % (torque_stall))
-  print ("# Stall current: %.1f amps" % (current_stall))
-  print ("# No load current: %.1f amps" % (current_no_load))
-  print ("# No load speed: %.0f rpm" % (speed_no_load_rpm))
-  
-  # Constants from motor values
-  resistance_motor = voltage_nominal/current_stall 
-  speed_no_load_rps = speed_no_load_rpm/60.0 # Revolutions per second no load speed
-  speed_no_load = speed_no_load_rps*2.0*pi
-  Kt = num_motors*torque_stall/current_stall # N-m/A torque constant
-  Kv_rpm = speed_no_load_rpm /(voltage_nominal - resistance_motor*current_no_load)  # rpm/V
-  Kv = Kv_rpm*2.0*pi/60.0 # rpm/V
-  
-  # Robot Geometry and physics
-  length_proximal_arm = inches_to_meters*47.34 # m Length of arm connected to the robot base
-  length_distal_arm = inches_to_meters*44.0 # m Length of arm that holds the cube
-  mass_cube = 6.0*lbs_to_kg  # Weight of the cube in Kgrams
-  mass_proximal_arm = 5.5*lbs_to_kg # Weight of proximal arm
-  mass_distal_arm = 3.5*lbs_to_kg # Weight of distal arm
-  mass_distal = mass_cube + mass_distal_arm
-  mass_proximal = mass_proximal_arm + mass_distal
-  radius_to_proximal_arm_cg = 22.0*inches_to_meters # m Length from arm pivot point to arm CG
-  radius_to_distal_arm_cg = 10.0*inches_to_meters # m Length from arm pivot point to arm CG
+    if (False):
+        # Bag motor from https://www.vexrobotics.com/217-3351.html
+        motor_name = "Bag motor from https://www.vexrobotics.com/217-3351.html"
+        current_stall = 53.0  # amps stall current
+        current_no_load = 1.8  # amps no load current
+        torque_stall = 0.4  # N-m Stall Torque
+        speed_no_load_rpm = 13180.0  # RPM no load speed
 
-  radius_to_distal_cg = ( length_distal_arm*mass_cube + radius_to_distal_arm_cg*mass_distal_arm)/mass_distal
-  radius_to_proximal_cg = ( length_proximal_arm*mass_distal + radius_to_proximal_arm_cg*mass_proximal_arm)/mass_proximal
-  J_cube = length_distal_arm*length_distal_arm*mass_cube 
-  # Kg m^2 Moment of inertia of the proximal arm
-  J_proximal_arm = radius_to_proximal_arm_cg*radius_to_proximal_arm_cg*mass_distal_arm 
-  # Kg m^2 Moment of inertia distal arm and cube at end of proximal arm.
-  J_distal_arm_and_cube_at_end_of_proximal_arm = length_proximal_arm*length_proximal_arm*mass_distal 
-  J_distal_arm = radius_to_distal_arm_cg*radius_to_distal_arm_cg*mass_distal_arm # Kg m^2 Moment of inertia of the distal arm
-  J = J_distal_arm_and_cube_at_end_of_proximal_arm + J_proximal_arm # Moment of inertia of the arm with the cube on the end
+    if (True):
+        # Mini CIM motor from https://www.vexrobotics.com/217-3371.html
+        motor_name = "Mini CIM motor from https://www.vexrobotics.com/217-3371.html"
+        current_stall = 89.0  # amps stall current
+        current_no_load = 3.0  # amps no load current
+        torque_stall = 1.4  # N-m Stall Torque
+        speed_no_load_rpm = 5840.0  # RPM no load speed
 
-  error_margine = 1.0
-  voltage = 10.0 # voltage for the motor.  Assuming a loaded robot so not using 12 V.
-  # It might make sense to use a lower motor frees peed when the voltage is not a full 12 Volts.
-  # motor_free_speed = Kv*voltage
-  motor_free_speed = speed_no_load
-  
-  print ("# Kt = %f N-m/A\n# Kv_rpm = %f rpm/V\n# Kv = %f radians/V" % (Kt, Kv_rpm, Kv))
-  print ("# %.2f Ohms Resistance of the motor " % (resistance_motor))
-  print ("# %.2f kg Cube weight" % (mass_cube))
-  print ("# %.2f kg Proximal Arm mass" % (mass_proximal_arm))
-  print ("# %.2f kg Distal Arm mass" % (mass_distal_arm))
-  print ("# %.2f kg Distal Arm and Cube weight" % (mass_distal))
-  print ("# %.2f m Length from distal arm pivot point to arm CG" % (radius_to_distal_arm_cg))
-  print ("# %.2f m Length from distal arm pivot point to arm and cube cg" % (radius_to_distal_cg))
-  print ("# %.2f kg-m^2 Moment of inertia of the cube about the arm pivot point" % (J_cube))
-  print ("# %.2f m Length from proximal arm pivot point to arm CG" % (radius_to_proximal_arm_cg))
-  print ("# %.2f m Length from proximal arm pivot point to arm and cube cg" % (radius_to_proximal_cg))
-  print ("# %.2f m  Proximal arm length" % (length_proximal_arm))
-  print ("# %.2f m  Distal arm length" % (length_distal_arm))
+    # How many motors are we using?
+    num_motors = 1
 
-  print ("# %.2f kg-m^2 Moment of inertia of the distal arm about the arm pivot point" % (J_distal_arm))
-  print ("# %.2f kg-m^2 Moment of inertia of the proximal arm about the arm pivot point" % (J_proximal_arm))
-  print ("# %.2f kg-m^2 Moment of inertia of the distal arm and cube mass about the proximal arm pivot point" % (J_distal_arm_and_cube_at_end_of_proximal_arm))
-  print ("# %.2f kg-m^2 Moment of inertia of the proximal arm and distal arm and cube about the arm pivot point" % (J))
-  print ("# %d Number of motors" % (num_motors))
-  
-  print ("# %.2f V Motor voltage" % (voltage))
+    # Motor values
+    print("# Motor: %s" % (motor_name))
+    print("# Number of motors: %d" % (num_motors))
+    print("# Stall torque: %.1f n-m" % (torque_stall))
+    print("# Stall current: %.1f amps" % (current_stall))
+    print("# No load current: %.1f amps" % (current_no_load))
+    print("# No load speed: %.0f rpm" % (speed_no_load_rpm))
 
-  print ("\n# Min time is for proximal arm without any forces from distal arm.  Max time puts all distal arm mass at the end of proximal arm.")
+    # Constants from motor values
+    resistance_motor = voltage_nominal / current_stall
+    speed_no_load_rps = speed_no_load_rpm / 60.0  # Revolutions per second no load speed
+    speed_no_load = speed_no_load_rps * 2.0 * pi
+    Kt = num_motors * torque_stall / current_stall  # N-m/A torque constant
+    Kv_rpm = speed_no_load_rpm / (
+        voltage_nominal - resistance_motor * current_no_load)  # rpm/V
+    Kv = Kv_rpm * 2.0 * pi / 60.0  # rpm/V
 
-  for gear_ratio in range(60, 241, 10):
-    c1 = Kt*gear_ratio*gear_ratio/(Kv*resistance_motor*J)
-    c2 = gear_ratio*Kt/(J*resistance_motor)
-    c3 = radius_to_proximal_cg*mass_proximal*gravity/J
-    c1_proximal_only = Kt*gear_ratio*gear_ratio/(Kv*resistance_motor*J_proximal_arm)
-    c2_proximal_only = gear_ratio*Kt/(J_proximal_arm*resistance_motor)
-    c3_proximal_only = radius_to_proximal_arm_cg*mass_proximal_arm*gravity/J_proximal_arm
-  
-    if ( False and run_count < 1 ):
-      print ("# %.8f 1/sec C1 constant" % (c1))
-      print ("# %.2f 1/sec C2 constant" % (c2))
-      print ("# %.2f 1/(V sec^2) C3 constant" % (c3))
-      print ("# %.8f 1/sec C1 proximal only constant" % (c1_proximal_only))
-      print ("# %.2f 1/sec C2 proximal only constant" % (c2_proximal_only))
-      print ("# %.2f 1/(V sec^2) C3 proximal only constant" % (c3_proximal_only))
-      print ("# %.2f RPM Free speed at motor voltage" % (voltage*Kv_rpm))
-  
-    torque_90_degrees = radius_to_proximal_cg*mass_proximal*gravity
-    voltage_90_degrees = resistance_motor*torque_90_degrees/(gear_ratio*Kt)
-    torque_peak = gear_ratio*num_motors*torque_stall
-    torque_peak_ft_lbs = torque_peak * newton_meters_to_ft_lbs
-    normal_force = torque_peak/length_proximal_arm
-    normal_force_lbf = newton_to_lbf*normal_force 
-    normal_distal_arm_end_force = torque_peak/(length_proximal_arm + length_distal_arm)
-    normal_distal_arm_end_force_lbf = newton_to_lbf*normal_distal_arm_end_force 
-    time_required = get_180_degree_time(c1,c2,c3,voltage,gear_ratio,motor_free_speed)
-    time_required_proximal_only = get_180_degree_time(c1_proximal_only,c2_proximal_only,c3_proximal_only,voltage,gear_ratio,motor_free_speed)
-    print ("Time for %.1f degrees for gear ratio %3.0f is %.2f (min) %.2f (max) seconds.  90 degree torque %.1f N-m and voltage %.1f Volts. Peak torque %3.0f nm %3.0f ft-lb Normal force at proximal end %3.0f N %2.0f lbf distal end %3.0f N %2.0f lbf" % \
-      (to_deg(theta_travel),gear_ratio,time_required_proximal_only,time_required,torque_90_degrees,voltage_90_degrees,
-       torque_peak,torque_peak_ft_lbs,normal_force,normal_force_lbf,normal_distal_arm_end_force,normal_distal_arm_end_force_lbf))
-  
+    # Robot Geometry and physics
+    length_proximal_arm = inches_to_meters * 47.34  # m Length of arm connected to the robot base
+    length_distal_arm = inches_to_meters * 44.0  # m Length of arm that holds the cube
+    mass_cube = 6.0 * lbs_to_kg  # Weight of the cube in Kgrams
+    mass_proximal_arm = 5.5 * lbs_to_kg  # Weight of proximal arm
+    mass_distal_arm = 3.5 * lbs_to_kg  # Weight of distal arm
+    mass_distal = mass_cube + mass_distal_arm
+    mass_proximal = mass_proximal_arm + mass_distal
+    radius_to_proximal_arm_cg = 22.0 * inches_to_meters  # m Length from arm pivot point to arm CG
+    radius_to_distal_arm_cg = 10.0 * inches_to_meters  # m Length from arm pivot point to arm CG
+
+    radius_to_distal_cg = (
+        length_distal_arm * mass_cube +
+        radius_to_distal_arm_cg * mass_distal_arm) / mass_distal
+    radius_to_proximal_cg = (
+        length_proximal_arm * mass_distal +
+        radius_to_proximal_arm_cg * mass_proximal_arm) / mass_proximal
+    J_cube = length_distal_arm * length_distal_arm * mass_cube
+    # Kg m^2 Moment of inertia of the proximal arm
+    J_proximal_arm = radius_to_proximal_arm_cg * radius_to_proximal_arm_cg * mass_distal_arm
+    # Kg m^2 Moment of inertia distal arm and cube at end of proximal arm.
+    J_distal_arm_and_cube_at_end_of_proximal_arm = length_proximal_arm * length_proximal_arm * mass_distal
+    J_distal_arm = radius_to_distal_arm_cg * radius_to_distal_arm_cg * mass_distal_arm  # Kg m^2 Moment of inertia of the distal arm
+    J = J_distal_arm_and_cube_at_end_of_proximal_arm + J_proximal_arm  # Moment of inertia of the arm with the cube on the end
+
+    error_margine = 1.0
+    voltage = 10.0  # voltage for the motor.  Assuming a loaded robot so not using 12 V.
+    # It might make sense to use a lower motor frees peed when the voltage is not a full 12 Volts.
+    # motor_free_speed = Kv*voltage
+    motor_free_speed = speed_no_load
+
+    print("# Kt = %f N-m/A\n# Kv_rpm = %f rpm/V\n# Kv = %f radians/V" %
+          (Kt, Kv_rpm, Kv))
+    print("# %.2f Ohms Resistance of the motor " % (resistance_motor))
+    print("# %.2f kg Cube weight" % (mass_cube))
+    print("# %.2f kg Proximal Arm mass" % (mass_proximal_arm))
+    print("# %.2f kg Distal Arm mass" % (mass_distal_arm))
+    print("# %.2f kg Distal Arm and Cube weight" % (mass_distal))
+    print("# %.2f m Length from distal arm pivot point to arm CG" %
+          (radius_to_distal_arm_cg))
+    print("# %.2f m Length from distal arm pivot point to arm and cube cg" %
+          (radius_to_distal_cg))
+    print(
+        "# %.2f kg-m^2 Moment of inertia of the cube about the arm pivot point"
+        % (J_cube))
+    print("# %.2f m Length from proximal arm pivot point to arm CG" %
+          (radius_to_proximal_arm_cg))
+    print("# %.2f m Length from proximal arm pivot point to arm and cube cg" %
+          (radius_to_proximal_cg))
+    print("# %.2f m  Proximal arm length" % (length_proximal_arm))
+    print("# %.2f m  Distal arm length" % (length_distal_arm))
+
+    print(
+        "# %.2f kg-m^2 Moment of inertia of the distal arm about the arm pivot point"
+        % (J_distal_arm))
+    print(
+        "# %.2f kg-m^2 Moment of inertia of the proximal arm about the arm pivot point"
+        % (J_proximal_arm))
+    print(
+        "# %.2f kg-m^2 Moment of inertia of the distal arm and cube mass about the proximal arm pivot point"
+        % (J_distal_arm_and_cube_at_end_of_proximal_arm))
+    print(
+        "# %.2f kg-m^2 Moment of inertia of the proximal arm and distal arm and cube about the arm pivot point"
+        % (J))
+    print("# %d Number of motors" % (num_motors))
+
+    print("# %.2f V Motor voltage" % (voltage))
+
+    print(
+        "\n# Min time is for proximal arm without any forces from distal arm.  Max time puts all distal arm mass at the end of proximal arm."
+    )
+
+    for gear_ratio in range(60, 241, 10):
+        c1 = Kt * gear_ratio * gear_ratio / (Kv * resistance_motor * J)
+        c2 = gear_ratio * Kt / (J * resistance_motor)
+        c3 = radius_to_proximal_cg * mass_proximal * gravity / J
+        c1_proximal_only = Kt * gear_ratio * gear_ratio / (
+            Kv * resistance_motor * J_proximal_arm)
+        c2_proximal_only = gear_ratio * Kt / (J_proximal_arm *
+                                              resistance_motor)
+        c3_proximal_only = radius_to_proximal_arm_cg * mass_proximal_arm * gravity / J_proximal_arm
+
+        if (False and run_count < 1):
+            print("# %.8f 1/sec C1 constant" % (c1))
+            print("# %.2f 1/sec C2 constant" % (c2))
+            print("# %.2f 1/(V sec^2) C3 constant" % (c3))
+            print("# %.8f 1/sec C1 proximal only constant" %
+                  (c1_proximal_only))
+            print("# %.2f 1/sec C2 proximal only constant" %
+                  (c2_proximal_only))
+            print("# %.2f 1/(V sec^2) C3 proximal only constant" %
+                  (c3_proximal_only))
+            print("# %.2f RPM Free speed at motor voltage" %
+                  (voltage * Kv_rpm))
+
+        torque_90_degrees = radius_to_proximal_cg * mass_proximal * gravity
+        voltage_90_degrees = resistance_motor * torque_90_degrees / (
+            gear_ratio * Kt)
+        torque_peak = gear_ratio * num_motors * torque_stall
+        torque_peak_ft_lbs = torque_peak * newton_meters_to_ft_lbs
+        normal_force = torque_peak / length_proximal_arm
+        normal_force_lbf = newton_to_lbf * normal_force
+        normal_distal_arm_end_force = torque_peak / (length_proximal_arm +
+                                                     length_distal_arm)
+        normal_distal_arm_end_force_lbf = newton_to_lbf * normal_distal_arm_end_force
+        time_required = get_180_degree_time(c1, c2, c3, voltage, gear_ratio,
+                                            motor_free_speed)
+        time_required_proximal_only = get_180_degree_time(
+            c1_proximal_only, c2_proximal_only, c3_proximal_only, voltage,
+            gear_ratio, motor_free_speed)
+        print ("Time for %.1f degrees for gear ratio %3.0f is %.2f (min) %.2f (max) seconds.  90 degree torque %.1f N-m and voltage %.1f Volts. Peak torque %3.0f nm %3.0f ft-lb Normal force at proximal end %3.0f N %2.0f lbf distal end %3.0f N %2.0f lbf" % \
+          (to_deg(theta_travel),gear_ratio,time_required_proximal_only,time_required,torque_90_degrees,voltage_90_degrees,
+           torque_peak,torque_peak_ft_lbs,normal_force,normal_force_lbf,normal_distal_arm_end_force,normal_distal_arm_end_force_lbf))
+
+
 if __name__ == '__main__':
-   main()
+    main()
diff --git a/y2018/control_loops/python/arm_trajectory.py b/y2018/control_loops/python/arm_trajectory.py
index bcfd9f0..9db7717 100755
--- a/y2018/control_loops/python/arm_trajectory.py
+++ b/y2018/control_loops/python/arm_trajectory.py
@@ -20,6 +20,7 @@
 
 
 class Dynamics(object):
+
     def __init__(self, dt):
         self.dt = dt
 
@@ -61,29 +62,30 @@
             [[self.G1 * self.Kt / self.R, 0.0],
              [0.0, self.G2 * self.kNumDistalMotors * self.Kt / self.R]])
         self.K4 = numpy.matrix(
-            [[self.G1 * self.G1 * self.Kt / (self.Kv * self.R), 0.0], [
-                0.0, self.G2 * self.G2 * self.Kt * self.kNumDistalMotors /
-                (self.Kv * self.R)
-            ]])
+            [[self.G1 * self.G1 * self.Kt / (self.Kv * self.R), 0.0],
+             [
+                 0.0, self.G2 * self.G2 * self.Kt * self.kNumDistalMotors /
+                 (self.Kv * self.R)
+             ]])
 
         # These constants are for the Extended Kalman Filter
         # Q is the covariance of the X values.  Use the square of the standard
         # deviation of the error accumulated each time step.
-        self.Q_x_covariance = numpy.matrix([[0.001**2,0.0,0.0,0.0,0.0,0.0],
-                                       [0.0,0.001**2,0.0,0.0,0.0,0.0],
-                                       [0.0,0.0,0.001**2,0.0,0.0,0.0],
-                                       [0.0,0.0,0.0,0.001**2,0.0,0.0],
-                                       [0.0,0.0,0.0,0.0,10.0**2,0.0],
-                                       [0.0,0.0,0.0,0.0,0.0,10.0**2]])
+        self.Q_x_covariance = numpy.matrix(
+            [[0.001**2, 0.0, 0.0, 0.0, 0.0, 0.0],
+             [0.0, 0.001**2, 0.0, 0.0, 0.0, 0.0],
+             [0.0, 0.0, 0.001**2, 0.0, 0.0, 0.0],
+             [0.0, 0.0, 0.0, 0.001**2, 0.0, 0.0],
+             [0.0, 0.0, 0.0, 0.0, 10.0**2, 0.0],
+             [0.0, 0.0, 0.0, 0.0, 0.0, 10.0**2]])
         # R is the covariance of the Z values.  Increase the responsiveness to
         # changes by reducing coresponding term in the R matrix.
-        self.R_y_covariance = numpy.matrix([[0.01**2, 0.0],[0.0, 0.01**2]])
+        self.R_y_covariance = numpy.matrix([[0.01**2, 0.0], [0.0, 0.01**2]])
         # H is the jacobian of the h(x) measurement prediction function
-        self.H_h_jacobian = numpy.matrix([[1.0,0.0,0.0,0.0,0.0,0.0],
-                                          [0.0,0.0,1.0,0.0,0.0,0.0]])
+        self.H_h_jacobian = numpy.matrix([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+                                          [0.0, 0.0, 1.0, 0.0, 0.0, 0.0]])
         self.Identity_matrix = numpy.matrix(numpy.identity(6))
 
-
     def discrete_dynamics_ekf_predict(self, X_hat, P_covariance_estimate, U,
                                       sim_dt):
         """Updates the Extended Kalman Filter state for one timestep.
@@ -108,8 +110,8 @@
         # Predict step
         #   Compute the state trasition matrix using the Jacobian of state
         #   estimate
-        F_k = numerical_jacobian_x(self.unbounded_discrete_dynamics_ekf,
-          X_hat, U)
+        F_k = numerical_jacobian_x(self.unbounded_discrete_dynamics_ekf, X_hat,
+                                   U)
         #   State estimate
         X_hat = self.unbounded_discrete_dynamics_ekf(X_hat, U, sim_dt)
         #   Covariance estimate
@@ -134,7 +136,7 @@
         # Update step
         #   Measurement residual error of proximal and distal
         #   joint angles.
-        Y_hat = Y_reading - numpy.matrix([[X_hat[0,0]],[X_hat[2,0]]])
+        Y_hat = Y_reading - numpy.matrix([[X_hat[0, 0]], [X_hat[2, 0]]])
         #   Residual covariance
         S = self.H_h_jacobian * P_covariance_estimate * self.H_h_jacobian.T + \
           self.R_y_covariance
@@ -144,8 +146,8 @@
         #   Updated state estimate
         X_hat = X_hat + Kalman_gain * Y_hat
         #   Updated covariance estimate
-        P_covariance_estimate = (self.Identity_matrix -
-          Kalman_gain * self.H_h_jacobian) * P_covariance_estimate
+        P_covariance_estimate = (self.Identity_matrix - Kalman_gain *
+                                 self.H_h_jacobian) * P_covariance_estimate
         return X_hat, P_covariance_estimate
 
     def NormilizedMatriciesForState(self, X):
@@ -158,8 +160,8 @@
         c = numpy.cos(X[0, 0] - X[2, 0])
 
         # K1 * d^2 theta/dt^2 + K2 * d theta/dt = torque
-        K1 = numpy.matrix(
-            [[self.alpha, c * self.beta], [c * self.beta, self.gamma]])
+        K1 = numpy.matrix([[self.alpha, c * self.beta],
+                           [c * self.beta, self.gamma]])
 
         K2 = numpy.matrix([[0.0, s * self.beta], [-s * self.beta, 0.0]])
 
@@ -183,8 +185,8 @@
         """
         K1, K2, K3, K4 = self.MatriciesForState(X)
 
-        return numpy.linalg.inv(K3) * (
-            K1 * alpha_t + K2 * omega_t + K4 * omega_t)
+        return numpy.linalg.inv(K3) * (K1 * alpha_t + K2 * omega_t +
+                                       K4 * omega_t)
 
     def ff_u_distance(self, trajectory, distance):
         """Computes the feed forward U at the distance on the trajectory.
@@ -252,15 +254,15 @@
                              [accel[1, 0]], [0.0], [0.0]])
 
     def unbounded_discrete_dynamics(self, X, U, dt=None):
-        return RungeKutta(lambda startingX: self.dynamics(startingX, U), X,
-                          dt or self.dt)
+        return RungeKutta(lambda startingX: self.dynamics(startingX, U), X, dt
+                          or self.dt)
 
     def unbounded_discrete_dynamics_ekf(self, X, U, dt=None):
         return RungeKutta(lambda startingX: self.dynamics_ekf(startingX, U), X,
                           dt or self.dt)
 
     def discrete_dynamics(self, X, U, dt=None):
-        assert((numpy.abs(U) <= (12.0 + 1e-6)).all())
+        assert ((numpy.abs(U) <= (12.0 + 1e-6)).all())
         return self.unbounded_discrete_dynamics(X, U, dt)
 
 
@@ -324,8 +326,8 @@
     q_vel = 4.0
     Q = numpy.matrix(
         numpy.diag([
-            1.0 / (q_pos**2.0), 1.0 / (q_vel**2.0), 1.0 / (q_pos**2.0), 1.0 / (
-                q_vel**2.0)
+            1.0 / (q_pos**2.0), 1.0 / (q_vel**2.0), 1.0 / (q_pos**2.0),
+            1.0 / (q_vel**2.0)
         ]))
 
     R = numpy.matrix(numpy.diag([1.0 / (12.0**2.0), 1.0 / (12.0**2.0)]))
@@ -335,19 +337,21 @@
 
     return controls.dlqr(final_A, final_B, Q, R)
 
+
 def get_encoder_values(X):
-  """Returns simulated encoder readings.
+    """Returns simulated encoder readings.
 
   This method returns the encoder readings.  For now simply use values from X
   with some noise added in to make the simulation more interesting.
   """
-  introduced_random_measurement_noise = 0.005
-  introduced_random_measurement_noise = 0.05
-  theta1_measured = X[0,0] + introduced_random_measurement_noise * \
-    2.0 * ( numpy.random.random() - 0.5 )
-  theta2_measured = X[2,0] + introduced_random_measurement_noise * \
-    2.0 * ( numpy.random.random() - 0.5 )
-  return numpy.matrix([[theta1_measured ],[theta2_measured]])
+    introduced_random_measurement_noise = 0.005
+    introduced_random_measurement_noise = 0.05
+    theta1_measured = X[0,0] + introduced_random_measurement_noise * \
+      2.0 * ( numpy.random.random() - 0.5 )
+    theta2_measured = X[2,0] + introduced_random_measurement_noise * \
+      2.0 * ( numpy.random.random() - 0.5 )
+    return numpy.matrix([[theta1_measured], [theta2_measured]])
+
 
 class Trajectory:
     """This class represents a trajectory in theta space."""
@@ -358,8 +362,12 @@
             numpy.matrix([[numpy.pi / 2.0 - x[0]], [numpy.pi / 2.0 - x[1]]])
             for x in self.path_points
         ]
-        self._omegas = [numpy.matrix([[-x[2]], [-x[3]]]) for x in self.path_points]
-        self._alphas = [numpy.matrix([[-x[4]], [-x[5]]]) for x in self.path_points]
+        self._omegas = [
+            numpy.matrix([[-x[2]], [-x[3]]]) for x in self.path_points
+        ]
+        self._alphas = [
+            numpy.matrix([[-x[4]], [-x[5]]]) for x in self.path_points
+        ]
         self.path_step_size = path_step_size
         self.point_distances = [0.0]
         last_point = self._thetas[0]
@@ -389,8 +397,9 @@
             return points[0]
         elif distance >= self._length:
             return points[-1]
-        after_index = numpy.searchsorted(
-            self.point_distances, distance, side='right')
+        after_index = numpy.searchsorted(self.point_distances,
+                                         distance,
+                                         side='right')
         before_index = after_index - 1
         return (distance - self.point_distances[before_index]) / (
             self.point_distances[after_index] -
@@ -420,15 +429,15 @@
             alpha = self.alpha(distance)
             X = numpy.matrix([[theta[0, 0]], [0.0], [theta[1, 0]], [0.0]])
             K1, K2, K3, K4 = dynamics.NormilizedMatriciesForState(X)
-            omega_square = numpy.matrix(
-                [[omega[0, 0], 0.0], [0.0, omega[1, 0]]])
+            omega_square = numpy.matrix([[omega[0, 0], 0.0],
+                                         [0.0, omega[1, 0]]])
             # Here, we can say that
             #   d^2/dt^2 theta = d^2/dd^2 theta(d) * (d d/dt)^2
             # Normalize so that the max accel is 1, and take magnitudes. This
             # gives us the max velocity we can be at each point due to
             # curvature.
-            vk1 = numpy.linalg.inv(K3) * (
-                K1 * alpha + K2 * omega_square * omega)
+            vk1 = numpy.linalg.inv(K3) * (K1 * alpha +
+                                          K2 * omega_square * omega)
             vk2 = numpy.linalg.inv(K3) * K4 * omega
             ddots = []
             for c in [-vmax, vmax]:
@@ -470,8 +479,8 @@
 
         voltage_accel_list = []
         for c in [-vmax, vmax]:
-            for a, b in [(k_constant[0, 0], k_scalar[0, 0]), (k_constant[1, 0],
-                                                              k_scalar[1, 0])]:
+            for a, b in [(k_constant[0, 0], k_scalar[0, 0]),
+                         (k_constant[1, 0], k_scalar[1, 0])]:
                 # This time, we are doing the other pass.  So, find all
                 # the decelerations (and flip them) to find the prior
                 # velocity.
@@ -483,14 +492,16 @@
                 filtered_voltage_accel_list.append(a)
 
         goal_acceleration = numpy.sqrt(
-            max(0.0, 1.0 -
+            max(
+                0.0, 1.0 -
                 (numpy.linalg.norm(alpha_unitizer * alpha) * velocity *
                  velocity)**2.0)) / numpy.linalg.norm(alpha_unitizer * omega)
         if filtered_voltage_accel_list:
             # TODO(austin): The max of the list seems right, but I'm
             # not seeing many lists with a size > 1, so it's hard to
             # tell.  Max is conservative, for sure.
-            goal_acceleration = min(-max(filtered_voltage_accel_list), goal_acceleration)
+            goal_acceleration = min(-max(filtered_voltage_accel_list),
+                                    goal_acceleration)
         return goal_acceleration
 
     def back_trajectory_pass(self, previous_pass, dynamics, alpha_unitizer,
@@ -516,8 +527,8 @@
 
                 integration_step_size = self.path_step_size / float(num_steps)
                 int_d += integration_step_size
-                int_vel = numpy.sqrt(2.0 * int_accel_t * integration_step_size
-                                     + int_vel * int_vel)
+                int_vel = numpy.sqrt(2.0 * int_accel_t *
+                                     integration_step_size + int_vel * int_vel)
             max_dvelocity_back_pass[index] = min(
                 int_vel, max_dvelocity_back_pass[index])
         return max_dvelocity_back_pass
@@ -539,17 +550,18 @@
         omega_square = numpy.matrix([[omega[0, 0], 0.0], [0.0, omega[1, 0]]])
 
         k_constant = numpy.linalg.inv(K3) * (
-            (K1 * alpha + K2 * omega_square * omega
-             ) * goal_velocity * goal_velocity + K4 * omega * goal_velocity)
+            (K1 * alpha + K2 * omega_square * omega) * goal_velocity *
+            goal_velocity + K4 * omega * goal_velocity)
         k_scalar = numpy.linalg.inv(K3) * K1 * omega
         voltage_accel_list = []
         for c in [-vmax, vmax]:
-            for a, b in [(k_constant[0, 0], k_scalar[0, 0]), (k_constant[1, 0],
-                                                              k_scalar[1, 0])]:
+            for a, b in [(k_constant[0, 0], k_scalar[0, 0]),
+                         (k_constant[1, 0], k_scalar[1, 0])]:
                 voltage_accel_list.append((c - a) / b)
 
         goal_acceleration = numpy.sqrt(
-            max(0.0, 1.0 -
+            max(
+                0.0, 1.0 -
                 (numpy.linalg.norm(alpha_unitizer * alpha) * goal_velocity *
                  goal_velocity)**2.0)) / numpy.linalg.norm(
                      alpha_unitizer * omega)
@@ -564,8 +576,8 @@
             # TODO(austin): The max of the list seems right, but I'm not
             # seeing many lists with a size > 1, so it's hard to tell.
             # Min is conservative, for sure.
-            goal_acceleration = min(
-                min(filtered_voltage_accel_list), goal_acceleration)
+            goal_acceleration = min(min(filtered_voltage_accel_list),
+                                    goal_acceleration)
 
         return goal_acceleration
 
@@ -592,8 +604,8 @@
 
                 integration_step_size = self.path_step_size / float(num_steps)
                 int_d += integration_step_size
-                int_vel = numpy.sqrt(2.0 * int_accel_t * integration_step_size
-                                      + int_vel * int_vel)
+                int_vel = numpy.sqrt(2.0 * int_accel_t *
+                                     integration_step_size + int_vel * int_vel)
 
             max_dvelocity_forward_pass[index] = min(
                 int_vel, max_dvelocity_forward_pass[index])
@@ -623,11 +635,12 @@
 
     def interpolate_velocity(self, d, d0, d1, v0, v1):
         if v0 + v1 > 0:
-            return numpy.sqrt(v0 * v0 +
-                              (v1 * v1 - v0 * v0) * (d - d0) / (d1 - d0))
+            return numpy.sqrt(v0 * v0 + (v1 * v1 - v0 * v0) * (d - d0) /
+                              (d1 - d0))
         else:
-            return -numpy.sqrt(v0 * v0 +
-                               (v1 * v1 - v0 * v0) * (d - d0) / (d1 - d0))
+            return -numpy.sqrt(v0 * v0 + (v1 * v1 - v0 * v0) * (d - d0) /
+                               (d1 - d0))
+
     def get_dvelocity(self, d):
         """Computes the path distance velocity of the plan as a function of the distance."""
         after_index = numpy.argmax(self.distance_array > d)
@@ -662,8 +675,8 @@
         if d > self._length:
             return numpy.matrix(numpy.zeros((2, 1)))
         return self.alpha(d) * (
-            (velocity or self.get_dvelocity(d))**2.0) + self.omega(d) * (
-                acceleration or self.get_dacceleration(d))
+            (velocity or self.get_dvelocity(d))**
+            2.0) + self.omega(d) * (acceleration or self.get_dacceleration(d))
 
     def R(self, d, velocity=None):
         theta_t = self.theta(d)
@@ -682,21 +695,17 @@
     # TODO(austin): use computed forward dynamics velocity here.
     theta_t = trajectory.theta(saturation_goal_distance)
     saturation_goal_velocity = trajectory.interpolate_velocity(
-        saturation_goal_distance, last_goal_distance,
-        goal_distance, last_goal_velocity, goal_velocity)
+        saturation_goal_distance, last_goal_distance, goal_distance,
+        last_goal_velocity, goal_velocity)
     saturation_goal_acceleration = trajectory.interpolate_acceleration(
-        last_goal_distance, goal_distance, last_goal_velocity,
-        goal_velocity)
-    omega_t = trajectory.omega_t(
-        saturation_goal_distance,
-        velocity=saturation_goal_velocity)
-    alpha_t = trajectory.alpha_t(
-        saturation_goal_distance,
-        velocity=saturation_goal_velocity,
-        acceleration=saturation_goal_acceleration)
-    R = trajectory.R(
-        saturation_goal_distance,
-        velocity=saturation_goal_velocity)
+        last_goal_distance, goal_distance, last_goal_velocity, goal_velocity)
+    omega_t = trajectory.omega_t(saturation_goal_distance,
+                                 velocity=saturation_goal_velocity)
+    alpha_t = trajectory.alpha_t(saturation_goal_distance,
+                                 velocity=saturation_goal_velocity,
+                                 acceleration=saturation_goal_acceleration)
+    R = trajectory.R(saturation_goal_distance,
+                     velocity=saturation_goal_velocity)
     U_ff = numpy.clip(dynamics.ff_u(R, omega_t, alpha_t), -12.0, 12.0)
     return U_ff + K * (
         R - X), saturation_goal_velocity, saturation_goal_acceleration
@@ -767,12 +776,15 @@
     alpha0_max = 40.0
     alpha1_max = 60.0
 
-    alpha_unitizer = numpy.matrix(
-        [[1.0 / alpha0_max, 0.0], [0.0, 1.0 / alpha1_max]])
+    alpha_unitizer = numpy.matrix([[1.0 / alpha0_max, 0.0],
+                                   [0.0, 1.0 / alpha1_max]])
 
     # Compute the trajectory taking into account our velocity, acceleration
     # and voltage constraints.
-    trajectory.compute_trajectory(dynamics, alpha_unitizer, distance_array, vmax=vmax)
+    trajectory.compute_trajectory(dynamics,
+                                  alpha_unitizer,
+                                  distance_array,
+                                  vmax=vmax)
 
     print 'Computed trajectory'
 
@@ -820,8 +832,8 @@
     theta_t = trajectory.theta(goal_distance)
     X = numpy.matrix([[theta_t[0, 0]], [0.0], [theta_t[1, 0]], [0.0]])
     # X_hat is for the Extended Kalman Filter state estimate
-    X_hat = numpy.matrix([[theta_t[0, 0]], [0.0], [theta_t[1, 0]],
-      [0.0], [0.0], [0.0]])
+    X_hat = numpy.matrix([[theta_t[0, 0]], [0.0], [theta_t[1, 0]], [0.0],
+                          [0.0], [0.0]])
     # P is the Covariance Estimate for the Etended Kalman Filter
     P_covariance_estimate = dynamics.Q_x_covariance.copy()
 
@@ -856,10 +868,9 @@
         t_array.append(t)
         theta_t = trajectory.theta(goal_distance)
         omega_t = trajectory.omega_t(goal_distance, velocity=goal_velocity)
-        alpha_t = trajectory.alpha_t(
-            goal_distance,
-            velocity=goal_velocity,
-            acceleration=goal_acceleration)
+        alpha_t = trajectory.alpha_t(goal_distance,
+                                     velocity=goal_velocity,
+                                     acceleration=goal_acceleration)
 
         theta0_goal_t_array.append(theta_t[0, 0])
         theta1_goal_t_array.append(theta_t[1, 0])
@@ -887,7 +898,7 @@
         # available.  For now, simulate the sensor reading by using the X
         # position and adding some noise to it.
         X_hat, P_covariance_estimate = dynamics.discrete_dynamics_ekf_update(
-          X_hat, P_covariance_estimate, sim_dt, get_encoder_values(X))
+            X_hat, P_covariance_estimate, sim_dt, get_encoder_values(X))
 
         R = trajectory.R(goal_distance, velocity=goal_velocity)
         U_ff = numpy.clip(dynamics.ff_u(R, omega_t, alpha_t), -12.0, 12.0)
@@ -927,8 +938,9 @@
                     fraction_along_path += step_size
             print "Fraction", fraction_along_path, "at", goal_distance, "rad,", t, "sec", goal_velocity
 
-            goal_distance = ((goal_distance - last_goal_distance) *
-                             fraction_along_path + last_goal_distance)
+            goal_distance = (
+                (goal_distance - last_goal_distance) * fraction_along_path +
+                last_goal_distance)
             goal_velocity = saturation_goal_velocity
             goal_acceleration = saturation_goal_acceleration
 
@@ -965,8 +977,7 @@
         # Push Extended Kalman filter state forwards.
         # Predict step - call for each time step
         X_hat, P_covariance_estimate = dynamics.discrete_dynamics_ekf_predict(
-          X_hat, P_covariance_estimate, U, sim_dt)
-
+            X_hat, P_covariance_estimate, U, sim_dt)
 
         if abs(goal_distance - trajectory.length()) < 1e-2:
             # If we go backwards along the path near the goal, snap us to the
@@ -999,12 +1010,15 @@
 
     pylab.figure()
     pylab.title("Path Velocity Plan")
-    pylab.plot(
-        distance_array, trajectory.max_dvelocity_unfiltered, label="pass0")
-    pylab.plot(
-        distance_array, trajectory.max_dvelocity_back_pass, label="passb")
-    pylab.plot(
-        distance_array, trajectory.max_dvelocity_forward_pass, label="passf")
+    pylab.plot(distance_array,
+               trajectory.max_dvelocity_unfiltered,
+               label="pass0")
+    pylab.plot(distance_array,
+               trajectory.max_dvelocity_back_pass,
+               label="passb")
+    pylab.plot(distance_array,
+               trajectory.max_dvelocity_forward_pass,
+               label="passf")
     pylab.legend(loc='center')
     pylab.legend()
 
@@ -1062,11 +1076,14 @@
 
     pylab.figure()
     pylab.title("Disturbance Force from Extended Kalman Filter State Values")
-    pylab.plot(t_array, torque_disturbance_0_hat_array, label="torque_disturbance_0_hat")
-    pylab.plot(t_array, torque_disturbance_1_hat_array, label="torque_disturbance_1_hat")
+    pylab.plot(t_array,
+               torque_disturbance_0_hat_array,
+               label="torque_disturbance_0_hat")
+    pylab.plot(t_array,
+               torque_disturbance_1_hat_array,
+               label="torque_disturbance_1_hat")
     pylab.legend()
 
-
     pylab.show()
 
 
diff --git a/y2018/control_loops/python/drivetrain.py b/y2018/control_loops/python/drivetrain.py
index 675930a..0e5ed43 100644
--- a/y2018/control_loops/python/drivetrain.py
+++ b/y2018/control_loops/python/drivetrain.py
@@ -47,5 +47,6 @@
         # Write the generated constants out to a file.
         drivetrain.WriteDrivetrain(argv[1:3], argv[3:5], 'y2018', kDrivetrain)
 
+
 if __name__ == '__main__':
     sys.exit(main(sys.argv))
diff --git a/y2018/control_loops/python/extended_lqr.py b/y2018/control_loops/python/extended_lqr.py
index 4eecee9..9a4705b 100755
--- a/y2018/control_loops/python/extended_lqr.py
+++ b/y2018/control_loops/python/extended_lqr.py
@@ -11,16 +11,17 @@
 
 
 class ArmDynamics(object):
-  def __init__(self, dt):
-    self.dt = dt
 
-    self.l1 = 1.0
-    self.l2 = 0.8
-    self.num_states = 4
-    self.num_inputs = 2
+    def __init__(self, dt):
+        self.dt = dt
 
-  def dynamics(self, X, U):
-    """Calculates the dynamics for a double jointed arm.
+        self.l1 = 1.0
+        self.l2 = 0.8
+        self.num_states = 4
+        self.num_inputs = 2
+
+    def dynamics(self, X, U):
+        """Calculates the dynamics for a double jointed arm.
 
     Args:
       X, numpy.matrix(4, 1), The state.  [theta1, omega1, theta2, omega2]
@@ -29,48 +30,53 @@
     Returns:
       numpy.matrix(4, 1), The derivative of the dynamics.
     """
-    return numpy.matrix([[X[1, 0]],
-                         [U[0, 0]],
-                         [X[3, 0]],
-                         [U[1, 0]]])
+        return numpy.matrix([[X[1, 0]], [U[0, 0]], [X[3, 0]], [U[1, 0]]])
 
-  def discrete_dynamics(self, X, U):
-    return RungeKutta(lambda startingX: self.dynamics(startingX, U), X, dt)
+    def discrete_dynamics(self, X, U):
+        return RungeKutta(lambda startingX: self.dynamics(startingX, U), X, dt)
 
-  def inverse_discrete_dynamics(self, X, U):
-    return RungeKutta(lambda startingX: -self.dynamics(startingX, U), X, dt)
+    def inverse_discrete_dynamics(self, X, U):
+        return RungeKutta(lambda startingX: -self.dynamics(startingX, U), X,
+                          dt)
+
 
 # Simple implementation for a quadratic cost function.
 class ArmCostFunction:
-  def __init__(self, dt, dynamics):
-    self.num_states = 4
-    self.num_inputs = 2
-    self.dt = dt
-    self.dynamics = dynamics
 
-    q_pos = 0.5
-    q_vel = 1.65
-    self.Q = numpy.matrix(numpy.diag([
-        1.0 / (q_pos ** 2.0), 1.0 / (q_vel ** 2.0),
-        1.0 / (q_pos ** 2.0), 1.0 / (q_vel ** 2.0)]))
+    def __init__(self, dt, dynamics):
+        self.num_states = 4
+        self.num_inputs = 2
+        self.dt = dt
+        self.dynamics = dynamics
 
-    self.R = numpy.matrix(numpy.diag([1.0 / (12.0 ** 2.0),
-                                      1.0 / (12.0 ** 2.0)]))
+        q_pos = 0.5
+        q_vel = 1.65
+        self.Q = numpy.matrix(
+            numpy.diag([
+                1.0 / (q_pos**2.0), 1.0 / (q_vel**2.0), 1.0 / (q_pos**2.0),
+                1.0 / (q_vel**2.0)
+            ]))
 
-    final_A = numerical_jacobian_x(self.dynamics.discrete_dynamics,
-                                   numpy.matrix(numpy.zeros((4, 1))),
-                                   numpy.matrix(numpy.zeros((2, 1))))
-    final_B = numerical_jacobian_u(self.dynamics.discrete_dynamics,
-                                   numpy.matrix(numpy.zeros((4, 1))),
-                                   numpy.matrix(numpy.zeros((2, 1))))
-    print 'Final A', final_A
-    print 'Final B', final_B
-    K, self.S = controls.dlqr(
-        final_A, final_B, self.Q, self.R, optimal_cost_function=True) 
-    print 'Final eig:', numpy.linalg.eig(final_A - final_B * K)
+        self.R = numpy.matrix(
+            numpy.diag([1.0 / (12.0**2.0), 1.0 / (12.0**2.0)]))
 
-  def final_cost(self, X, U):
-    """Computes the final cost of being at X
+        final_A = numerical_jacobian_x(self.dynamics.discrete_dynamics,
+                                       numpy.matrix(numpy.zeros((4, 1))),
+                                       numpy.matrix(numpy.zeros((2, 1))))
+        final_B = numerical_jacobian_u(self.dynamics.discrete_dynamics,
+                                       numpy.matrix(numpy.zeros((4, 1))),
+                                       numpy.matrix(numpy.zeros((2, 1))))
+        print 'Final A', final_A
+        print 'Final B', final_B
+        K, self.S = controls.dlqr(final_A,
+                                  final_B,
+                                  self.Q,
+                                  self.R,
+                                  optimal_cost_function=True)
+        print 'Final eig:', numpy.linalg.eig(final_A - final_B * K)
+
+    def final_cost(self, X, U):
+        """Computes the final cost of being at X
 
     Args:
       X: numpy.matrix(self.num_states, 1)
@@ -79,10 +85,10 @@
     Returns:
       numpy.matrix(1, 1), The quadratic cost of being at X
     """
-    return 0.5 * X.T * self.S * X
+        return 0.5 * X.T * self.S * X
 
-  def cost(self, X, U):
-    """Computes the incremental cost given a position and U.
+    def cost(self, X, U):
+        """Computes the incremental cost given a position and U.
 
     Args:
       X: numpy.matrix(self.num_states, 1)
@@ -91,10 +97,10 @@
     Returns:
       numpy.matrix(1, 1), The quadratic cost of evaluating U.
     """
-    return U.T * self.R * U + X.T * self.Q * X
+        return U.T * self.R * U + X.T * self.Q * X
 
-  def estimate_Q_final(self, X_hat):
-    """Returns the quadraticized final Q around X_hat.
+    def estimate_Q_final(self, X_hat):
+        """Returns the quadraticized final Q around X_hat.
 
     This is calculated by evaluating partial^2 cost(X_hat) / (partial X * partial X)
 
@@ -104,13 +110,13 @@
     Result:
       numpy.matrix(self.num_states, self.num_states)
     """
-    zero_U = numpy.matrix(numpy.zeros((self.num_inputs, 1)))
-    print 'S', self.S
-    print 'Q_final', numerical_jacobian_x_x(self.final_cost, X_hat, zero_U)
-    return numerical_jacobian_x_x(self.final_cost, X_hat, zero_U)
+        zero_U = numpy.matrix(numpy.zeros((self.num_inputs, 1)))
+        print 'S', self.S
+        print 'Q_final', numerical_jacobian_x_x(self.final_cost, X_hat, zero_U)
+        return numerical_jacobian_x_x(self.final_cost, X_hat, zero_U)
 
-  def estimate_partial_cost_partial_x_final(self, X_hat):
-    """Returns \frac{\partial cost}{\partial X}(X_hat) for the final cost.
+    def estimate_partial_cost_partial_x_final(self, X_hat):
+        """Returns \frac{\partial cost}{\partial X}(X_hat) for the final cost.
 
     Args:
       X_hat: numpy.matrix(self.num_states, 1), The state to quadraticize around.
@@ -118,24 +124,26 @@
     Result:
       numpy.matrix(self.num_states, 1)
     """
-    return numerical_jacobian_x(self.final_cost, X_hat,
-                                numpy.matrix(numpy.zeros((self.num_inputs, 1)))).T
+        return numerical_jacobian_x(
+            self.final_cost, X_hat,
+            numpy.matrix(numpy.zeros((self.num_inputs, 1)))).T
 
-  def estimate_q_final(self, X_hat):
-    """Returns q evaluated at X_hat for the final cost function."""
-    return self.estimate_partial_cost_partial_x_final(X_hat) - self.estimate_Q_final(X_hat) * X_hat
-
+    def estimate_q_final(self, X_hat):
+        """Returns q evaluated at X_hat for the final cost function."""
+        return self.estimate_partial_cost_partial_x_final(
+            X_hat) - self.estimate_Q_final(X_hat) * X_hat
 
 
 class SkidSteerDynamics(object):
-  def __init__(self, dt):
-    self.width = 0.2
-    self.dt = dt
-    self.num_states = 3
-    self.num_inputs = 2
 
-  def dynamics(self, X, U):
-    """Calculates the dynamics for a 2 wheeled robot.
+    def __init__(self, dt):
+        self.width = 0.2
+        self.dt = dt
+        self.num_states = 3
+        self.num_inputs = 2
+
+    def dynamics(self, X, U):
+        """Calculates the dynamics for a 2 wheeled robot.
 
     Args:
       X, numpy.matrix(3, 1), The state.  [x, y, theta]
@@ -144,34 +152,34 @@
     Returns:
       numpy.matrix(3, 1), The derivative of the dynamics.
     """
-    #return numpy.matrix([[X[1, 0]],
-    #                     [X[2, 0]],
-    #                     [U[0, 0]]])
-    return numpy.matrix([[(U[0, 0] + U[1, 0]) * numpy.cos(X[2, 0]) / 2.0],
-                         [(U[0, 0] + U[1, 0]) * numpy.sin(X[2, 0]) / 2.0],
-                         [(U[1, 0] - U[0, 0]) / self.width]])
+        #return numpy.matrix([[X[1, 0]],
+        #                     [X[2, 0]],
+        #                     [U[0, 0]]])
+        return numpy.matrix([[(U[0, 0] + U[1, 0]) * numpy.cos(X[2, 0]) / 2.0],
+                             [(U[0, 0] + U[1, 0]) * numpy.sin(X[2, 0]) / 2.0],
+                             [(U[1, 0] - U[0, 0]) / self.width]])
 
-  def discrete_dynamics(self, X, U):
-    return RungeKutta(lambda startingX: self.dynamics(startingX, U), X, dt)
+    def discrete_dynamics(self, X, U):
+        return RungeKutta(lambda startingX: self.dynamics(startingX, U), X, dt)
 
-  def inverse_discrete_dynamics(self, X, U):
-    return RungeKutta(lambda startingX: -self.dynamics(startingX, U), X, dt)
+    def inverse_discrete_dynamics(self, X, U):
+        return RungeKutta(lambda startingX: -self.dynamics(startingX, U), X,
+                          dt)
 
 
 # Simple implementation for a quadratic cost function.
 class CostFunction:
-  def __init__(self, dt):
-    self.num_states = 3
-    self.num_inputs = 2
-    self.dt = dt
-    self.Q = numpy.matrix([[0.1, 0, 0],
-                           [0, 0.6, 0],
-                           [0, 0, 0.1]]) / self.dt / self.dt
-    self.R = numpy.matrix([[0.40, 0],
-                           [0, 0.40]]) / self.dt / self.dt
 
-  def final_cost(self, X, U):
-    """Computes the final cost of being at X
+    def __init__(self, dt):
+        self.num_states = 3
+        self.num_inputs = 2
+        self.dt = dt
+        self.Q = numpy.matrix([[0.1, 0, 0], [0, 0.6, 0], [0, 0, 0.1]
+                               ]) / self.dt / self.dt
+        self.R = numpy.matrix([[0.40, 0], [0, 0.40]]) / self.dt / self.dt
+
+    def final_cost(self, X, U):
+        """Computes the final cost of being at X
 
     Args:
       X: numpy.matrix(self.num_states, 1)
@@ -180,10 +188,10 @@
     Returns:
       numpy.matrix(1, 1), The quadratic cost of being at X
     """
-    return X.T * self.Q * X * 1000
+        return X.T * self.Q * X * 1000
 
-  def cost(self, X, U):
-    """Computes the incremental cost given a position and U.
+    def cost(self, X, U):
+        """Computes the incremental cost given a position and U.
 
     Args:
       X: numpy.matrix(self.num_states, 1)
@@ -192,10 +200,10 @@
     Returns:
       numpy.matrix(1, 1), The quadratic cost of evaluating U.
     """
-    return U.T * self.R * U + X.T * self.Q * X
+        return U.T * self.R * U + X.T * self.Q * X
 
-  def estimate_Q_final(self, X_hat):
-    """Returns the quadraticized final Q around X_hat.
+    def estimate_Q_final(self, X_hat):
+        """Returns the quadraticized final Q around X_hat.
 
     This is calculated by evaluating partial^2 cost(X_hat) / (partial X * partial X)
 
@@ -205,11 +213,11 @@
     Result:
       numpy.matrix(self.num_states, self.num_states)
     """
-    zero_U = numpy.matrix(numpy.zeros((self.num_inputs, 1)))
-    return numerical_jacobian_x_x(self.final_cost, X_hat, zero_U)
+        zero_U = numpy.matrix(numpy.zeros((self.num_inputs, 1)))
+        return numerical_jacobian_x_x(self.final_cost, X_hat, zero_U)
 
-  def estimate_partial_cost_partial_x_final(self, X_hat):
-    """Returns \frac{\partial cost}{\partial X}(X_hat) for the final cost.
+    def estimate_partial_cost_partial_x_final(self, X_hat):
+        """Returns \frac{\partial cost}{\partial X}(X_hat) for the final cost.
 
     Args:
       X_hat: numpy.matrix(self.num_states, 1), The state to quadraticize around.
@@ -217,23 +225,27 @@
     Result:
       numpy.matrix(self.num_states, 1)
     """
-    return numerical_jacobian_x(self.final_cost, X_hat, numpy.matrix(numpy.zeros((self.num_inputs, 1)))).T
+        return numerical_jacobian_x(
+            self.final_cost, X_hat,
+            numpy.matrix(numpy.zeros((self.num_inputs, 1)))).T
 
-  def estimate_q_final(self, X_hat):
-    """Returns q evaluated at X_hat for the final cost function."""
-    return self.estimate_partial_cost_partial_x_final(X_hat) - self.estimate_Q_final(X_hat) * X_hat
+    def estimate_q_final(self, X_hat):
+        """Returns q evaluated at X_hat for the final cost function."""
+        return self.estimate_partial_cost_partial_x_final(
+            X_hat) - self.estimate_Q_final(X_hat) * X_hat
 
 
 def RungeKutta(f, x, dt):
-  """4th order RungeKutta integration of F starting at X."""
-  a = f(x)
-  b = f(x + dt / 2.0 * a)
-  c = f(x + dt / 2.0 * b)
-  d = f(x + dt * c)
-  return x + dt * (a + 2.0 * b + 2.0 * c + d) / 6.0
+    """4th order RungeKutta integration of F starting at X."""
+    a = f(x)
+    b = f(x + dt / 2.0 * a)
+    c = f(x + dt / 2.0 * b)
+    d = f(x + dt * c)
+    return x + dt * (a + 2.0 * b + 2.0 * c + d) / 6.0
+
 
 def numerical_jacobian_x(fn, X, U, epsilon=1e-4):
-  """Numerically estimates the jacobian around X, U in X.
+    """Numerically estimates the jacobian around X, U in X.
 
   Args:
     fn: A function of X, U.
@@ -246,20 +258,21 @@
     numpy.matrix(num_states, num_states), The jacobian of fn with X as the
       variable.
   """
-  num_states = X.shape[0]
-  nominal = fn(X, U)
-  answer = numpy.matrix(numpy.zeros((nominal.shape[0], num_states)))
-  # It's more expensive, but +- epsilon will be more reliable
-  for i in range(0, num_states):
-    dX_plus = X.copy()
-    dX_plus[i] += epsilon
-    dX_minus = X.copy()
-    dX_minus[i] -= epsilon
-    answer[:, i] = (fn(dX_plus, U) - fn(dX_minus, U)) / epsilon / 2.0
-  return answer
+    num_states = X.shape[0]
+    nominal = fn(X, U)
+    answer = numpy.matrix(numpy.zeros((nominal.shape[0], num_states)))
+    # It's more expensive, but +- epsilon will be more reliable
+    for i in range(0, num_states):
+        dX_plus = X.copy()
+        dX_plus[i] += epsilon
+        dX_minus = X.copy()
+        dX_minus[i] -= epsilon
+        answer[:, i] = (fn(dX_plus, U) - fn(dX_minus, U)) / epsilon / 2.0
+    return answer
+
 
 def numerical_jacobian_u(fn, X, U, epsilon=1e-4):
-  """Numerically estimates the jacobian around X, U in U.
+    """Numerically estimates the jacobian around X, U in U.
 
   Args:
     fn: A function of X, U.
@@ -272,355 +285,426 @@
     numpy.matrix(num_states, num_inputs), The jacobian of fn with U as the
       variable.
   """
-  num_states = X.shape[0]
-  num_inputs = U.shape[0]
-  nominal = fn(X, U)
-  answer = numpy.matrix(numpy.zeros((nominal.shape[0], num_inputs)))
-  for i in range(0, num_inputs):
-    dU_plus = U.copy()
-    dU_plus[i] += epsilon
-    dU_minus = U.copy()
-    dU_minus[i] -= epsilon
-    answer[:, i] = (fn(X, dU_plus) - fn(X, dU_minus)) / epsilon / 2.0
-  return answer
+    num_states = X.shape[0]
+    num_inputs = U.shape[0]
+    nominal = fn(X, U)
+    answer = numpy.matrix(numpy.zeros((nominal.shape[0], num_inputs)))
+    for i in range(0, num_inputs):
+        dU_plus = U.copy()
+        dU_plus[i] += epsilon
+        dU_minus = U.copy()
+        dU_minus[i] -= epsilon
+        answer[:, i] = (fn(X, dU_plus) - fn(X, dU_minus)) / epsilon / 2.0
+    return answer
+
 
 def numerical_jacobian_x_x(fn, X, U):
-  return numerical_jacobian_x(
-      lambda X_inner, U_inner: numerical_jacobian_x(fn, X_inner, U_inner).T, X, U)
+    return numerical_jacobian_x(
+        lambda X_inner, U_inner: numerical_jacobian_x(fn, X_inner, U_inner).T,
+        X, U)
+
 
 def numerical_jacobian_x_u(fn, X, U):
-  return numerical_jacobian_x(
-      lambda X_inner, U_inner: numerical_jacobian_u(fn, X_inner, U_inner).T, X, U)
+    return numerical_jacobian_x(
+        lambda X_inner, U_inner: numerical_jacobian_u(fn, X_inner, U_inner).T,
+        X, U)
+
 
 def numerical_jacobian_u_x(fn, X, U):
-  return numerical_jacobian_u(
-      lambda X_inner, U_inner: numerical_jacobian_x(fn, X_inner, U_inner).T, X, U)
+    return numerical_jacobian_u(
+        lambda X_inner, U_inner: numerical_jacobian_x(fn, X_inner, U_inner).T,
+        X, U)
+
 
 def numerical_jacobian_u_u(fn, X, U):
-  return numerical_jacobian_u(
-      lambda X_inner, U_inner: numerical_jacobian_u(fn, X_inner, U_inner).T, X, U)
+    return numerical_jacobian_u(
+        lambda X_inner, U_inner: numerical_jacobian_u(fn, X_inner, U_inner).T,
+        X, U)
 
 
 class ELQR(object):
-  def __init__(self, dynamics, cost):
-    self.dynamics = dynamics
-    self.cost = cost
-  
-  def Solve(self, x_hat_initial, horizon, iterations):
-    l = horizon
-    num_states = self.dynamics.num_states
-    num_inputs = self.dynamics.num_inputs
-    self.S_bar_t = [numpy.matrix(numpy.zeros((num_states, num_states))) for _ in range(l + 1)]
-    self.s_bar_t = [numpy.matrix(numpy.zeros((num_states, 1))) for _ in range(l + 1)]
-    self.s_scalar_bar_t = [numpy.matrix(numpy.zeros((1, 1))) for _ in range(l + 1)]
 
-    self.L_t = [numpy.matrix(numpy.zeros((num_inputs, num_states))) for _ in range(l + 1)]
-    self.l_t = [numpy.matrix(numpy.zeros((num_inputs, 1))) for _ in range(l + 1)]
-    self.L_bar_t = [numpy.matrix(numpy.zeros((num_inputs, num_states))) for _ in range(l + 1)]
-    self.l_bar_t = [numpy.matrix(numpy.zeros((num_inputs, 1))) for _ in range(l + 1)]
+    def __init__(self, dynamics, cost):
+        self.dynamics = dynamics
+        self.cost = cost
 
-    self.S_t = [numpy.matrix(numpy.zeros((num_states, num_states))) for _ in range(l + 1)]
-    self.s_t = [numpy.matrix(numpy.zeros((num_states, 1))) for _ in range(l + 1)]
-    self.s_scalar_t = [numpy.matrix(numpy.zeros((1, 1))) for _ in range(l + 1)]
+    def Solve(self, x_hat_initial, horizon, iterations):
+        l = horizon
+        num_states = self.dynamics.num_states
+        num_inputs = self.dynamics.num_inputs
+        self.S_bar_t = [
+            numpy.matrix(numpy.zeros((num_states, num_states)))
+            for _ in range(l + 1)
+        ]
+        self.s_bar_t = [
+            numpy.matrix(numpy.zeros((num_states, 1))) for _ in range(l + 1)
+        ]
+        self.s_scalar_bar_t = [
+            numpy.matrix(numpy.zeros((1, 1))) for _ in range(l + 1)
+        ]
 
-    self.last_x_hat_t = [numpy.matrix(numpy.zeros((num_states, 1))) for _ in range(l + 1)]
+        self.L_t = [
+            numpy.matrix(numpy.zeros((num_inputs, num_states)))
+            for _ in range(l + 1)
+        ]
+        self.l_t = [
+            numpy.matrix(numpy.zeros((num_inputs, 1))) for _ in range(l + 1)
+        ]
+        self.L_bar_t = [
+            numpy.matrix(numpy.zeros((num_inputs, num_states)))
+            for _ in range(l + 1)
+        ]
+        self.l_bar_t = [
+            numpy.matrix(numpy.zeros((num_inputs, 1))) for _ in range(l + 1)
+        ]
 
-    # Iterate the solver
-    for a in range(iterations):
-      x_hat = x_hat_initial
-      u_t = self.L_t[0] * x_hat + self.l_t[0]
-      self.S_bar_t[0] = numpy.matrix(numpy.zeros((num_states, num_states)))
-      self.s_bar_t[0] = numpy.matrix(numpy.zeros((num_states, 1)))
-      self.s_scalar_bar_t[0] = numpy.matrix([[0]])
+        self.S_t = [
+            numpy.matrix(numpy.zeros((num_states, num_states)))
+            for _ in range(l + 1)
+        ]
+        self.s_t = [
+            numpy.matrix(numpy.zeros((num_states, 1))) for _ in range(l + 1)
+        ]
+        self.s_scalar_t = [
+            numpy.matrix(numpy.zeros((1, 1))) for _ in range(l + 1)
+        ]
 
-      self.last_x_hat_t[0] = x_hat_initial
+        self.last_x_hat_t = [
+            numpy.matrix(numpy.zeros((num_states, 1))) for _ in range(l + 1)
+        ]
 
-      Q_t = numerical_jacobian_x_x(self.cost.cost, x_hat_initial, u_t)
-      P_t = numerical_jacobian_x_u(self.cost.cost, x_hat_initial, u_t)
-      R_t = numerical_jacobian_u_u(self.cost.cost, x_hat_initial, u_t)
+        # Iterate the solver
+        for a in range(iterations):
+            x_hat = x_hat_initial
+            u_t = self.L_t[0] * x_hat + self.l_t[0]
+            self.S_bar_t[0] = numpy.matrix(
+                numpy.zeros((num_states, num_states)))
+            self.s_bar_t[0] = numpy.matrix(numpy.zeros((num_states, 1)))
+            self.s_scalar_bar_t[0] = numpy.matrix([[0]])
 
-      q_t = numerical_jacobian_x(self.cost.cost, x_hat_initial, u_t).T \
-          - Q_t * x_hat_initial - P_t.T * u_t
-      r_t = numerical_jacobian_u(self.cost.cost, x_hat_initial, u_t).T \
-          - P_t * x_hat_initial - R_t * u_t
+            self.last_x_hat_t[0] = x_hat_initial
 
-      q_scalar_t = self.cost.cost(x_hat_initial, u_t) \
-          - 0.5 * (x_hat_initial.T * (Q_t * x_hat_initial + P_t.T * u_t) \
-          + u_t.T * (P_t * x_hat_initial + R_t * u_t)) \
-          - x_hat_initial.T * q_t - u_t.T * r_t
+            Q_t = numerical_jacobian_x_x(self.cost.cost, x_hat_initial, u_t)
+            P_t = numerical_jacobian_x_u(self.cost.cost, x_hat_initial, u_t)
+            R_t = numerical_jacobian_u_u(self.cost.cost, x_hat_initial, u_t)
 
-      start_A_t = numerical_jacobian_x(self.dynamics.discrete_dynamics, x_hat_initial, u_t)
-      start_B_t = numerical_jacobian_u(self.dynamics.discrete_dynamics, x_hat_initial, u_t)
-      x_hat_next = self.dynamics.discrete_dynamics(x_hat_initial, u_t)
-      start_c_t = x_hat_next - start_A_t * x_hat_initial - start_B_t * u_t
+            q_t = numerical_jacobian_x(self.cost.cost, x_hat_initial, u_t).T \
+                - Q_t * x_hat_initial - P_t.T * u_t
+            r_t = numerical_jacobian_u(self.cost.cost, x_hat_initial, u_t).T \
+                - P_t * x_hat_initial - R_t * u_t
 
-      B_svd_u, B_svd_sigma_diag, B_svd_v = numpy.linalg.svd(start_B_t)
-      B_svd_sigma = numpy.matrix(numpy.zeros(start_B_t.shape))
-      B_svd_sigma[0:B_svd_sigma_diag.shape[0], 0:B_svd_sigma_diag.shape[0]] = \
-          numpy.diag(B_svd_sigma_diag)
+            q_scalar_t = self.cost.cost(x_hat_initial, u_t) \
+                - 0.5 * (x_hat_initial.T * (Q_t * x_hat_initial + P_t.T * u_t) \
+                + u_t.T * (P_t * x_hat_initial + R_t * u_t)) \
+                - x_hat_initial.T * q_t - u_t.T * r_t
 
-      B_svd_sigma_inv = numpy.matrix(numpy.zeros(start_B_t.shape)).T
-      B_svd_sigma_inv[0:B_svd_sigma_diag.shape[0],
-                      0:B_svd_sigma_diag.shape[0]] = \
-                          numpy.linalg.inv(numpy.diag(B_svd_sigma_diag))
-      B_svd_inv = B_svd_v.T * B_svd_sigma_inv * B_svd_u.T
+            start_A_t = numerical_jacobian_x(self.dynamics.discrete_dynamics,
+                                             x_hat_initial, u_t)
+            start_B_t = numerical_jacobian_u(self.dynamics.discrete_dynamics,
+                                             x_hat_initial, u_t)
+            x_hat_next = self.dynamics.discrete_dynamics(x_hat_initial, u_t)
+            start_c_t = x_hat_next - start_A_t * x_hat_initial - start_B_t * u_t
 
-      self.L_bar_t[1] = B_svd_inv
-      self.l_bar_t[1] = -B_svd_inv * (start_A_t * x_hat_initial + start_c_t)
+            B_svd_u, B_svd_sigma_diag, B_svd_v = numpy.linalg.svd(start_B_t)
+            B_svd_sigma = numpy.matrix(numpy.zeros(start_B_t.shape))
+            B_svd_sigma[0:B_svd_sigma_diag.shape[0], 0:B_svd_sigma_diag.shape[0]] = \
+                numpy.diag(B_svd_sigma_diag)
 
-      self.S_bar_t[1] = self.L_bar_t[1].T * R_t * self.L_bar_t[1]
+            B_svd_sigma_inv = numpy.matrix(numpy.zeros(start_B_t.shape)).T
+            B_svd_sigma_inv[0:B_svd_sigma_diag.shape[0],
+                            0:B_svd_sigma_diag.shape[0]] = \
+                                numpy.linalg.inv(numpy.diag(B_svd_sigma_diag))
+            B_svd_inv = B_svd_v.T * B_svd_sigma_inv * B_svd_u.T
 
-      TotalS_1 = start_B_t.T * self.S_t[1] * start_B_t + R_t
-      Totals_1 = start_B_t.T * self.S_t[1] * (start_c_t + start_A_t * x_hat_initial) \
-          + start_B_t.T * self.s_t[1] + P_t * x_hat_initial + r_t
-      Totals_scalar_1 = 0.5 * (start_c_t.T + x_hat_initial.T * start_A_t.T) * self.S_t[1] * (start_c_t + start_A_t * x_hat_initial) \
-          + self.s_scalar_t[1] + x_hat_initial.T * q_t + q_scalar_t \
-          + 0.5 * x_hat_initial.T * Q_t * x_hat_initial \
-          + (start_c_t.T + x_hat_initial.T * start_A_t.T) * self.s_t[1]
+            self.L_bar_t[1] = B_svd_inv
+            self.l_bar_t[1] = -B_svd_inv * (start_A_t * x_hat_initial +
+                                            start_c_t)
 
-      optimal_u_1 = -numpy.linalg.solve(TotalS_1, Totals_1)
-      optimal_x_1 = start_A_t * x_hat_initial \
-          + start_B_t * optimal_u_1 + start_c_t
+            self.S_bar_t[1] = self.L_bar_t[1].T * R_t * self.L_bar_t[1]
 
-      # TODO(austin): Disable this if we are controlable.  It should not be needed then.
-      S_bar_1_eigh_eigenvalues, S_bar_1_eigh_eigenvectors = \
-          numpy.linalg.eigh(self.S_bar_t[1])
-      S_bar_1_eigh = numpy.matrix(numpy.diag(S_bar_1_eigh_eigenvalues))
-      S_bar_1_eigh_eigenvalues_stiff = S_bar_1_eigh_eigenvalues.copy()
-      for i in range(S_bar_1_eigh_eigenvalues_stiff.shape[0]):
-        if abs(S_bar_1_eigh_eigenvalues_stiff[i]) < 1e-8:
-          S_bar_1_eigh_eigenvalues_stiff[i] = max(S_bar_1_eigh_eigenvalues_stiff) * 1.0
+            TotalS_1 = start_B_t.T * self.S_t[1] * start_B_t + R_t
+            Totals_1 = start_B_t.T * self.S_t[1] * (start_c_t + start_A_t * x_hat_initial) \
+                + start_B_t.T * self.s_t[1] + P_t * x_hat_initial + r_t
+            Totals_scalar_1 = 0.5 * (start_c_t.T + x_hat_initial.T * start_A_t.T) * self.S_t[1] * (start_c_t + start_A_t * x_hat_initial) \
+                + self.s_scalar_t[1] + x_hat_initial.T * q_t + q_scalar_t \
+                + 0.5 * x_hat_initial.T * Q_t * x_hat_initial \
+                + (start_c_t.T + x_hat_initial.T * start_A_t.T) * self.s_t[1]
 
-      S_bar_stiff = S_bar_1_eigh_eigenvectors * numpy.matrix(numpy.diag(S_bar_1_eigh_eigenvalues_stiff)) * S_bar_1_eigh_eigenvectors.T
+            optimal_u_1 = -numpy.linalg.solve(TotalS_1, Totals_1)
+            optimal_x_1 = start_A_t * x_hat_initial \
+                + start_B_t * optimal_u_1 + start_c_t
 
-      print 'Min u', -numpy.linalg.solve(TotalS_1, Totals_1)
-      print 'Min x_hat', optimal_x_1
-      self.s_bar_t[1] = -self.s_t[1] - (S_bar_stiff + self.S_t[1]) * optimal_x_1
-      self.s_scalar_bar_t[1] = 0.5 * (optimal_u_1.T * TotalS_1 * optimal_u_1 \
-          - optimal_x_1.T * (S_bar_stiff + self.S_t[1]) * optimal_x_1) \
-          + optimal_u_1.T * Totals_1 \
-          - optimal_x_1.T * (self.s_bar_t[1] + self.s_t[1]) \
-          - self.s_scalar_t[1] + Totals_scalar_1
+            # TODO(austin): Disable this if we are controlable.  It should not be needed then.
+            S_bar_1_eigh_eigenvalues, S_bar_1_eigh_eigenvectors = \
+                numpy.linalg.eigh(self.S_bar_t[1])
+            S_bar_1_eigh = numpy.matrix(numpy.diag(S_bar_1_eigh_eigenvalues))
+            S_bar_1_eigh_eigenvalues_stiff = S_bar_1_eigh_eigenvalues.copy()
+            for i in range(S_bar_1_eigh_eigenvalues_stiff.shape[0]):
+                if abs(S_bar_1_eigh_eigenvalues_stiff[i]) < 1e-8:
+                    S_bar_1_eigh_eigenvalues_stiff[i] = max(
+                        S_bar_1_eigh_eigenvalues_stiff) * 1.0
 
-      print 'optimal_u_1', optimal_u_1
-      print 'TotalS_1', TotalS_1
-      print 'Totals_1', Totals_1
-      print 'Totals_scalar_1', Totals_scalar_1
-      print 'overall cost 1', 0.5 * (optimal_u_1.T * TotalS_1 * optimal_u_1) \
-                               + optimal_u_1.T * Totals_1 + Totals_scalar_1
-      print 'overall cost 0', 0.5 * (x_hat_initial.T * self.S_t[0] * x_hat_initial) \
-                              + x_hat_initial.T * self.s_t[0] + self.s_scalar_t[0]
+            S_bar_stiff = S_bar_1_eigh_eigenvectors * numpy.matrix(
+                numpy.diag(S_bar_1_eigh_eigenvalues_stiff)
+            ) * S_bar_1_eigh_eigenvectors.T
 
-      print 't forward 0'
-      print 'x_hat_initial[ 0]: %s' % (x_hat_initial)
-      print 'x_hat[%2d]: %s' % (0, x_hat.T)
-      print 'x_hat_next[%2d]: %s' % (0, x_hat_next.T)
-      print 'u[%2d]: %s' % (0, u_t.T)
-      print ('L[ 0]: %s' % (self.L_t[0],)).replace('\n', '\n        ')
-      print ('l[ 0]: %s' % (self.l_t[0],)).replace('\n', '\n        ')
+            print 'Min u', -numpy.linalg.solve(TotalS_1, Totals_1)
+            print 'Min x_hat', optimal_x_1
+            self.s_bar_t[1] = -self.s_t[1] - (S_bar_stiff +
+                                              self.S_t[1]) * optimal_x_1
+            self.s_scalar_bar_t[1] = 0.5 * (optimal_u_1.T * TotalS_1 * optimal_u_1 \
+                - optimal_x_1.T * (S_bar_stiff + self.S_t[1]) * optimal_x_1) \
+                + optimal_u_1.T * Totals_1 \
+                - optimal_x_1.T * (self.s_bar_t[1] + self.s_t[1]) \
+                - self.s_scalar_t[1] + Totals_scalar_1
 
-      print ('A_t[%2d]: %s' % (0, start_A_t)).replace('\n', '\n         ')
-      print ('B_t[%2d]: %s' % (0, start_B_t)).replace('\n', '\n         ')
-      print ('c_t[%2d]: %s' % (0, start_c_t)).replace('\n', '\n         ')
+            print 'optimal_u_1', optimal_u_1
+            print 'TotalS_1', TotalS_1
+            print 'Totals_1', Totals_1
+            print 'Totals_scalar_1', Totals_scalar_1
+            print 'overall cost 1', 0.5 * (optimal_u_1.T * TotalS_1 * optimal_u_1) \
+                                     + optimal_u_1.T * Totals_1 + Totals_scalar_1
+            print 'overall cost 0', 0.5 * (x_hat_initial.T * self.S_t[0] * x_hat_initial) \
+                                    + x_hat_initial.T * self.s_t[0] + self.s_scalar_t[0]
 
-      # TODO(austin): optimal_x_1 is x_hat
-      x_hat = -numpy.linalg.solve((self.S_t[1] + S_bar_stiff),
-                                  (self.s_t[1] + self.s_bar_t[1]))
-      print 'new xhat', x_hat
+            print 't forward 0'
+            print 'x_hat_initial[ 0]: %s' % (x_hat_initial)
+            print 'x_hat[%2d]: %s' % (0, x_hat.T)
+            print 'x_hat_next[%2d]: %s' % (0, x_hat_next.T)
+            print 'u[%2d]: %s' % (0, u_t.T)
+            print('L[ 0]: %s' % (self.L_t[0], )).replace('\n', '\n        ')
+            print('l[ 0]: %s' % (self.l_t[0], )).replace('\n', '\n        ')
 
-      self.S_bar_t[1] = S_bar_stiff
+            print('A_t[%2d]: %s' % (0, start_A_t)).replace('\n', '\n         ')
+            print('B_t[%2d]: %s' % (0, start_B_t)).replace('\n', '\n         ')
+            print('c_t[%2d]: %s' % (0, start_c_t)).replace('\n', '\n         ')
 
-      self.last_x_hat_t[1] = x_hat
+            # TODO(austin): optimal_x_1 is x_hat
+            x_hat = -numpy.linalg.solve((self.S_t[1] + S_bar_stiff),
+                                        (self.s_t[1] + self.s_bar_t[1]))
+            print 'new xhat', x_hat
 
-      for t in range(1, l):
-        print 't forward', t
-        u_t = self.L_t[t] * x_hat + self.l_t[t]
+            self.S_bar_t[1] = S_bar_stiff
 
-        x_hat_next = self.dynamics.discrete_dynamics(x_hat, u_t)
-        A_bar_t = numerical_jacobian_x(self.dynamics.inverse_discrete_dynamics,
-                                       x_hat_next, u_t)
-        B_bar_t = numerical_jacobian_u(self.dynamics.inverse_discrete_dynamics,
-                                       x_hat_next, u_t)
-        c_bar_t = x_hat - A_bar_t * x_hat_next - B_bar_t * u_t
+            self.last_x_hat_t[1] = x_hat
 
-        print 'x_hat[%2d]: %s' % (t, x_hat.T)
-        print 'x_hat_next[%2d]: %s' % (t, x_hat_next.T)
-        print ('L[%2d]: %s' % (t, self.L_t[t],)).replace('\n', '\n        ')
-        print ('l[%2d]: %s' % (t, self.l_t[t],)).replace('\n', '\n        ')
-        print 'u[%2d]: %s' % (t, u_t.T)
+            for t in range(1, l):
+                print 't forward', t
+                u_t = self.L_t[t] * x_hat + self.l_t[t]
 
-        print ('A_bar_t[%2d]: %s' % (t, A_bar_t)).replace('\n', '\n             ')
-        print ('B_bar_t[%2d]: %s' % (t, B_bar_t)).replace('\n', '\n             ')
-        print ('c_bar_t[%2d]: %s' % (t, c_bar_t)).replace('\n', '\n             ')
+                x_hat_next = self.dynamics.discrete_dynamics(x_hat, u_t)
+                A_bar_t = numerical_jacobian_x(
+                    self.dynamics.inverse_discrete_dynamics, x_hat_next, u_t)
+                B_bar_t = numerical_jacobian_u(
+                    self.dynamics.inverse_discrete_dynamics, x_hat_next, u_t)
+                c_bar_t = x_hat - A_bar_t * x_hat_next - B_bar_t * u_t
 
-        Q_t = numerical_jacobian_x_x(self.cost.cost, x_hat, u_t)
-        P_t = numerical_jacobian_x_u(self.cost.cost, x_hat, u_t)
-        R_t = numerical_jacobian_u_u(self.cost.cost, x_hat, u_t)
+                print 'x_hat[%2d]: %s' % (t, x_hat.T)
+                print 'x_hat_next[%2d]: %s' % (t, x_hat_next.T)
+                print('L[%2d]: %s' % (
+                    t,
+                    self.L_t[t],
+                )).replace('\n', '\n        ')
+                print('l[%2d]: %s' % (
+                    t,
+                    self.l_t[t],
+                )).replace('\n', '\n        ')
+                print 'u[%2d]: %s' % (t, u_t.T)
 
-        q_t = numerical_jacobian_x(self.cost.cost, x_hat, u_t).T \
-            - Q_t * x_hat - P_t.T * u_t
-        r_t = numerical_jacobian_u(self.cost.cost, x_hat, u_t).T \
-            - P_t * x_hat - R_t * u_t
+                print('A_bar_t[%2d]: %s' % (t, A_bar_t)).replace(
+                    '\n', '\n             ')
+                print('B_bar_t[%2d]: %s' % (t, B_bar_t)).replace(
+                    '\n', '\n             ')
+                print('c_bar_t[%2d]: %s' % (t, c_bar_t)).replace(
+                    '\n', '\n             ')
 
-        q_scalar_t = self.cost.cost(x_hat, u_t) \
-            - 0.5 * (x_hat.T * (Q_t * x_hat + P_t.T * u_t) \
-            + u_t.T * (P_t * x_hat + R_t * u_t)) - x_hat.T * q_t - u_t.T * r_t
+                Q_t = numerical_jacobian_x_x(self.cost.cost, x_hat, u_t)
+                P_t = numerical_jacobian_x_u(self.cost.cost, x_hat, u_t)
+                R_t = numerical_jacobian_u_u(self.cost.cost, x_hat, u_t)
 
-        C_bar_t = B_bar_t.T * (self.S_bar_t[t] + Q_t) * A_bar_t + P_t * A_bar_t
-        D_bar_t = A_bar_t.T * (self.S_bar_t[t] + Q_t) * A_bar_t
-        E_bar_t = B_bar_t.T * (self.S_bar_t[t] + Q_t) * B_bar_t + R_t \
-            + P_t * B_bar_t + B_bar_t.T * P_t.T
-        d_bar_t = A_bar_t.T * (self.s_bar_t[t] + q_t) \
-            + A_bar_t.T * (self.S_bar_t[t] + Q_t) * c_bar_t
-        e_bar_t = r_t + P_t * c_bar_t + B_bar_t.T * self.s_bar_t[t] \
-            + B_bar_t.T * (self.S_bar_t[t] + Q_t) * c_bar_t
+                q_t = numerical_jacobian_x(self.cost.cost, x_hat, u_t).T \
+                    - Q_t * x_hat - P_t.T * u_t
+                r_t = numerical_jacobian_u(self.cost.cost, x_hat, u_t).T \
+                    - P_t * x_hat - R_t * u_t
 
-        self.L_bar_t[t + 1] = -numpy.linalg.inv(E_bar_t) * C_bar_t
-        self.l_bar_t[t + 1] = -numpy.linalg.inv(E_bar_t) * e_bar_t
+                q_scalar_t = self.cost.cost(x_hat, u_t) \
+                    - 0.5 * (x_hat.T * (Q_t * x_hat + P_t.T * u_t) \
+                    + u_t.T * (P_t * x_hat + R_t * u_t)) - x_hat.T * q_t - u_t.T * r_t
 
-        self.S_bar_t[t + 1] = D_bar_t + C_bar_t.T * self.L_bar_t[t + 1]
-        self.s_bar_t[t + 1] = d_bar_t + C_bar_t.T * self.l_bar_t[t + 1]
-        self.s_scalar_bar_t[t + 1] = \
-            -0.5 * e_bar_t.T * numpy.linalg.inv(E_bar_t) * e_bar_t \
-            + 0.5 * c_bar_t.T * (self.S_bar_t[t] + Q_t) * c_bar_t \
-            + c_bar_t.T * self.s_bar_t[t] + c_bar_t.T * q_t \
-            + self.s_scalar_bar_t[t] + q_scalar_t
+                C_bar_t = B_bar_t.T * (self.S_bar_t[t] +
+                                       Q_t) * A_bar_t + P_t * A_bar_t
+                D_bar_t = A_bar_t.T * (self.S_bar_t[t] + Q_t) * A_bar_t
+                E_bar_t = B_bar_t.T * (self.S_bar_t[t] + Q_t) * B_bar_t + R_t \
+                    + P_t * B_bar_t + B_bar_t.T * P_t.T
+                d_bar_t = A_bar_t.T * (self.s_bar_t[t] + q_t) \
+                    + A_bar_t.T * (self.S_bar_t[t] + Q_t) * c_bar_t
+                e_bar_t = r_t + P_t * c_bar_t + B_bar_t.T * self.s_bar_t[t] \
+                    + B_bar_t.T * (self.S_bar_t[t] + Q_t) * c_bar_t
 
-        x_hat = -numpy.linalg.solve((self.S_t[t + 1] + self.S_bar_t[t + 1]),
-                                    (self.s_t[t + 1] + self.s_bar_t[t + 1]))
-        self.S_t[l] = self.cost.estimate_Q_final(x_hat)
-      self.s_t[l] = self.cost.estimate_q_final(x_hat)
-      x_hat = -numpy.linalg.inv(self.S_t[l] + self.S_bar_t[l]) \
-          * (self.s_t[l] + self.s_bar_t[l])
+                self.L_bar_t[t + 1] = -numpy.linalg.inv(E_bar_t) * C_bar_t
+                self.l_bar_t[t + 1] = -numpy.linalg.inv(E_bar_t) * e_bar_t
 
-      for t in reversed(range(l)):
-        print 't backward', t
-        # TODO(austin): I don't think we can use L_t like this here.
-        # I think we are off by 1 somewhere...
-        u_t = self.L_bar_t[t + 1] * x_hat + self.l_bar_t[t + 1]
+                self.S_bar_t[t + 1] = D_bar_t + C_bar_t.T * self.L_bar_t[t + 1]
+                self.s_bar_t[t + 1] = d_bar_t + C_bar_t.T * self.l_bar_t[t + 1]
+                self.s_scalar_bar_t[t + 1] = \
+                    -0.5 * e_bar_t.T * numpy.linalg.inv(E_bar_t) * e_bar_t \
+                    + 0.5 * c_bar_t.T * (self.S_bar_t[t] + Q_t) * c_bar_t \
+                    + c_bar_t.T * self.s_bar_t[t] + c_bar_t.T * q_t \
+                    + self.s_scalar_bar_t[t] + q_scalar_t
 
-        x_hat_prev = self.dynamics.inverse_discrete_dynamics(x_hat, u_t)
-        print 'x_hat[%2d]: %s' % (t, x_hat.T)
-        print 'x_hat_prev[%2d]: %s' % (t, x_hat_prev.T)
-        print ('L_bar[%2d]: %s' % (t + 1, self.L_bar_t[t + 1])).replace('\n', '\n            ')
-        print ('l_bar[%2d]: %s' % (t + 1, self.l_bar_t[t + 1])).replace('\n', '\n            ')
-        print 'u[%2d]: %s' % (t, u_t.T)
-        # Now compute the linearized A, B, and C
-        # Start by doing it numerically, and then optimize.
-        A_t = numerical_jacobian_x(self.dynamics.discrete_dynamics, x_hat_prev, u_t)
-        B_t = numerical_jacobian_u(self.dynamics.discrete_dynamics, x_hat_prev, u_t)
-        c_t = x_hat - A_t * x_hat_prev - B_t * u_t
+                x_hat = -numpy.linalg.solve(
+                    (self.S_t[t + 1] + self.S_bar_t[t + 1]),
+                    (self.s_t[t + 1] + self.s_bar_t[t + 1]))
+                self.S_t[l] = self.cost.estimate_Q_final(x_hat)
+            self.s_t[l] = self.cost.estimate_q_final(x_hat)
+            x_hat = -numpy.linalg.inv(self.S_t[l] + self.S_bar_t[l]) \
+                * (self.s_t[l] + self.s_bar_t[l])
 
-        print ('A_t[%2d]: %s' % (t, A_t)).replace('\n', '\n         ')
-        print ('B_t[%2d]: %s' % (t, B_t)).replace('\n', '\n         ')
-        print ('c_t[%2d]: %s' % (t, c_t)).replace('\n', '\n         ')
+            for t in reversed(range(l)):
+                print 't backward', t
+                # TODO(austin): I don't think we can use L_t like this here.
+                # I think we are off by 1 somewhere...
+                u_t = self.L_bar_t[t + 1] * x_hat + self.l_bar_t[t + 1]
 
-        Q_t = numerical_jacobian_x_x(self.cost.cost, x_hat_prev, u_t)
-        P_t = numerical_jacobian_x_u(self.cost.cost, x_hat_prev, u_t)
-        P_T_t = numerical_jacobian_u_x(self.cost.cost, x_hat_prev, u_t)
-        R_t = numerical_jacobian_u_u(self.cost.cost, x_hat_prev, u_t)
+                x_hat_prev = self.dynamics.inverse_discrete_dynamics(
+                    x_hat, u_t)
+                print 'x_hat[%2d]: %s' % (t, x_hat.T)
+                print 'x_hat_prev[%2d]: %s' % (t, x_hat_prev.T)
+                print('L_bar[%2d]: %s' % (t + 1, self.L_bar_t[t + 1])).replace(
+                    '\n', '\n            ')
+                print('l_bar[%2d]: %s' % (t + 1, self.l_bar_t[t + 1])).replace(
+                    '\n', '\n            ')
+                print 'u[%2d]: %s' % (t, u_t.T)
+                # Now compute the linearized A, B, and C
+                # Start by doing it numerically, and then optimize.
+                A_t = numerical_jacobian_x(self.dynamics.discrete_dynamics,
+                                           x_hat_prev, u_t)
+                B_t = numerical_jacobian_u(self.dynamics.discrete_dynamics,
+                                           x_hat_prev, u_t)
+                c_t = x_hat - A_t * x_hat_prev - B_t * u_t
 
-        q_t = numerical_jacobian_x(self.cost.cost, x_hat_prev, u_t).T \
-            - Q_t * x_hat_prev - P_T_t * u_t
-        r_t = numerical_jacobian_u(self.cost.cost, x_hat_prev, u_t).T \
-            - P_t * x_hat_prev - R_t * u_t
+                print('A_t[%2d]: %s' % (t, A_t)).replace('\n', '\n         ')
+                print('B_t[%2d]: %s' % (t, B_t)).replace('\n', '\n         ')
+                print('c_t[%2d]: %s' % (t, c_t)).replace('\n', '\n         ')
 
-        q_scalar_t = self.cost.cost(x_hat_prev, u_t) \
-            - 0.5 * (x_hat_prev.T * (Q_t * x_hat_prev + P_t.T * u_t) \
-            + u_t.T * (P_t * x_hat_prev + R_t * u_t)) \
-            - x_hat_prev.T * q_t - u_t.T * r_t
+                Q_t = numerical_jacobian_x_x(self.cost.cost, x_hat_prev, u_t)
+                P_t = numerical_jacobian_x_u(self.cost.cost, x_hat_prev, u_t)
+                P_T_t = numerical_jacobian_u_x(self.cost.cost, x_hat_prev, u_t)
+                R_t = numerical_jacobian_u_u(self.cost.cost, x_hat_prev, u_t)
 
-        C_t = P_t + B_t.T * self.S_t[t + 1] * A_t
-        D_t = Q_t + A_t.T * self.S_t[t + 1] * A_t
-        E_t = R_t + B_t.T * self.S_t[t + 1] * B_t
-        d_t = q_t + A_t.T * self.s_t[t + 1] + A_t.T * self.S_t[t + 1] * c_t
-        e_t = r_t + B_t.T * self.s_t[t + 1] + B_t.T * self.S_t[t + 1] * c_t
-        self.L_t[t] = -numpy.linalg.inv(E_t) * C_t
-        self.l_t[t] = -numpy.linalg.inv(E_t) * e_t
-        self.s_t[t] = d_t + C_t.T * self.l_t[t]
-        self.S_t[t] = D_t + C_t.T * self.L_t[t]
-        self.s_scalar_t[t] = q_scalar_t \
-            - 0.5 * e_t.T * numpy.linalg.inv(E_t) * e_t \
-            + 0.5 * c_t.T * self.S_t[t + 1] * c_t \
-            + c_t.T * self.s_t[t + 1] \
-            + self.s_scalar_t[t + 1]
+                q_t = numerical_jacobian_x(self.cost.cost, x_hat_prev, u_t).T \
+                    - Q_t * x_hat_prev - P_T_t * u_t
+                r_t = numerical_jacobian_u(self.cost.cost, x_hat_prev, u_t).T \
+                    - P_t * x_hat_prev - R_t * u_t
 
-        x_hat = -numpy.linalg.solve((self.S_t[t] + self.S_bar_t[t]),
-                                    (self.s_t[t] + self.s_bar_t[t]))
-        if t == 0:
-          self.last_x_hat_t[t] = x_hat_initial
-        else:
-          self.last_x_hat_t[t] = x_hat
+                q_scalar_t = self.cost.cost(x_hat_prev, u_t) \
+                    - 0.5 * (x_hat_prev.T * (Q_t * x_hat_prev + P_t.T * u_t) \
+                    + u_t.T * (P_t * x_hat_prev + R_t * u_t)) \
+                    - x_hat_prev.T * q_t - u_t.T * r_t
 
-      x_hat_t = [x_hat_initial]
+                C_t = P_t + B_t.T * self.S_t[t + 1] * A_t
+                D_t = Q_t + A_t.T * self.S_t[t + 1] * A_t
+                E_t = R_t + B_t.T * self.S_t[t + 1] * B_t
+                d_t = q_t + A_t.T * self.s_t[t + 1] + A_t.T * self.S_t[t +
+                                                                       1] * c_t
+                e_t = r_t + B_t.T * self.s_t[t + 1] + B_t.T * self.S_t[t +
+                                                                       1] * c_t
+                self.L_t[t] = -numpy.linalg.inv(E_t) * C_t
+                self.l_t[t] = -numpy.linalg.inv(E_t) * e_t
+                self.s_t[t] = d_t + C_t.T * self.l_t[t]
+                self.S_t[t] = D_t + C_t.T * self.L_t[t]
+                self.s_scalar_t[t] = q_scalar_t \
+                    - 0.5 * e_t.T * numpy.linalg.inv(E_t) * e_t \
+                    + 0.5 * c_t.T * self.S_t[t + 1] * c_t \
+                    + c_t.T * self.s_t[t + 1] \
+                    + self.s_scalar_t[t + 1]
 
-      pylab.figure('states %d' % a)
-      pylab.ion()
-      for dim in range(num_states):
-        pylab.plot(numpy.arange(len(self.last_x_hat_t)),
-                   [x_hat_loop[dim, 0] for x_hat_loop in self.last_x_hat_t],
-                   marker='o', label='Xhat[%d]' % dim)
-      pylab.legend()
-      pylab.draw()
+                x_hat = -numpy.linalg.solve((self.S_t[t] + self.S_bar_t[t]),
+                                            (self.s_t[t] + self.s_bar_t[t]))
+                if t == 0:
+                    self.last_x_hat_t[t] = x_hat_initial
+                else:
+                    self.last_x_hat_t[t] = x_hat
 
-      pylab.figure('xy %d' % a)
-      pylab.ion()
-      pylab.plot([x_hat_loop[0, 0] for x_hat_loop in self.last_x_hat_t],
-                 [x_hat_loop[1, 0] for x_hat_loop in self.last_x_hat_t],
-                 marker='o', label='trajectory')
-      pylab.legend()
-      pylab.draw()
+            x_hat_t = [x_hat_initial]
 
-    final_u_t = [numpy.matrix(numpy.zeros((num_inputs, 1))) for _ in range(l + 1)]
-    cost_to_go = []
-    cost_to_come = []
-    cost = []
-    for t in range(l):
-      cost_to_go.append(
-          (0.5 * self.last_x_hat_t[t].T * self.S_t[t] * self.last_x_hat_t[t] \
-          + self.last_x_hat_t[t].T * self.s_t[t] + self.s_scalar_t[t])[0, 0])
-      cost_to_come.append(
-          (0.5 * self.last_x_hat_t[t].T * self.S_bar_t[t] * self.last_x_hat_t[t] \
-           + self.last_x_hat_t[t].T * self.s_bar_t[t] + self.s_scalar_bar_t[t])[0, 0])
-      cost.append(cost_to_go[-1] + cost_to_come[-1])
-      final_u_t[t] = self.L_t[t] * self.last_x_hat_t[t] + self.l_t[t]
+            pylab.figure('states %d' % a)
+            pylab.ion()
+            for dim in range(num_states):
+                pylab.plot(
+                    numpy.arange(len(self.last_x_hat_t)),
+                    [x_hat_loop[dim, 0] for x_hat_loop in self.last_x_hat_t],
+                    marker='o',
+                    label='Xhat[%d]' % dim)
+            pylab.legend()
+            pylab.draw()
 
-    for t in range(l):
-      A_t = numerical_jacobian_x(self.dynamics.discrete_dynamics,
-                                 self.last_x_hat_t[t], final_u_t[t])
-      B_t = numerical_jacobian_u(self.dynamics.discrete_dynamics,
-                                 self.last_x_hat_t[t], final_u_t[t])
-      c_t = self.dynamics.discrete_dynamics(self.last_x_hat_t[t], final_u_t[t]) \
-          - A_t * self.last_x_hat_t[t] - B_t * final_u_t[t]
-      print("Infeasability at", t, "is",
-            ((A_t * self.last_x_hat_t[t] + B_t * final_u_t[t] + c_t) \
-            - self.last_x_hat_t[t + 1]).T)
+            pylab.figure('xy %d' % a)
+            pylab.ion()
+            pylab.plot([x_hat_loop[0, 0] for x_hat_loop in self.last_x_hat_t],
+                       [x_hat_loop[1, 0] for x_hat_loop in self.last_x_hat_t],
+                       marker='o',
+                       label='trajectory')
+            pylab.legend()
+            pylab.draw()
 
-    pylab.figure('u')
-    samples = numpy.arange(len(final_u_t))
-    for i in range(num_inputs):
-      pylab.plot(samples, [u[i, 0] for u in final_u_t],
-                 label='u[%d]' % i, marker='o')
-      pylab.legend()
+        final_u_t = [
+            numpy.matrix(numpy.zeros((num_inputs, 1))) for _ in range(l + 1)
+        ]
+        cost_to_go = []
+        cost_to_come = []
+        cost = []
+        for t in range(l):
+            cost_to_go.append(
+                (0.5 * self.last_x_hat_t[t].T * self.S_t[t] * self.last_x_hat_t[t] \
+                + self.last_x_hat_t[t].T * self.s_t[t] + self.s_scalar_t[t])[0, 0])
+            cost_to_come.append(
+                (0.5 * self.last_x_hat_t[t].T * self.S_bar_t[t] * self.last_x_hat_t[t] \
+                 + self.last_x_hat_t[t].T * self.s_bar_t[t] + self.s_scalar_bar_t[t])[0, 0])
+            cost.append(cost_to_go[-1] + cost_to_come[-1])
+            final_u_t[t] = self.L_t[t] * self.last_x_hat_t[t] + self.l_t[t]
 
-    pylab.figure('cost')
-    cost_samples = numpy.arange(len(cost))
-    pylab.plot(cost_samples, cost_to_go, label='cost to go', marker='o')
-    pylab.plot(cost_samples, cost_to_come, label='cost to come', marker='o')
-    pylab.plot(cost_samples, cost, label='cost', marker='o')
-    pylab.legend()
+        for t in range(l):
+            A_t = numerical_jacobian_x(self.dynamics.discrete_dynamics,
+                                       self.last_x_hat_t[t], final_u_t[t])
+            B_t = numerical_jacobian_u(self.dynamics.discrete_dynamics,
+                                       self.last_x_hat_t[t], final_u_t[t])
+            c_t = self.dynamics.discrete_dynamics(self.last_x_hat_t[t], final_u_t[t]) \
+                - A_t * self.last_x_hat_t[t] - B_t * final_u_t[t]
+            print("Infeasability at", t, "is",
+                  ((A_t * self.last_x_hat_t[t] + B_t * final_u_t[t] + c_t) \
+                  - self.last_x_hat_t[t + 1]).T)
 
-    pylab.ioff()
-    pylab.show()
+        pylab.figure('u')
+        samples = numpy.arange(len(final_u_t))
+        for i in range(num_inputs):
+            pylab.plot(samples, [u[i, 0] for u in final_u_t],
+                       label='u[%d]' % i,
+                       marker='o')
+            pylab.legend()
+
+        pylab.figure('cost')
+        cost_samples = numpy.arange(len(cost))
+        pylab.plot(cost_samples, cost_to_go, label='cost to go', marker='o')
+        pylab.plot(cost_samples,
+                   cost_to_come,
+                   label='cost to come',
+                   marker='o')
+        pylab.plot(cost_samples, cost, label='cost', marker='o')
+        pylab.legend()
+
+        pylab.ioff()
+        pylab.show()
+
 
 if __name__ == '__main__':
-  dt = 0.05
-  #arm_dynamics = ArmDynamics(dt=dt)
-  #elqr = ELQR(arm_dynamics, ArmCostFunction(dt=dt, dynamics=arm_dynamics))
-  #x_hat_initial = numpy.matrix([[0.10], [1.0], [0.0], [0.0]])
-  #elqr.Solve(x_hat_initial, 100, 3)
+    dt = 0.05
+    #arm_dynamics = ArmDynamics(dt=dt)
+    #elqr = ELQR(arm_dynamics, ArmCostFunction(dt=dt, dynamics=arm_dynamics))
+    #x_hat_initial = numpy.matrix([[0.10], [1.0], [0.0], [0.0]])
+    #elqr.Solve(x_hat_initial, 100, 3)
 
-  elqr = ELQR(SkidSteerDynamics(dt=dt), CostFunction(dt=dt))
-  x_hat_initial = numpy.matrix([[0.10], [1.0], [0.0]])
-  elqr.Solve(x_hat_initial, 100, 15)
-  sys.exit(1)
+    elqr = ELQR(SkidSteerDynamics(dt=dt), CostFunction(dt=dt))
+    x_hat_initial = numpy.matrix([[0.10], [1.0], [0.0]])
+    elqr.Solve(x_hat_initial, 100, 15)
+    sys.exit(1)
diff --git a/y2018/control_loops/python/graph_codegen.py b/y2018/control_loops/python/graph_codegen.py
index 22bd1d2..e7f8ce1 100644
--- a/y2018/control_loops/python/graph_codegen.py
+++ b/y2018/control_loops/python/graph_codegen.py
@@ -23,16 +23,16 @@
         alpha_unitizer = "(::Eigen::Matrix<double, 2, 2>() << %f, %f, %f, %f).finished()" % (
             segment.alpha_unitizer[0, 0], segment.alpha_unitizer[0, 1],
             segment.alpha_unitizer[1, 0], segment.alpha_unitizer[1, 1])
-    cc_file.append( "  trajectories->emplace_back(%s," % (vmax))
-    cc_file.append( "                             %s," % (alpha_unitizer))
+    cc_file.append("  trajectories->emplace_back(%s," % (vmax))
+    cc_file.append("                             %s," % (alpha_unitizer))
     if reverse:
         cc_file.append(
             "                             Trajectory(Path::Reversed(%s()), 0.005));"
             % (path_function_name(str(name))))
     else:
         cc_file.append(
-            "                             Trajectory(%s(), 0.005));"
-            % (path_function_name(str(name))))
+            "                             Trajectory(%s(), 0.005));" %
+            (path_function_name(str(name))))
 
     start_index = None
     end_index = None
@@ -45,15 +45,15 @@
     if reverse:
         start_index, end_index = end_index, start_index
 
-    cc_file.append("  edges.push_back(SearchGraph::Edge{%s(), %s()," %
-                   (index_function_name(start_index),
-                    index_function_name(end_index)))
     cc_file.append(
-        "                     (trajectories->back().trajectory.path().length() + 0.2)});")
+        "  edges.push_back(SearchGraph::Edge{%s(), %s()," %
+        (index_function_name(start_index), index_function_name(end_index)))
+    cc_file.append(
+        "                     (trajectories->back().trajectory.path().length() + 0.2)});"
+    )
 
     # TODO(austin): Allow different vmaxes for different paths.
-    cc_file.append(
-        "  trajectories->back().trajectory.OptimizeTrajectory(")
+    cc_file.append("  trajectories->back().trajectory.OptimizeTrajectory(")
     cc_file.append("      trajectories->back().alpha_unitizer,")
     cc_file.append("      trajectories->back().vmax);")
     cc_file.append("")
@@ -116,15 +116,16 @@
         h_file.append("")
         h_file.append("constexpr uint32_t %s() { return %d; }" %
                       (index_function_name(point[1]), index))
-        h_file.append(
-            "inline ::Eigen::Matrix<double, 2, 1> %sPoint() {" % point[1])
+        h_file.append("inline ::Eigen::Matrix<double, 2, 1> %sPoint() {" %
+                      point[1])
         h_file.append(
             "  return (::Eigen::Matrix<double, 2, 1>() << %f, %f).finished();"
             % (numpy.pi / 2.0 - point[0][0], numpy.pi / 2.0 - point[0][1]))
         h_file.append("}")
 
     front_points = [
-        index_function_name(point[1]) + "()" for point in graph_generate.front_points
+        index_function_name(point[1]) + "()"
+        for point in graph_generate.front_points
     ]
     h_file.append("")
     h_file.append("constexpr ::std::array<uint32_t, %d> FrontPoints() {" %
@@ -134,7 +135,8 @@
     h_file.append("}")
 
     back_points = [
-        index_function_name(point[1]) + "()" for point in graph_generate.back_points
+        index_function_name(point[1]) + "()"
+        for point in graph_generate.back_points
     ]
     h_file.append("")
     h_file.append("constexpr ::std::array<uint32_t, %d> BackPoints() {" %
@@ -149,10 +151,10 @@
     for name, segment in list(enumerate(graph_generate.unnamed_segments)) + [
         (x.name, x) for x in graph_generate.named_segments
     ]:
-        h_file.append(
-            "::std::unique_ptr<Path> %s();" % path_function_name(name))
-        cc_file.append(
-            "::std::unique_ptr<Path> %s() {" % path_function_name(name))
+        h_file.append("::std::unique_ptr<Path> %s();" %
+                      path_function_name(name))
+        cc_file.append("::std::unique_ptr<Path> %s() {" %
+                       path_function_name(name))
         cc_file.append("  return ::std::unique_ptr<Path>(new Path({")
         for point in segment.ToThetaPoints():
             cc_file.append("      {{%.12f, %.12f, %.12f," %
@@ -188,7 +190,8 @@
                    "::std::vector<TrajectoryAndParams> *trajectories,")
     cc_file.append("                            "
                    "const ::Eigen::Matrix<double, 2, 2> &alpha_unitizer,")
-    cc_file.append("                            " "double vmax) {")
+    cc_file.append("                            "
+                   "double vmax) {")
     cc_file.append("  ::std::vector<SearchGraph::Edge> edges;")
 
     index = 0
@@ -202,8 +205,8 @@
         add_edge(cc_file, name, segment, index, True)
         index += 1
 
-    cc_file.append("  return SearchGraph(%d, ::std::move(edges));" % len(
-        graph_generate.points))
+    cc_file.append("  return SearchGraph(%d, ::std::move(edges));" %
+                   len(graph_generate.points))
     cc_file.append("}")
 
     h_file.append("")
diff --git a/y2018/control_loops/python/graph_edit.py b/y2018/control_loops/python/graph_edit.py
index fa9c9e5..7b6179c 100644
--- a/y2018/control_loops/python/graph_edit.py
+++ b/y2018/control_loops/python/graph_edit.py
@@ -7,6 +7,7 @@
 import random
 import gi
 import numpy
+
 gi.require_version('Gtk', '3.0')
 from gi.repository import Gdk, Gtk
 import cairo
@@ -70,8 +71,8 @@
 
 # right hand side lines
 lines2 = [(joint_center[0] + derr, 0.3048), (0.422275, 0.3048),
-          (0.422275, 0.1397), (0.826135, 0.1397), (0.826135,
-                                                   inter_y(0.826135))]
+          (0.422275, 0.1397), (0.826135, 0.1397),
+          (0.826135, inter_y(0.826135))]
 
 t1_min = get_angle((32.525 - 4.0) * 0.0254)
 t2_min = -7.0 / 4.0 * numpy.pi
@@ -119,8 +120,8 @@
 
 p1 = Polygon(lines_theta)
 
-p2 = Polygon([(t1_min, t2_min), (t1_max, t2_min), (t1_max, t2_max), (t1_min,
-                                                                     t2_max)])
+p2 = Polygon([(t1_min, t2_min), (t1_max, t2_min), (t1_max, t2_max),
+              (t1_min, t2_max)])
 
 # Fully computed theta constrints.
 lines_theta = list(p1.intersection(p2).exterior.coords)
@@ -157,7 +158,6 @@
             abs(dpx * pdx + dpy * pdy)
 
 
-
 def closest_segment(lines, pt):
     c_pt, c_pt_dist = get_closest(lines[-1], lines[0], pt)
     for i in range(1, len(lines)):
@@ -172,6 +172,7 @@
 
 # Create a GTK+ widget on which we will draw using Cairo
 class Silly(basic_window.BaseWindow):
+
     def __init__(self):
         super(Silly, self).__init__()
 
@@ -230,6 +231,7 @@
             self.window.queue_draw()
 
     def method_connect(self, event, cb):
+
         def handler(obj, *args):
             cb(*args)
 
@@ -332,7 +334,8 @@
                 cr.stroke()
 
         if not self.theta_version:
-            theta1, theta2 = to_theta(self.last_pos, self.circular_index_select)
+            theta1, theta2 = to_theta(self.last_pos,
+                                      self.circular_index_select)
             x, y = joint_center[0], joint_center[1]
             cr.move_to(x, y)
 
@@ -473,16 +476,16 @@
         else:
             self.segments[0].control2 = self.now_segment_pt
 
-        print('Clicked at theta: %s' % (repr(self.now_segment_pt,)))
+        print('Clicked at theta: %s' % (repr(self.now_segment_pt, )))
         if not self.theta_version:
             print('Clicked at xy, circular index: (%f, %f, %f)' %
                   (self.last_pos[0], self.last_pos[1],
                    self.circular_index_select))
 
-        print('c1: numpy.array([%f, %f])' % (self.segments[0].control1[0],
-                                             self.segments[0].control1[1]))
-        print('c2: numpy.array([%f, %f])' % (self.segments[0].control2[0],
-                                             self.segments[0].control2[1]))
+        print('c1: numpy.array([%f, %f])' %
+              (self.segments[0].control1[0], self.segments[0].control1[1]))
+        print('c2: numpy.array([%f, %f])' %
+              (self.segments[0].control2[0], self.segments[0].control2[1]))
 
         self.redraw()
 
diff --git a/y2018/control_loops/python/graph_generate.py b/y2018/control_loops/python/graph_generate.py
index dae7caa..046b9dd 100644
--- a/y2018/control_loops/python/graph_generate.py
+++ b/y2018/control_loops/python/graph_generate.py
@@ -195,6 +195,7 @@
 
 # Segment in angle space.
 class AngleSegment:
+
     def __init__(self, start, end, name=None, alpha_unitizer=None, vmax=None):
         """Creates an angle segment.
 
@@ -321,8 +322,8 @@
     a = alpha_blend(start, control1, alpha)
     b = alpha_blend(control1, control2, alpha)
     c = alpha_blend(control2, end, alpha)
-    return alpha_blend(
-        alpha_blend(a, b, alpha), alpha_blend(b, c, alpha), alpha)
+    return alpha_blend(alpha_blend(a, b, alpha), alpha_blend(b, c, alpha),
+                       alpha)
 
 
 def subdivide_spline(start, control1, control2, end):
@@ -333,6 +334,7 @@
 
 
 class SplineSegment:
+
     def __init__(self,
                  start,
                  control1,
@@ -350,10 +352,9 @@
         self.vmax = vmax
 
     def __repr__(self):
-        return "SplineSegment(%s, %s, %s, %s)" % (repr(self.start),
-                                                  repr(self.control1),
-                                                  repr(self.control2),
-                                                  repr(self.end))
+        return "SplineSegment(%s, %s, %s, %s)" % (repr(
+            self.start), repr(self.control1), repr(
+                self.control2), repr(self.end))
 
     def DrawTo(self, cr, theta_version):
         if theta_version:
@@ -364,9 +365,8 @@
             end = get_xy(self.end)
 
             draw_lines(cr, [
-                to_theta(
-                    spline_eval(start, control1, control2, end, alpha),
-                    c_i_select)
+                to_theta(spline_eval(start, control1, control2, end, alpha),
+                         c_i_select)
                 for alpha in subdivide_spline(start, control1, control2, end)
             ])
             cr.move_to(self.start[0] + theta_end_circle_size, self.start[1])
@@ -418,6 +418,7 @@
 
 
 class ThetaSplineSegment:
+
     def __init__(self,
                  start,
                  control1,
@@ -435,10 +436,9 @@
         self.vmax = vmax
 
     def __repr__(self):
-        return "ThetaSplineSegment(%s, %s, &s, %s)" % (repr(self.start),
-                                                       repr(self.control1),
-                                                       repr(self.control2),
-                                                       repr(self.end))
+        return "ThetaSplineSegment(%s, %s, %s, %s)" % (repr(
+            self.start), repr(self.control1), repr(
+                self.control2), repr(self.end))
 
     def DrawTo(self, cr, theta_version):
         if (theta_version):
@@ -485,30 +485,36 @@
 short_box_x = 0.431
 short_box_y = 0.082
 
-ready_above_box = to_theta_with_circular_index(
-    tall_box_x, tall_box_y + 0.08, circular_index=-1)
-tall_box_grab = to_theta_with_circular_index(
-    tall_box_x, tall_box_y, circular_index=-1)
-short_box_grab = to_theta_with_circular_index(
-    short_box_x, short_box_y, circular_index=-1)
+ready_above_box = to_theta_with_circular_index(tall_box_x,
+                                               tall_box_y + 0.08,
+                                               circular_index=-1)
+tall_box_grab = to_theta_with_circular_index(tall_box_x,
+                                             tall_box_y,
+                                             circular_index=-1)
+short_box_grab = to_theta_with_circular_index(short_box_x,
+                                              short_box_y,
+                                              circular_index=-1)
 
 # TODO(austin): Drive the front/back off the same numbers a bit better.
 front_high_box = to_theta_with_circular_index(0.378, 2.46, circular_index=-1)
-front_middle3_box = to_theta_with_circular_index(
-    0.700, 2.125, circular_index=-1.000000)
-front_middle2_box = to_theta_with_circular_index(
-    0.700, 2.268, circular_index=-1)
-front_middle1_box = to_theta_with_circular_index(
-    0.800, 1.915, circular_index=-1)
+front_middle3_box = to_theta_with_circular_index(0.700,
+                                                 2.125,
+                                                 circular_index=-1.000000)
+front_middle2_box = to_theta_with_circular_index(0.700,
+                                                 2.268,
+                                                 circular_index=-1)
+front_middle1_box = to_theta_with_circular_index(0.800,
+                                                 1.915,
+                                                 circular_index=-1)
 front_low_box = to_theta_with_circular_index(0.87, 1.572, circular_index=-1)
 back_high_box = to_theta_with_circular_index(-0.75, 2.48, circular_index=0)
-back_middle2_box = to_theta_with_circular_index(
-    -0.700, 2.27, circular_index=0)
-back_middle1_box = to_theta_with_circular_index(
-    -0.800, 1.93, circular_index=0)
+back_middle2_box = to_theta_with_circular_index(-0.700, 2.27, circular_index=0)
+back_middle1_box = to_theta_with_circular_index(-0.800, 1.93, circular_index=0)
 back_low_box = to_theta_with_circular_index(-0.87, 1.64, circular_index=0)
 
-back_extra_low_box = to_theta_with_circular_index(-0.87, 1.52, circular_index=0)
+back_extra_low_box = to_theta_with_circular_index(-0.87,
+                                                  1.52,
+                                                  circular_index=0)
 
 front_switch = to_theta_with_circular_index(0.88, 0.967, circular_index=-1)
 back_switch = to_theta_with_circular_index(-0.88, 0.967, circular_index=-2)
@@ -517,26 +523,20 @@
 
 up = to_theta_with_circular_index(0.0, 2.547, circular_index=-1)
 
-front_switch_auto = to_theta_with_circular_index(
-    0.750, 2.20, circular_index=-1.000000)
+front_switch_auto = to_theta_with_circular_index(0.750,
+                                                 2.20,
+                                                 circular_index=-1.000000)
 
-duck = numpy.array(
-    [numpy.pi / 2.0 - 0.92, numpy.pi / 2.0 - 4.26])
+duck = numpy.array([numpy.pi / 2.0 - 0.92, numpy.pi / 2.0 - 4.26])
 
-starting = numpy.array(
-    [numpy.pi / 2.0 - 0.593329, numpy.pi / 2.0 - 3.749631])
-vertical_starting = numpy.array(
-    [numpy.pi / 2.0, -numpy.pi / 2.0])
+starting = numpy.array([numpy.pi / 2.0 - 0.593329, numpy.pi / 2.0 - 3.749631])
+vertical_starting = numpy.array([numpy.pi / 2.0, -numpy.pi / 2.0])
 
-self_hang = numpy.array(
-    [numpy.pi / 2.0 - 0.191611, numpy.pi / 2.0])
-partner_hang = numpy.array(
-    [numpy.pi / 2.0 - (-0.30), numpy.pi / 2.0])
+self_hang = numpy.array([numpy.pi / 2.0 - 0.191611, numpy.pi / 2.0])
+partner_hang = numpy.array([numpy.pi / 2.0 - (-0.30), numpy.pi / 2.0])
 
-above_hang = numpy.array(
-    [numpy.pi / 2.0 - 0.14, numpy.pi / 2.0 - (-0.165)])
-below_hang = numpy.array(
-    [numpy.pi / 2.0 - 0.39, numpy.pi / 2.0 - (-0.517)])
+above_hang = numpy.array([numpy.pi / 2.0 - 0.14, numpy.pi / 2.0 - (-0.165)])
+below_hang = numpy.array([numpy.pi / 2.0 - 0.39, numpy.pi / 2.0 - (-0.517)])
 
 up_c1 = to_theta((0.63, 1.17), circular_index=-1)
 up_c2 = to_theta((0.65, 1.62), circular_index=-1)
@@ -603,25 +603,25 @@
         num_points = int(numpy.ceil(norm / max_distance))
         last_iteration_point = previous_point
         for subindex in range(1, num_points):
-            subpoint = to_theta(
-                alpha_blend(previous_point_xy, point_xy,
-                            float(subindex) / num_points),
-                circular_index=circular_index)
-            result_points.append((subpoint, '%s%dof%d' % (name, subindex,
-                                                          num_points)))
+            subpoint = to_theta(alpha_blend(previous_point_xy, point_xy,
+                                            float(subindex) / num_points),
+                                circular_index=circular_index)
+            result_points.append(
+                (subpoint, '%s%dof%d' % (name, subindex, num_points)))
             result_paths.append(
                 XYSegment(last_iteration_point, subpoint, vmax=6.0))
             if (last_iteration_point != previous_point).any():
                 result_paths.append(XYSegment(previous_point, subpoint))
             if subindex == num_points - 1:
-              result_paths.append(XYSegment(subpoint, point, vmax=6.0))
+                result_paths.append(XYSegment(subpoint, point, vmax=6.0))
             else:
-              result_paths.append(XYSegment(subpoint, point))
+                result_paths.append(XYSegment(subpoint, point))
             last_iteration_point = subpoint
         result_points.append((point, name))
 
     return result_points, result_paths
 
+
 front_points, front_paths = expand_points(sparse_front_points, 0.06)
 back_points, back_paths = expand_points(sparse_back_points, 0.06)
 
@@ -650,7 +650,6 @@
 front_switch_auto_c1 = numpy.array([1.792857, -0.372768])
 front_switch_auto_c2 = numpy.array([1.861885, -0.273664])
 
-
 # We need to define critical points so we can create paths connecting them.
 # TODO(austin): Attach velocities to the slow ones.
 ready_to_back_low_c1 = numpy.array([2.524325, 0.046417])
@@ -670,30 +669,58 @@
 neutral_to_back_c1 = numpy.array([0.702527, -2.618276])
 neutral_to_back_c2 = numpy.array([0.526914, -3.109691])
 
-
 named_segments = [
-    ThetaSplineSegment(neutral, neutral_to_back_c1, neutral_to_back_c2, back_switch, "BackSwitch"),
-
-    ThetaSplineSegment(neutral, neutral_to_back_low_c1, tall_to_back_high_c2, back_high_box, "NeutralBoxToHigh", alpha_unitizer=long_alpha_unitizer),
-    ThetaSplineSegment(neutral, neutral_to_back_low_c1, tall_to_back_high_c2, back_middle2_box, "NeutralBoxToMiddle2", long_alpha_unitizer),
-    ThetaSplineSegment(neutral, neutral_to_back_low_c1, tall_to_back_low_c2, back_middle1_box, "NeutralBoxToMiddle1", long_alpha_unitizer),
-    ThetaSplineSegment(neutral, neutral_to_back_low_c1, tall_to_back_low_c2, back_low_box, "NeutralBoxToLow", long_alpha_unitizer),
-
-    ThetaSplineSegment(ready_above_box, ready_to_back_low_c1, tall_to_back_high_c2, back_high_box, "ReadyBoxToHigh", long_alpha_unitizer),
-    ThetaSplineSegment(ready_above_box, ready_to_back_low_c1, tall_to_back_high_c2, back_middle2_box, "ReadyBoxToMiddle2", long_alpha_unitizer),
-    ThetaSplineSegment(ready_above_box, ready_to_back_low_c1, tall_to_back_low_c2, back_middle1_box, "ReadyBoxToMiddle1", long_alpha_unitizer),
-    ThetaSplineSegment(ready_above_box, ready_to_back_low_c1, tall_to_back_low_c2, back_low_box, "ReadyBoxToLow", long_alpha_unitizer),
-
-    ThetaSplineSegment(short_box_grab, tall_to_back_low_c1, tall_to_back_high_c2, back_high_box, "ShortBoxToHigh", long_alpha_unitizer),
-    ThetaSplineSegment(short_box_grab, tall_to_back_low_c1, tall_to_back_high_c2, back_middle2_box, "ShortBoxToMiddle2", long_alpha_unitizer),
-    ThetaSplineSegment(short_box_grab, tall_to_back_low_c1, tall_to_back_low_c2, back_middle1_box, "ShortBoxToMiddle1", long_alpha_unitizer),
-    ThetaSplineSegment(short_box_grab, tall_to_back_low_c1, tall_to_back_low_c2, back_low_box, "ShortBoxToLow", long_alpha_unitizer),
-
-    ThetaSplineSegment(tall_box_grab, tall_to_back_low_c1, tall_to_back_high_c2, back_high_box, "TallBoxToHigh", long_alpha_unitizer),
-    ThetaSplineSegment(tall_box_grab, tall_to_back_low_c1, tall_to_back_high_c2, back_middle2_box, "TallBoxToMiddle2", long_alpha_unitizer),
-    ThetaSplineSegment(tall_box_grab, tall_to_back_low_c1, tall_to_back_low_c2, back_middle1_box, "TallBoxToMiddle1", long_alpha_unitizer),
-    ThetaSplineSegment(tall_box_grab, tall_to_back_low_c1, tall_to_back_low_c2, back_low_box, "TallBoxToLow", long_alpha_unitizer),
-
+    ThetaSplineSegment(neutral, neutral_to_back_c1, neutral_to_back_c2,
+                       back_switch, "BackSwitch"),
+    ThetaSplineSegment(neutral,
+                       neutral_to_back_low_c1,
+                       tall_to_back_high_c2,
+                       back_high_box,
+                       "NeutralBoxToHigh",
+                       alpha_unitizer=long_alpha_unitizer),
+    ThetaSplineSegment(neutral, neutral_to_back_low_c1, tall_to_back_high_c2,
+                       back_middle2_box, "NeutralBoxToMiddle2",
+                       long_alpha_unitizer),
+    ThetaSplineSegment(neutral, neutral_to_back_low_c1, tall_to_back_low_c2,
+                       back_middle1_box, "NeutralBoxToMiddle1",
+                       long_alpha_unitizer),
+    ThetaSplineSegment(neutral, neutral_to_back_low_c1, tall_to_back_low_c2,
+                       back_low_box, "NeutralBoxToLow", long_alpha_unitizer),
+    ThetaSplineSegment(ready_above_box, ready_to_back_low_c1,
+                       tall_to_back_high_c2, back_high_box, "ReadyBoxToHigh",
+                       long_alpha_unitizer),
+    ThetaSplineSegment(ready_above_box, ready_to_back_low_c1,
+                       tall_to_back_high_c2, back_middle2_box,
+                       "ReadyBoxToMiddle2", long_alpha_unitizer),
+    ThetaSplineSegment(ready_above_box, ready_to_back_low_c1,
+                       tall_to_back_low_c2, back_middle1_box,
+                       "ReadyBoxToMiddle1", long_alpha_unitizer),
+    ThetaSplineSegment(ready_above_box, ready_to_back_low_c1,
+                       tall_to_back_low_c2, back_low_box, "ReadyBoxToLow",
+                       long_alpha_unitizer),
+    ThetaSplineSegment(short_box_grab, tall_to_back_low_c1,
+                       tall_to_back_high_c2, back_high_box, "ShortBoxToHigh",
+                       long_alpha_unitizer),
+    ThetaSplineSegment(short_box_grab, tall_to_back_low_c1,
+                       tall_to_back_high_c2, back_middle2_box,
+                       "ShortBoxToMiddle2", long_alpha_unitizer),
+    ThetaSplineSegment(short_box_grab, tall_to_back_low_c1,
+                       tall_to_back_low_c2, back_middle1_box,
+                       "ShortBoxToMiddle1", long_alpha_unitizer),
+    ThetaSplineSegment(short_box_grab, tall_to_back_low_c1,
+                       tall_to_back_low_c2, back_low_box, "ShortBoxToLow",
+                       long_alpha_unitizer),
+    ThetaSplineSegment(tall_box_grab, tall_to_back_low_c1,
+                       tall_to_back_high_c2, back_high_box, "TallBoxToHigh",
+                       long_alpha_unitizer),
+    ThetaSplineSegment(tall_box_grab, tall_to_back_low_c1,
+                       tall_to_back_high_c2, back_middle2_box,
+                       "TallBoxToMiddle2", long_alpha_unitizer),
+    ThetaSplineSegment(tall_box_grab, tall_to_back_low_c1, tall_to_back_low_c2,
+                       back_middle1_box, "TallBoxToMiddle1",
+                       long_alpha_unitizer),
+    ThetaSplineSegment(tall_box_grab, tall_to_back_low_c1, tall_to_back_low_c2,
+                       back_low_box, "TallBoxToLow", long_alpha_unitizer),
     SplineSegment(neutral, ready_above_box_c1, ready_above_box_c2,
                   ready_above_box, "ReadyToNeutral"),
     XYSegment(ready_above_box, tall_box_grab, "ReadyToTallBox", vmax=6.0),
@@ -713,32 +740,29 @@
 ]
 
 unnamed_segments = [
-    SplineSegment(neutral, front_switch_auto_c1, front_switch_auto_c2, front_switch_auto),
+    SplineSegment(neutral, front_switch_auto_c1, front_switch_auto_c2,
+                  front_switch_auto),
     SplineSegment(tall_box_grab, ready_to_up_c1, ready_to_up_c2, up),
     SplineSegment(short_box_grab, ready_to_up_c1, ready_to_up_c2, up),
     SplineSegment(ready_above_box, ready_to_up_c1, ready_to_up_c2, up),
     ThetaSplineSegment(duck, duck_c1, duck_c2, neutral),
     SplineSegment(neutral, front_switch_c1, front_switch_c2, front_switch),
-
     XYSegment(ready_above_box, front_low_box),
     XYSegment(ready_above_box, front_switch),
     XYSegment(ready_above_box, front_middle1_box),
     XYSegment(ready_above_box, front_middle2_box),
     XYSegment(ready_above_box, front_middle3_box),
-    SplineSegment(ready_above_box, ready_to_up_c1, ready_to_up_c2, front_high_box),
-
+    SplineSegment(ready_above_box, ready_to_up_c1, ready_to_up_c2,
+                  front_high_box),
     AngleSegment(starting, vertical_starting),
     AngleSegment(vertical_starting, neutral),
-
     XYSegment(neutral, front_low_box),
     XYSegment(up, front_high_box),
     XYSegment(up, front_middle2_box),
-
     XYSegment(front_middle3_box, up),
     XYSegment(front_middle3_box, front_high_box),
     XYSegment(front_middle3_box, front_middle2_box),
     XYSegment(front_middle3_box, front_middle1_box),
-
     XYSegment(up, front_middle1_box),
     XYSegment(up, front_low_box),
     XYSegment(front_high_box, front_middle2_box),
@@ -760,7 +784,6 @@
     XYSegment(back_middle2_box, back_middle1_box),
     XYSegment(back_middle2_box, back_low_box),
     XYSegment(back_middle1_box, back_low_box),
-
     AngleSegment(up, above_hang),
     AngleSegment(above_hang, below_hang),
     AngleSegment(up, below_hang),
diff --git a/y2018/control_loops/python/intake.py b/y2018/control_loops/python/intake.py
index 00f737b..92b7c7a 100755
--- a/y2018/control_loops/python/intake.py
+++ b/y2018/control_loops/python/intake.py
@@ -105,8 +105,11 @@
         r_nm = 0.025
         self.R = numpy.matrix(numpy.diag([(r_nm**2.0), (r_nm**2.0)]))
 
-        self.KalmanGain, self.Q_steady = controls.kalman(
-            A=self.A, B=self.B, C=self.C, Q=self.Q, R=self.R)
+        self.KalmanGain, self.Q_steady = controls.kalman(A=self.A,
+                                                         B=self.B,
+                                                         C=self.C,
+                                                         Q=self.Q,
+                                                         R=self.R)
 
         # The box formed by U_min and U_max must encompass all possible values,
         # or else Austin's code gets angry.
@@ -181,8 +184,9 @@
 
         self.R = numpy.matrix([[(1.0 / (12.0**2.0))]])
 
-        self.K_transformed = controls.dlqr(
-            self.A_transformed, self.B_transformed, self.Q_lqr, self.R)
+        self.K_transformed = controls.dlqr(self.A_transformed,
+                                           self.B_transformed, self.Q_lqr,
+                                           self.R)
 
         # Write the controller back out in the absolute coordinate system.
         self.K = numpy.hstack(
@@ -215,8 +219,11 @@
                    repr(numpy.linalg.eig(self.A - self.B * self.K)[0]),
                    self._name)
 
-        self.KalmanGain, self.Q_steady = controls.kalman(
-            A=self.A, B=self.B, C=self.C, Q=self.Q, R=self.R)
+        self.KalmanGain, self.Q_steady = controls.kalman(A=self.A,
+                                                         B=self.B,
+                                                         C=self.C,
+                                                         Q=self.Q,
+                                                         R=self.R)
 
         self.InitializeState()
 
@@ -359,11 +366,10 @@
     observer_intake.X_hat[0, 0] = intake.X[0, 0]
 
     # Test moving the intake with constant separation.
-    scenario_plotter.run_test(
-        intake,
-        controller_intake=intake_controller,
-        observer_intake=observer_intake,
-        iterations=200)
+    scenario_plotter.run_test(intake,
+                              controller_intake=intake_controller,
+                              observer_intake=observer_intake,
+                              iterations=200)
 
     if FLAGS.plot:
         scenario_plotter.Plot()
@@ -376,8 +382,8 @@
     else:
         namespaces = ['y2018', 'control_loops', 'superstructure', 'intake']
         intake = Intake('Intake')
-        loop_writer = control_loop.ControlLoopWriter(
-            'Intake', [intake], namespaces=namespaces)
+        loop_writer = control_loop.ControlLoopWriter('Intake', [intake],
+                                                     namespaces=namespaces)
         loop_writer.AddConstant(
             control_loop.Constant('kGearRatio', '%f', intake.G))
         loop_writer.AddConstant(
@@ -387,8 +393,9 @@
         loop_writer.Write(argv[1], argv[2])
 
         delayed_intake = DelayedIntake('DelayedIntake')
-        loop_writer = control_loop.ControlLoopWriter(
-            'DelayedIntake', [delayed_intake], namespaces=namespaces)
+        loop_writer = control_loop.ControlLoopWriter('DelayedIntake',
+                                                     [delayed_intake],
+                                                     namespaces=namespaces)
         loop_writer.Write(argv[3], argv[4])
 
 
diff --git a/y2018/control_loops/python/intake_simple.py b/y2018/control_loops/python/intake_simple.py
index 9b6ffb1..4d34f4b 100644
--- a/y2018/control_loops/python/intake_simple.py
+++ b/y2018/control_loops/python/intake_simple.py
@@ -21,253 +21,273 @@
 run_count = 0
 theta_travel = 0.0
 
+
 def to_deg(angle):
-  return angle * rad_to_deg
+    return angle * rad_to_deg
+
 
 def to_rad(angle):
-  return angle / rad_to_deg
+    return angle / rad_to_deg
+
 
 def to_rotations(angle):
-  return angle / pi2
+    return angle / pi2
+
 
 def time_derivative(x, t, voltage, c1, c2, c3):
-  global run_count
-  theta, omega = x
-  dxdt = [omega, -c1 * omega + c3 * math.sin(theta) + c2 * voltage]
-  run_count = run_count + 1
+    global run_count
+    theta, omega = x
+    dxdt = [omega, -c1 * omega + c3 * math.sin(theta) + c2 * voltage]
+    run_count = run_count + 1
 
-  return dxdt
+    return dxdt
+
 
 def get_distal_angle(theta_proximal):
-  # For the proximal angle = -50 degrees, the distal angle is -180 degrees
-  # For the proximal angle =  10 degrees, the distal angle is  -90 degrees
-  distal_angle = to_rad(-180.0 - (-50.0 - to_deg(theta_proximal)) * \
-      (180.0 - 90.0) / (50.0 + 10.0))
-  return distal_angle
+    # For the proximal angle = -50 degrees, the distal angle is -180 degrees
+    # For the proximal angle =  10 degrees, the distal angle is  -90 degrees
+    distal_angle = to_rad(-180.0 - (-50.0 - to_deg(theta_proximal)) * \
+        (180.0 - 90.0) / (50.0 + 10.0))
+    return distal_angle
 
 
 def get_180_degree_time(c1, c2, c3, voltage, gear_ratio, motor_free_speed):
-  global run_count
-  global theta_travel
+    global run_count
+    global theta_travel
 
-  if ( True ):
-    # Gravity is assisting the motion.
-    theta_start = 0.0
-    theta_target = pi
-  elif ( False ):
-    # Gravity is assisting the motion.
-    theta_start = 0.0
-    theta_target = -pi
-  elif ( False ):
-    # Gravity is slowing the motion.
-    theta_start = pi
-    theta_target = 0.0
-  elif ( False ):
-    # Gravity is slowing the motion.
-    theta_start = -pi
-    theta_target = 0.0
-  elif ( False ):
-    # This is for the proximal arm motion.
-    theta_start = to_rad(-50.0)
-    theta_target = to_rad(10.0)
+    if (True):
+        # Gravity is assisting the motion.
+        theta_start = 0.0
+        theta_target = pi
+    elif (False):
+        # Gravity is assisting the motion.
+        theta_start = 0.0
+        theta_target = -pi
+    elif (False):
+        # Gravity is slowing the motion.
+        theta_start = pi
+        theta_target = 0.0
+    elif (False):
+        # Gravity is slowing the motion.
+        theta_start = -pi
+        theta_target = 0.0
+    elif (False):
+        # This is for the proximal arm motion.
+        theta_start = to_rad(-50.0)
+        theta_target = to_rad(10.0)
 
-  theta_half = 0.5 * (theta_start + theta_target)
-  if theta_start > theta_target:
-    voltage = -voltage
-  theta = theta_start
-  theta_travel = theta_start - theta_target
-  if run_count == 0:
-    print("# Theta Start = %.2f radians End = %.2f Theta travel %.2f "
-          "Theta half = %.2f Voltage = %.2f" % (
-              theta_start, theta_target, theta_travel, theta_half, voltage))
-    print("# Theta Start = %.2f degrees End = %.2f Theta travel %.2f "
-          "Theta half = %.2f Voltage = %.2f" % (
-              to_deg(theta_start), to_deg(theta_target), to_deg(theta_travel),
-              to_deg(theta_half), voltage))
-  omega = 0.0
-  time = 0.0
-  delta_time = 0.01 # time step in seconds
-  for step in range(1, 5000):
-     t = numpy.array([time, time + delta_time])
-     time = time + delta_time
-     x = [theta, omega]
-     angular_acceleration = -c1 * omega + c2 * voltage
-     x_n_plus_1 = scipy.integrate.odeint(time_derivative, x, t,
-                                         args=(voltage, c1, c2, c3))
-     theta, omega = x_n_plus_1[1]
+    theta_half = 0.5 * (theta_start + theta_target)
+    if theta_start > theta_target:
+        voltage = -voltage
+    theta = theta_start
+    theta_travel = theta_start - theta_target
+    if run_count == 0:
+        print("# Theta Start = %.2f radians End = %.2f Theta travel %.2f "
+              "Theta half = %.2f Voltage = %.2f" %
+              (theta_start, theta_target, theta_travel, theta_half, voltage))
+        print("# Theta Start = %.2f degrees End = %.2f Theta travel %.2f "
+              "Theta half = %.2f Voltage = %.2f" %
+              (to_deg(theta_start), to_deg(theta_target), to_deg(theta_travel),
+               to_deg(theta_half), voltage))
+    omega = 0.0
+    time = 0.0
+    delta_time = 0.01  # time step in seconds
+    for step in range(1, 5000):
+        t = numpy.array([time, time + delta_time])
+        time = time + delta_time
+        x = [theta, omega]
+        angular_acceleration = -c1 * omega + c2 * voltage
+        x_n_plus_1 = scipy.integrate.odeint(time_derivative,
+                                            x,
+                                            t,
+                                            args=(voltage, c1, c2, c3))
+        theta, omega = x_n_plus_1[1]
 
-     if ( False ):
-       print("%4d  %8.4f %8.2f          %8.4f          %8.4f    %8.3f    "
-             "%8.3f     %8.3f      %8.3f" % (
-                 step, time, theta, omega, angular_acceleration,
-                 to_rotations(theta), to_rotations(omega),
-                 omega * gear_ratio * 60.0 / pi2,
-                 omega * gear_ratio / motor_free_speed))
-     if theta_start < theta_target:
-       # Angle is increasing through the motion.
-       if theta > theta_half:
-         break
-     else:
-       # Angle is decreasing through the motion.
-       if (theta < theta_half):
-         break
+        if (False):
+            print(
+                "%4d  %8.4f %8.2f          %8.4f          %8.4f    %8.3f    "
+                "%8.3f     %8.3f      %8.3f" %
+                (step, time, theta, omega, angular_acceleration,
+                 to_rotations(theta), to_rotations(omega), omega * gear_ratio *
+                 60.0 / pi2, omega * gear_ratio / motor_free_speed))
+        if theta_start < theta_target:
+            # Angle is increasing through the motion.
+            if theta > theta_half:
+                break
+        else:
+            # Angle is decreasing through the motion.
+            if (theta < theta_half):
+                break
 
-  return 2.0 * time
+    return 2.0 * time
+
 
 def main():
-  # m/sec^2 Gravity Constant
-  gravity = 9.8
-  # m/sec^2 Gravity Constant - Use 0.0 for the intake.  It is horizontal.
-  gravity = 0.0
-  # Volts
-  voltage_nominal = 12
+    # m/sec^2 Gravity Constant
+    gravity = 9.8
+    # m/sec^2 Gravity Constant - Use 0.0 for the intake.  It is horizontal.
+    gravity = 0.0
+    # Volts
+    voltage_nominal = 12
 
-  # Vex 775 Pro motor specs from http://banebots.com/p/M2-RS550-120
-  motor_name = "Vex 775 Pro motor specs from http://banebots.com/p/M2-RS550-120"
-  current_stall = 134 # amps stall current
-  current_no_load = 0.7 # amps no load current
-  torque_stall = 710/1000.0 # N-m Stall Torque
-  speed_no_load_rpm = 18730 # RPM no load speed
+    # Vex 775 Pro motor specs from http://banebots.com/p/M2-RS550-120
+    motor_name = "Vex 775 Pro motor specs from http://banebots.com/p/M2-RS550-120"
+    current_stall = 134  # amps stall current
+    current_no_load = 0.7  # amps no load current
+    torque_stall = 710 / 1000.0  # N-m Stall Torque
+    speed_no_load_rpm = 18730  # RPM no load speed
 
-  if ( True ):
-    # Bag motor from https://www.vexrobotics.com/217-3351.html
-    motor_name = "Bag motor from https://www.vexrobotics.com/217-3351.html"
-    current_stall = 53.0 # amps stall current
-    current_no_load = 1.8 # amps no load current
-    torque_stall = 0.4 # N-m Stall Torque
-    speed_no_load_rpm = 13180.0 # RPM no load speed
+    if (True):
+        # Bag motor from https://www.vexrobotics.com/217-3351.html
+        motor_name = "Bag motor from https://www.vexrobotics.com/217-3351.html"
+        current_stall = 53.0  # amps stall current
+        current_no_load = 1.8  # amps no load current
+        torque_stall = 0.4  # N-m Stall Torque
+        speed_no_load_rpm = 13180.0  # RPM no load speed
 
-  if ( False ):
-    # Mini CIM motor from https://www.vexrobotics.com/217-3371.html
-    motor_name = "Mini CIM motor from https://www.vexrobotics.com/217-3371.html"
-    current_stall = 89.0 # amps stall current
-    current_no_load = 3.0 # amps no load current
-    torque_stall = 1.4 # N-m Stall Torque
-    speed_no_load_rpm = 5840.0 # RPM no load speed
+    if (False):
+        # Mini CIM motor from https://www.vexrobotics.com/217-3371.html
+        motor_name = "Mini CIM motor from https://www.vexrobotics.com/217-3371.html"
+        current_stall = 89.0  # amps stall current
+        current_no_load = 3.0  # amps no load current
+        torque_stall = 1.4  # N-m Stall Torque
+        speed_no_load_rpm = 5840.0  # RPM no load speed
 
-  # How many motors are we using?
-  num_motors = 1
+    # How many motors are we using?
+    num_motors = 1
 
-  # Motor values
-  print("# Motor: %s" % (motor_name))
-  print("# Number of motors: %d" % (num_motors))
-  print("# Stall torque: %.1f n-m" % (torque_stall))
-  print("# Stall current: %.1f amps" % (current_stall))
-  print("# No load current: %.1f amps" % (current_no_load))
-  print("# No load speed: %.0f rpm" % (speed_no_load_rpm))
+    # Motor values
+    print("# Motor: %s" % (motor_name))
+    print("# Number of motors: %d" % (num_motors))
+    print("# Stall torque: %.1f n-m" % (torque_stall))
+    print("# Stall current: %.1f amps" % (current_stall))
+    print("# No load current: %.1f amps" % (current_no_load))
+    print("# No load speed: %.0f rpm" % (speed_no_load_rpm))
 
-  # Constants from motor values
-  resistance_motor = voltage_nominal / current_stall
-  speed_no_load_rps = speed_no_load_rpm / 60.0 # Revolutions per second no load speed
-  speed_no_load = speed_no_load_rps * 2.0 * pi
-  Kt = num_motors * torque_stall / current_stall # N-m/A torque constant
-  Kv_rpm = speed_no_load_rpm / (voltage_nominal -
-                                resistance_motor * current_no_load)  # rpm/V
-  Kv = Kv_rpm * 2.0 * pi / 60.0  # rpm/V
+    # Constants from motor values
+    resistance_motor = voltage_nominal / current_stall
+    speed_no_load_rps = speed_no_load_rpm / 60.0  # Revolutions per second no load speed
+    speed_no_load = speed_no_load_rps * 2.0 * pi
+    Kt = num_motors * torque_stall / current_stall  # N-m/A torque constant
+    Kv_rpm = speed_no_load_rpm / (
+        voltage_nominal - resistance_motor * current_no_load)  # rpm/V
+    Kv = Kv_rpm * 2.0 * pi / 60.0  # rpm/V
 
-  # Robot Geometry and physics
-  # m Length of arm connected to the robot base
-  length_proximal_arm = inches_to_meters * 47.34
-  # m Length of arm that holds the cube
-  length_distal_arm = inches_to_meters * 44.0
-  # m Length of intake arm from the pivot point to where the big roller contacts a cube.
-  length_intake_arm =  inches_to_meters * 9.0
-  mass_cube = 6.0 * lbs_to_kg  # Weight of the cube in Kgrams
-  mass_proximal_arm = 5.5 * lbs_to_kg # Weight of proximal arm
-  mass_distal_arm = 3.5 * lbs_to_kg # Weight of distal arm
-  mass_distal = mass_cube + mass_distal_arm
-  mass_proximal = mass_proximal_arm + mass_distal
-  # m Length from arm pivot point to arm CG
-  radius_to_proximal_arm_cg = 22.0 * inches_to_meters
-  # m Length from arm pivot point to arm CG
-  radius_to_distal_arm_cg = 10.0 * inches_to_meters
+    # Robot Geometry and physics
+    # m Length of arm connected to the robot base
+    length_proximal_arm = inches_to_meters * 47.34
+    # m Length of arm that holds the cube
+    length_distal_arm = inches_to_meters * 44.0
+    # m Length of intake arm from the pivot point to where the big roller contacts a cube.
+    length_intake_arm = inches_to_meters * 9.0
+    mass_cube = 6.0 * lbs_to_kg  # Weight of the cube in kilograms
+    mass_proximal_arm = 5.5 * lbs_to_kg  # Weight of proximal arm
+    mass_distal_arm = 3.5 * lbs_to_kg  # Weight of distal arm
+    mass_distal = mass_cube + mass_distal_arm
+    mass_proximal = mass_proximal_arm + mass_distal
+    # m Length from arm pivot point to arm CG
+    radius_to_proximal_arm_cg = 22.0 * inches_to_meters
+    # m Length from arm pivot point to arm CG
+    radius_to_distal_arm_cg = 10.0 * inches_to_meters
 
-  radius_to_distal_cg = (length_distal_arm * mass_cube +
-                         radius_to_distal_arm_cg * mass_distal_arm) / \
-                             mass_distal
-  radius_to_proximal_cg = (length_proximal_arm * mass_distal +
-                           radius_to_proximal_arm_cg * mass_proximal_arm) / \
-                               mass_proximal
-  J_cube = length_distal_arm * length_distal_arm*mass_cube
-  # Kg m^2 Moment of inertia of the proximal arm
-  J_proximal_arm = radius_to_proximal_arm_cg * radius_to_proximal_arm_cg * \
-      mass_distal_arm
-  # Kg m^2 Moment of inertia distal arm and cube at end of proximal arm.
-  J_distal_arm_and_cube_at_end_of_proximal_arm = length_proximal_arm * \
-      length_proximal_arm * mass_distal
-  # Kg m^2 Moment of inertia of the distal arm
-  J_distal_arm = radius_to_distal_arm_cg * radius_to_distal_arm_cg * mass_distal_arm
-  # Moment of inertia of the arm with the cube on the end
-  J = J_distal_arm_and_cube_at_end_of_proximal_arm + J_proximal_arm
-  # Intake claw
-  J_intake = 0.295  # Kg m^2 Moment of inertia of intake
-  J = J_intake
+    radius_to_distal_cg = (length_distal_arm * mass_cube +
+                           radius_to_distal_arm_cg * mass_distal_arm) / \
+                               mass_distal
+    radius_to_proximal_cg = (length_proximal_arm * mass_distal +
+                             radius_to_proximal_arm_cg * mass_proximal_arm) / \
+                                 mass_proximal
+    J_cube = length_distal_arm * length_distal_arm * mass_cube
+    # Kg m^2 Moment of inertia of the proximal arm
+    J_proximal_arm = radius_to_proximal_arm_cg * radius_to_proximal_arm_cg * \
+        mass_distal_arm
+    # Kg m^2 Moment of inertia distal arm and cube at end of proximal arm.
+    J_distal_arm_and_cube_at_end_of_proximal_arm = length_proximal_arm * \
+        length_proximal_arm * mass_distal
+    # Kg m^2 Moment of inertia of the distal arm
+    J_distal_arm = radius_to_distal_arm_cg * radius_to_distal_arm_cg * mass_distal_arm
+    # Moment of inertia of the arm with the cube on the end
+    J = J_distal_arm_and_cube_at_end_of_proximal_arm + J_proximal_arm
+    # Intake claw
+    J_intake = 0.295  # Kg m^2 Moment of inertia of intake
+    J = J_intake
 
-  gear_ratio = 140.0  # Guess at the gear ratio
-  gear_ratio = 100.0  # Guess at the gear ratio
-  gear_ratio = 90.0  # Guess at the gear ratio
+    gear_ratio = 140.0  # Guess at the gear ratio
+    gear_ratio = 100.0  # Guess at the gear ratio
+    gear_ratio = 90.0  # Guess at the gear ratio
 
-  error_margine = 1.0
-  voltage = 10.0  # voltage for the motor.  Assuming a loaded robot so not using 12 V.
-  # It might make sense to use a lower motor frees peed when the voltage is not a full 12 Volts.
-  # motor_free_speed = Kv * voltage
-  motor_free_speed = speed_no_load
+    error_margine = 1.0
+    voltage = 10.0  # voltage for the motor.  Assuming a loaded robot so not using 12 V.
+    # It might make sense to use a lower motor frees peed when the voltage is not a full 12 Volts.
+    # motor_free_speed = Kv * voltage
+    motor_free_speed = speed_no_load
 
-  print("# Kt = %f N-m/A\n# Kv_rpm = %f rpm/V\n# Kv = %f radians/V" % (Kt, Kv_rpm, Kv))
-  print("# %.2f Ohms Resistance of the motor " % (resistance_motor))
-  print("# %.2f kg Cube weight" % (mass_cube))
-  print("# %.2f kg Proximal Arm mass" % (mass_proximal_arm))
-  print("# %.2f kg Distal Arm mass" % (mass_distal_arm))
-  print("# %.2f kg Distal Arm and Cube weight" % (mass_distal))
-  print("# %.2f m Length from distal arm pivot point to arm CG" % (
-      radius_to_distal_arm_cg))
-  print("# %.2f m Length from distal arm pivot point to arm and cube cg" % (
-      radius_to_distal_cg))
-  print("# %.2f kg-m^2 Moment of inertia of the cube about the arm pivot point" % (J_cube))
-  print("# %.2f m Length from proximal arm pivot point to arm CG" % (radius_to_proximal_arm_cg))
-  print("# %.2f m Length from proximal arm pivot point to arm and cube cg" % (
-      radius_to_proximal_cg))
-  print("# %.2f m  Proximal arm length" % (length_proximal_arm))
-  print("# %.2f m  Distal arm length" % (length_distal_arm))
+    print("# Kt = %f N-m/A\n# Kv_rpm = %f rpm/V\n# Kv = %f radians/V" %
+          (Kt, Kv_rpm, Kv))
+    print("# %.2f Ohms Resistance of the motor " % (resistance_motor))
+    print("# %.2f kg Cube weight" % (mass_cube))
+    print("# %.2f kg Proximal Arm mass" % (mass_proximal_arm))
+    print("# %.2f kg Distal Arm mass" % (mass_distal_arm))
+    print("# %.2f kg Distal Arm and Cube weight" % (mass_distal))
+    print("# %.2f m Length from distal arm pivot point to arm CG" %
+          (radius_to_distal_arm_cg))
+    print("# %.2f m Length from distal arm pivot point to arm and cube cg" %
+          (radius_to_distal_cg))
+    print(
+        "# %.2f kg-m^2 Moment of inertia of the cube about the arm pivot point"
+        % (J_cube))
+    print("# %.2f m Length from proximal arm pivot point to arm CG" %
+          (radius_to_proximal_arm_cg))
+    print("# %.2f m Length from proximal arm pivot point to arm and cube cg" %
+          (radius_to_proximal_cg))
+    print("# %.2f m  Proximal arm length" % (length_proximal_arm))
+    print("# %.2f m  Distal arm length" % (length_distal_arm))
 
-  print("# %.2f kg-m^2 Moment of inertia of the intake about the intake pivot point" % (
-      J_intake))
-  print("# %.2f kg-m^2 Moment of inertia of the distal arm about the arm pivot point" % (
-      J_distal_arm))
-  print("# %.2f kg-m^2 Moment of inertia of the proximal arm about the arm pivot point" % (
-      J_proximal_arm))
-  print("# %.2f kg-m^2 Moment of inertia of the distal arm and cube mass about "
-        "the proximal arm pivot point" % (
-            J_distal_arm_and_cube_at_end_of_proximal_arm))
-  print("# %.2f kg-m^2 Moment of inertia of the intake the intake pivot point "
+    print(
+        "# %.2f kg-m^2 Moment of inertia of the intake about the intake pivot point"
+        % (J_intake))
+    print(
+        "# %.2f kg-m^2 Moment of inertia of the distal arm about the arm pivot point"
+        % (J_distal_arm))
+    print(
+        "# %.2f kg-m^2 Moment of inertia of the proximal arm about the arm pivot point"
+        % (J_proximal_arm))
+    print(
+        "# %.2f kg-m^2 Moment of inertia of the distal arm and cube mass about "
+        "the proximal arm pivot point" %
+        (J_distal_arm_and_cube_at_end_of_proximal_arm))
+    print(
+        "# %.2f kg-m^2 Moment of inertia of the intake the intake pivot point "
         "(J value used in simulation)" % (J))
-  print("# %d Number of motors" % (num_motors))
+    print("# %d Number of motors" % (num_motors))
 
-  print("# %.2f V Motor voltage" % (voltage))
-  for gear_ratio in range(60, 241, 10):
-    c1 = Kt * gear_ratio * gear_ratio / (Kv * resistance_motor * J)
-    c2 = gear_ratio * Kt / (J * resistance_motor)
-    c3 = radius_to_proximal_cg * mass_proximal * gravity / J
+    print("# %.2f V Motor voltage" % (voltage))
+    for gear_ratio in range(60, 241, 10):
+        c1 = Kt * gear_ratio * gear_ratio / (Kv * resistance_motor * J)
+        c2 = gear_ratio * Kt / (J * resistance_motor)
+        c3 = radius_to_proximal_cg * mass_proximal * gravity / J
 
-    if ( False ):
-      print("# %.8f 1/sec C1 constant" % (c1))
-      print("# %.2f 1/sec C2 constant" % (c2))
-      print("# %.2f 1/(V sec^2) C3 constant" % (c3))
-      print("# %.2f RPM Free speed at motor voltage" % (voltage * Kv_rpm))
+        if (False):
+            print("# %.8f 1/sec C1 constant" % (c1))
+            print("# %.2f 1/sec C2 constant" % (c2))
+            print("# %.2f 1/(V sec^2) C3 constant" % (c3))
+            print("# %.2f RPM Free speed at motor voltage" %
+                  (voltage * Kv_rpm))
 
-    torque_90_degrees = radius_to_distal_cg * mass_distal * gravity
-    voltage_90_degrees = resistance_motor * torque_90_degrees / (gear_ratio * Kt)
-    torque_peak = gear_ratio * num_motors * torque_stall
-    torque_peak_ft_lbs = torque_peak * newton_meters_to_ft_lbs
-    normal_force = torque_peak / length_intake_arm
-    normal_force_lbf = newton_to_lbf * normal_force
-    time_required = get_180_degree_time(c1, c2, c3, voltage,
-                                        gear_ratio, motor_free_speed)
-    print("Time for %.1f degrees for gear ratio %3.0f is %.2f seconds.  "
-          "Peak (stall) torque %3.0f nm %3.0f ft-lb Normal force at intake "
-          "end %3.0f N %2.0f lbf" % \
-      (to_deg(theta_travel), gear_ratio, time_required,
-       torque_peak, torque_peak_ft_lbs, normal_force, normal_force_lbf))
+        torque_90_degrees = radius_to_distal_cg * mass_distal * gravity
+        voltage_90_degrees = resistance_motor * torque_90_degrees / (
+            gear_ratio * Kt)
+        torque_peak = gear_ratio * num_motors * torque_stall
+        torque_peak_ft_lbs = torque_peak * newton_meters_to_ft_lbs
+        normal_force = torque_peak / length_intake_arm
+        normal_force_lbf = newton_to_lbf * normal_force
+        time_required = get_180_degree_time(c1, c2, c3, voltage, gear_ratio,
+                                            motor_free_speed)
+        print("Time for %.1f degrees for gear ratio %3.0f is %.2f seconds.  "
+              "Peak (stall) torque %3.0f nm %3.0f ft-lb Normal force at intake "
+              "end %3.0f N %2.0f lbf" % \
+          (to_deg(theta_travel), gear_ratio, time_required,
+           torque_peak, torque_peak_ft_lbs, normal_force, normal_force_lbf))
+
 
 if __name__ == '__main__':
-  main()
+    main()
diff --git a/y2018/control_loops/python/path_points.py b/y2018/control_loops/python/path_points.py
index eed9804..8802629 100644
--- a/y2018/control_loops/python/path_points.py
+++ b/y2018/control_loops/python/path_points.py
@@ -2,9 +2,422 @@
 
 # Path points from Parker.
 # Points in (theta0, theta1) of the boundry of the safe region.
-points_bounary = ([1.8577014383575772, -1.7353804562372057], [1.8322288438826315, -1.761574570216062], [1.7840339881582052, -1.8122752826851365], [1.7367354460218392, -1.863184376466228], [1.69054633425053, -1.9140647421906793], [1.6456590387358871, -1.9646939485526782], [1.6022412524081968, -2.0148691793459164], [1.5604334435784202, -2.0644108218872432], [1.5203477477575325, -2.1131646494909866], [1.4820681545944325, -2.1610026706370666], [1.4456517763321752, -2.207822815007094], [1.4111309389855604, -2.253547685496041], [1.3785158286698027, -2.298122627340264], [1.3477974448369014, -2.3415133576238922], [1.318950649522341, -2.3837033700108243], [1.2919371475531436, -2.4246912899477153], [1.266708279449626, -2.464488312575651], [1.2432075513427807, -2.5031158147261996], [1.2213728618107609, -2.5406031969824228], [1.2011384131259828, -2.5769859833310096], [1.1824363142639513, -2.61230418457078], [1.165197896140849, -2.64660091671642], [1.149354767212547, -2.6799212560849437], [1.134839641107821, -2.712311307404836], [1.121586968578607, -2.7438174590386177], [1.1095334047223224, -2.774485799309971], [1.09861813993779, -2.8043616692157007], [1.088783119985477, -2.8334893289039758], [1.079973177230731, -2.861911717797479], [1.0721360919151535, -2.8896702908486835], [1.065222599283597, -2.916804915950962], [1.0591863556764607, -2.943353819884641], [1.0539838743128325, -2.969353572295195], [1.049574439440948, -2.9948390990606946], [1.0459200058000553, -3.01984371800932], [1.0429850888932353, -3.044399191310831], [1.0407366503803421, -3.0685357900111128], [1.039143981929867, -3.092282367132034], [1.0381785900853944, -3.1156664365461078], [1.0378140840766794, -3.1387142554815886], [1.038026068010855, -3.1614509090414518], [1.0387920384931424, -3.1839003955494407], [1.0400912884293725, -3.2060857118856374], [1.0419048175385754, -3.2280289382581637], [1.0442152499395827, -3.2497513220895704], [1.047006759060362, -3.2712733608874625], [1.050265000044113, 
-3.2926148841283163], [1.0513266200838918, -3.2986722862692828], [0.6882849734317291, -3.2986722862692828], [0.6882849734317291, -2.40847516476558], [0.8062364039734171, -2.2816832357742984], [0.9846964491122989, -2.0749837238115147], [1.080856081125841, -1.9535526052833936], [1.1403399741223543, -1.8700303125904767], [1.1796460643255697, -1.8073338252603661], [1.206509908068604, -1.7574623871869075], [1.225108933139178, -1.7160975113819317], [1.237917343016644, -1.6806816402603253], [1.2465009152225068, -1.6495957958330445], [1.251901644035212, -1.6217619064422375], [1.2548410275662123, -1.5964327471863136], [1.2558349967266738, -1.5730732293972975], [1.2552624664807475, -1.551289614657788], [1.2534080548485127, -1.5307854126408047], [1.2504896957865235, -1.5113328783804112], [1.2466770509718135, -1.492754008779478], [1.2421041193998992, -1.4749075280685116], [1.236878076935188, -1.457679763958034], [1.231085601347444, -1.4409781183381307], [1.2247974811461413, -1.4247263085140511], [1.2180720288351026, -1.408860841660136], [1.2109576458572935, -1.3933283641188086], [1.2034947755974974, -1.378083641634472], [1.1957174082841977, -1.363088001457586], [1.1876542532510088, -1.3483081171759035], [1.179329661157153, -1.3337150510329991], [1.1707643560768641, -1.3192834919003447], [1.1385726725405734, -1.3512941162901886], [1.1061744838535637, -1.3828092440478137], [1.0736355415504857, -1.4137655512448706], [1.0410203155384732, -1.444102884084807], [1.0083912519665894, -1.4737649313813326], [0.975808099297045, -1.5026998012630925], [0.9433273192311136, -1.5308604887780404], [0.941597772428203, -1.50959779639341], [0.9392183822389457, -1.488861961175901], [0.9362399962318921, -1.4685999567644576], [0.9327074201772598, -1.448764371832554], [0.9286602163651203, -1.4293126388801052], [0.9241333769591611, -1.410206381002334], [0.9191578939466147, -1.3914108561164176], [0.9137612431353617, -1.3728944819981919], [0.9079677963791273, -1.3546284285619337], [0.9017991735984072, 
-1.3365862662768213], [0.8952745440670551, -1.3187436615861219], [0.888410884742945, -1.3010781117818295], [0.8812232020507231, -1.2835687130679965], [0.8737247224089426, -1.2661959565824437], [0.8659270558807739, -1.2489415479874506], [0.8578403365760008, -1.2317882469234374], [0.8810761292150717, -1.2118184809610988], [0.9041762360363244, -1.1914283310662528], [0.9271196683064211, -1.1706361926784383], [0.949885054142765, -1.1494613527499984], [0.9724507572661958, -1.1279238910069835], [0.9947950004998649, -1.1060445714951375], [1.0168959923240788, -1.0838447258650978], [1.0387320546908576, -1.0613461300367468], [1.0602817502430675, -1.0385708760262022], [1.0815240070740941, -1.0155412408147642], [1.102438239206001, -0.9922795541836177], [1.123004461055328, -0.9688080674301042], [1.1432033942926907, -0.9451488248218312], [1.1630165656794818, -0.9213235395370803], [1.1824263946752058, -0.8973534756890716], [1.2014162698440272, -0.8732593378443982], [1.2199706133398625, -0.8490611692304734], [1.2380749330068292, -0.8247782595916947], [1.2557158618869284, -0.800429063408306], [1.2728811851714203, -0.776031128944234], [1.2895598548592355, -0.7516010383486005], [1.3057419925890068, -0.7271543588072293], [1.3214188812865908, -0.702705604531139], [1.3365829464142562, -0.6782682091832445], [1.351227727719687, -0.6538545081853449], [1.365347842462401, -0.6294757302167044], [1.3789389411433586, -0.6051419971134862], [1.391997656782351, -0.5808623313043475], [1.4045215487801634, -0.5566446698699925], [1.4165090423718034, -0.5324958842910054], [1.4279593646268474, -0.5084218049460658], [1.4388724778869602, -0.4844272494383845], [1.449249011452494, -0.460516053858597], [1.4590901922431447, -0.4366911061340692], [1.468397775065033, -0.4129543806643518], [1.4771739730209745, -0.38930697349737264], [1.485421388504271, -0.36574913735813025], [1.4931429451209484, -0.3422803158986589], [1.5003418207921726, -0.3188991765927855], [1.5070213821985838, -0.2956036417497373], 
[1.513185120641734, -0.2723909171654731], [1.5188365893149296, -0.24925751796822435], [1.5239793418959948, -0.22619929124396096], [1.5286168722972349, -0.2032114350471563], [1.5327525553319497, -0.1802885134112242], [1.5363895879810432, -0.15742446697009113], [1.5395309308654115, -0.1346126187862511], [1.5421792494481048, -0.11184567494963411], [1.5443368544016174, -0.0891157194637005], [1.5460056404769755, -0.06641420286771664], [1.5471870230984175, -0.043731923953761714], [1.547881871775246, -0.021059003819238205], [1.5480904392645911, 0.0016151486553661097], [1.547812285227133, 0.024301881005710235], [1.5470461928818935, 0.047013352405288866], [1.5457900768723736, 0.06976260156314103], [1.5440408801865422, 0.09256362934244797], [1.5417944575035705, 0.11543149864415554], [1.5390454417383017, 0.13838245474060726], [1.5357870897759938, 0.16143407007284788], [1.5320111023738603, 0.18460541860588778], [1.5277074118667517, 0.20791728625864334], [1.5228639295308124, 0.23139242581719505], [1.5174662420569858, 0.25505586728497265], [1.5114972433117844, 0.27893529808946027], [1.5049366830391921, 0.30306153234932315], [1.4977606078174102, 0.32746909510770755], [1.4899406605544634, 0.35219695697813347], [1.4814431917184283, 0.37728946847450484], [1.4722281161523716, 0.40279756372788583], [1.4622474200998372, 0.4287803341537376], [1.4514431778273647, 0.45530712040457033], [1.4397448652396179, 0.48246034696312606], [1.427065639662639, 0.5103394485717625], [1.4132970536897833, 0.5390664502570618], [1.3983013135749314, 0.5687941401756967], [1.3818995257862978, 0.5997184788509978], [1.3638530549860057, 0.6320982830132342], [1.3438323057823602, 0.6662881915757862], [1.3213606855701236, 0.7027978462604986], [1.2957042956404132, 0.7424084023365868], [1.2656247456808565, 0.786433646353686], [1.2287034601894442, 0.8374338794107618], [1.1786904071656892, 0.902017503822168], [1.0497616572686415, 1.0497616572686426], [0.9097522267255194, 1.1864251300690412], [0.8484431816837388, 
1.2397127624624213], [0.8000049977037023, 1.2791960970308718], [0.7581818853313602, 1.3114777786351866], [0.7205493452982701, 1.3391121846078957], [0.6882849734317291, 1.3617105524217008], [0.6882849734317291, 2.356194490192345], [1.3376784164442164, 2.356194490192345], [1.3516056511178856, 2.3360528189227487], [1.3708660530144583, 2.308792458458483], [1.3913233553973305, 2.2804469909122105], [1.4131687110627962, 2.2508089759388485], [1.436656614785, 2.219604401551449], [1.4621380338543308, 2.18645769238238], [1.4901198983671067, 2.1508288797419346], [1.5213822452925796, 2.111889875368928], [1.5572408030205178, 2.0682471032649676], [1.6002650085871786, 2.0171829478543404], [1.657096323745671, 1.9516778011159774], [1.7977393629734166, 1.7977393629734166], [1.8577014383575772, 1.7364646168980775], [1.8577014383575772, -1.7353804562372057])
+points_bounary = (
+    [1.8577014383575772,
+     -1.7353804562372057], [1.8322288438826315, -1.761574570216062],
+    [1.7840339881582052,
+     -1.8122752826851365], [1.7367354460218392, -1.863184376466228
+                            ], [1.69054633425053, -1.9140647421906793],
+    [1.6456590387358871, -1.9646939485526782
+     ], [1.6022412524081968, -2.0148691793459164
+         ], [1.5604334435784202, -2.0644108218872432
+             ], [1.5203477477575325, -2.1131646494909866
+                 ], [1.4820681545944325, -2.1610026706370666
+                     ], [1.4456517763321752, -2.207822815007094
+                         ], [1.4111309389855604, -2.253547685496041
+                             ], [1.3785158286698027, -2.298122627340264],
+    [1.3477974448369014, -2.3415133576238922
+     ], [1.318950649522341, -2.3837033700108243
+         ], [1.2919371475531436, -2.4246912899477153
+             ], [1.266708279449626, -2.464488312575651
+                 ], [1.2432075513427807, -2.5031158147261996
+                     ], [1.2213728618107609, -2.5406031969824228
+                         ], [1.2011384131259828, -2.5769859833310096
+                             ], [1.1824363142639513, -2.61230418457078
+                                 ], [1.165197896140849, -2.64660091671642],
+    [1.149354767212547, -2.6799212560849437
+     ], [1.134839641107821, -2.712311307404836
+         ], [1.121586968578607, -2.7438174590386177
+             ], [1.1095334047223224, -2.774485799309971
+                 ], [1.09861813993779, -2.8043616692157007
+                     ], [1.088783119985477, -2.8334893289039758
+                         ], [1.079973177230731, -2.861911717797479
+                             ], [1.0721360919151535, -2.8896702908486835
+                                 ], [1.065222599283597, -2.916804915950962],
+    [1.0591863556764607, -2.943353819884641
+     ], [1.0539838743128325, -2.969353572295195
+         ], [1.049574439440948, -2.9948390990606946
+             ], [1.0459200058000553, -3.01984371800932
+                 ], [1.0429850888932353, -3.044399191310831
+                     ], [1.0407366503803421, -3.0685357900111128
+                         ], [1.039143981929867, -3.092282367132034
+                             ], [1.0381785900853944, -3.1156664365461078
+                                 ], [1.0378140840766794, -3.1387142554815886],
+    [1.038026068010855,
+     -3.1614509090414518], [1.0387920384931424, -3.1839003955494407], [
+         1.0400912884293725,
+         -3.2060857118856374
+     ], [1.0419048175385754, -3.2280289382581637
+         ], [1.0442152499395827, -3.2497513220895704
+             ], [1.047006759060362, -3.2712733608874625
+                 ], [1.050265000044113, -3.2926148841283163
+                     ], [1.0513266200838918, -3.2986722862692828
+                         ], [0.6882849734317291, -3.2986722862692828
+                             ], [0.6882849734317291, -2.40847516476558
+                                 ], [0.8062364039734171, -2.2816832357742984],
+    [0.9846964491122989,
+     -2.0749837238115147], [1.080856081125841, -1.9535526052833936], [
+         1.1403399741223543,
+         -1.8700303125904767
+     ], [1.1796460643255697,
+         -1.8073338252603661], [1.206509908068604, -1.7574623871869075], [
+             1.225108933139178,
+             -1.7160975113819317
+         ], [1.237917343016644, -1.6806816402603253
+             ], [1.2465009152225068, -1.6495957958330445
+                 ], [1.251901644035212, -1.6217619064422375
+                     ], [1.2548410275662123, -1.5964327471863136
+                         ], [1.2558349967266738, -1.5730732293972975
+                             ], [1.2552624664807475, -1.551289614657788
+                                 ], [1.2534080548485127, -1.5307854126408047],
+    [1.2504896957865235,
+     -1.5113328783804112], [1.2466770509718135, -1.492754008779478], [
+         1.2421041193998992,
+         -1.4749075280685116
+     ], [1.236878076935188, -1.457679763958034
+         ], [1.231085601347444, -1.4409781183381307
+             ], [1.2247974811461413, -1.4247263085140511
+                 ], [1.2180720288351026, -1.408860841660136
+                     ], [1.2109576458572935, -1.3933283641188086
+                         ], [1.2034947755974974, -1.378083641634472
+                             ], [1.1957174082841977, -1.363088001457586
+                                 ], [1.1876542532510088, -1.3483081171759035],
+    [1.179329661157153,
+     -1.3337150510329991], [1.1707643560768641, -1.3192834919003447], [
+         1.1385726725405734,
+         -1.3512941162901886
+     ], [1.1061744838535637,
+         -1.3828092440478137], [1.0736355415504857, -1.4137655512448706], [
+             1.0410203155384732,
+             -1.444102884084807
+         ], [1.0083912519665894,
+             -1.4737649313813326], [0.975808099297045, -1.5026998012630925], [
+                 0.9433273192311136,
+                 -1.5308604887780404
+             ], [0.941597772428203, -1.50959779639341
+                 ], [0.9392183822389457, -1.488861961175901
+                     ], [0.9362399962318921, -1.4685999567644576
+                         ], [0.9327074201772598, -1.448764371832554
+                             ], [0.9286602163651203, -1.4293126388801052
+                                 ], [0.9241333769591611, -1.410206381002334],
+    [0.9191578939466147,
+     -1.3914108561164176], [0.9137612431353617, -1.3728944819981919], [
+         0.9079677963791273,
+         -1.3546284285619337
+     ], [0.9017991735984072,
+         -1.3365862662768213], [0.8952745440670551, -1.3187436615861219], [
+             0.888410884742945,
+             -1.3010781117818295
+         ], [0.8812232020507231,
+             -1.2835687130679965], [0.8737247224089426, -1.2661959565824437], [
+                 0.8659270558807739,
+                 -1.2489415479874506
+             ], [0.8578403365760008, -1.2317882469234374
+                 ], [0.8810761292150717, -1.2118184809610988
+                     ], [0.9041762360363244, -1.1914283310662528
+                         ], [0.9271196683064211, -1.1706361926784383
+                             ], [0.949885054142765, -1.1494613527499984
+                                 ], [0.9724507572661958, -1.1279238910069835],
+    [0.9947950004998649,
+     -1.1060445714951375], [1.0168959923240788, -1.0838447258650978], [
+         1.0387320546908576,
+         -1.0613461300367468
+     ], [1.0602817502430675,
+         -1.0385708760262022], [1.0815240070740941, -1.0155412408147642], [
+             1.102438239206001,
+             -0.9922795541836177
+         ], [1.123004461055328,
+             -0.9688080674301042], [1.1432033942926907, -0.9451488248218312], [
+                 1.1630165656794818,
+                 -0.9213235395370803
+             ], [1.1824263946752058, -0.8973534756890716
+                 ], [1.2014162698440272, -0.8732593378443982
+                     ], [1.2199706133398625, -0.8490611692304734
+                         ], [1.2380749330068292, -0.8247782595916947
+                             ], [1.2557158618869284, -0.800429063408306
+                                 ], [1.2728811851714203, -0.776031128944234],
+    [1.2895598548592355,
+     -0.7516010383486005], [1.3057419925890068, -0.7271543588072293], [
+         1.3214188812865908,
+         -0.702705604531139
+     ], [1.3365829464142562,
+         -0.6782682091832445], [1.351227727719687, -0.6538545081853449], [
+             1.365347842462401, -0.6294757302167044
+         ], [1.3789389411433586,
+             -0.6051419971134862], [1.391997656782351, -0.5808623313043475], [
+                 1.4045215487801634,
+                 -0.5566446698699925
+             ], [1.4165090423718034, -0.5324958842910054
+                 ], [1.4279593646268474, -0.5084218049460658
+                     ], [1.4388724778869602, -0.4844272494383845], [
+                         1.449249011452494, -0.460516053858597
+                     ], [1.4590901922431447, -0.4366911061340692
+                         ], [1.468397775065033, -0.4129543806643518
+                             ], [1.4771739730209745, -0.38930697349737264
+                                 ], [1.485421388504271, -0.36574913735813025],
+    [1.4931429451209484,
+     -0.3422803158986589], [1.5003418207921726, -0.3188991765927855], [
+         1.5070213821985838,
+         -0.2956036417497373
+     ], [1.513185120641734,
+         -0.2723909171654731], [1.5188365893149296, -0.24925751796822435], [
+             1.5239793418959948,
+             -0.22619929124396096
+         ], [1.5286168722972349,
+             -0.2032114350471563], [1.5327525553319497, -0.1802885134112242], [
+                 1.5363895879810432,
+                 -0.15742446697009113
+             ], [1.5395309308654115, -0.1346126187862511
+                 ], [1.5421792494481048, -0.11184567494963411
+                     ], [1.5443368544016174, -0.0891157194637005
+                         ], [1.5460056404769755, -0.06641420286771664
+                             ], [1.5471870230984175, -0.043731923953761714],
+    [1.547881871775246,
+     -0.021059003819238205], [1.5480904392645911, 0.0016151486553661097], [
+         1.547812285227133, 0.024301881005710235
+     ], [1.5470461928818935, 0.047013352405288866
+         ], [1.5457900768723736,
+             0.06976260156314103], [1.5440408801865422, 0.09256362934244797], [
+                 1.5417944575035705,
+                 0.11543149864415554
+             ], [1.5390454417383017, 0.13838245474060726
+                 ], [1.5357870897759938, 0.16143407007284788
+                     ], [1.5320111023738603, 0.18460541860588778
+                         ], [1.5277074118667517, 0.20791728625864334
+                             ], [1.5228639295308124, 0.23139242581719505
+                                 ], [1.5174662420569858, 0.25505586728497265],
+    [1.5114972433117844,
+     0.27893529808946027], [1.5049366830391921, 0.30306153234932315], [
+         1.4977606078174102,
+         0.32746909510770755
+     ], [1.4899406605544634,
+         0.35219695697813347], [1.4814431917184283, 0.37728946847450484], [
+             1.4722281161523716,
+             0.40279756372788583
+         ], [1.4622474200998372,
+             0.4287803341537376], [1.4514431778273647, 0.45530712040457033], [
+                 1.4397448652396179,
+                 0.48246034696312606
+             ], [1.427065639662639, 0.5103394485717625
+                 ], [1.4132970536897833, 0.5390664502570618
+                     ], [1.3983013135749314, 0.5687941401756967
+                         ], [1.3818995257862978, 0.5997184788509978
+                             ], [1.3638530549860057, 0.6320982830132342
+                                 ], [1.3438323057823602, 0.6662881915757862],
+    [1.3213606855701236,
+     0.7027978462604986], [1.2957042956404132, 0.7424084023365868], [
+         1.2656247456808565,
+         0.786433646353686
+     ], [1.2287034601894442,
+         0.8374338794107618], [1.1786904071656892, 0.902017503822168], [
+             1.0497616572686415,
+             1.0497616572686426
+         ], [0.9097522267255194,
+             1.1864251300690412], [0.8484431816837388, 1.2397127624624213], [
+                 0.8000049977037023,
+                 1.2791960970308718
+             ], [0.7581818853313602, 1.3114777786351866
+                 ], [0.7205493452982701, 1.3391121846078957
+                     ], [0.6882849734317291, 1.3617105524217008
+                         ], [0.6882849734317291, 2.356194490192345
+                             ], [1.3376784164442164, 2.356194490192345
+                                 ], [1.3516056511178856, 2.3360528189227487],
+    [1.3708660530144583,
+     2.308792458458483], [1.3913233553973305, 2.2804469909122105], [
+         1.4131687110627962,
+         2.2508089759388485
+     ], [1.436656614785,
+         2.219604401551449], [1.4621380338543308, 2.18645769238238], [
+             1.4901198983671067,
+             2.1508288797419346
+         ], [1.5213822452925796, 2.111889875368928
+             ], [1.5572408030205178, 2.0682471032649676
+                 ], [1.6002650085871786, 2.0171829478543404
+                     ], [1.657096323745671, 1.9516778011159774
+                         ], [1.7977393629734166, 1.7977393629734166
+                             ], [1.8577014383575772, 1.7364646168980775
+                                 ], [1.8577014383575772, -1.7353804562372057])
 
 # Test data set in terms of (theta0, theta1) of a motion from Parker.
 # This is an example box lift profile.
 
-path_with_accelerations = [(1.3583511559969876, 0.99753029519739866, 0.63708920330895369, -0.77079007974101643, -0.21483375398380378, -0.17756921128311187), (1.4037744780290744, 0.94141413786797179, 0.62102298265172207, -0.78379235452915641, -0.23084099683729808, -0.18290283743090688), (1.4401396868545593, 0.89467165253531666, 0.606891738771278, -0.79478451004733031, -0.24685749210031543, -0.18849894149927623), (1.4710837705397868, 0.85345617877170399, 0.59376006111610313, -0.80464215016577489, -0.26311393698286972, -0.19415687958799988), (1.4982967960378542, 0.81597755723156562, 0.58119389263733834, -0.81376511301545618, -0.27984413029527577, -0.19986598102433009), (1.5227269571172215, 0.78122893136122973, 0.56894801707769227, -0.82237348805962973, -0.29704891442706877, -0.20550961547441812), (1.5449685847030388, 0.74857685672553398, 0.55686625320351735, -0.83060217676278458, -0.31482379304256025, -0.21106978462024978), (1.5654226738527495, 0.71759174238125301, 0.54484048864732038, -0.83853970802255351, -0.33321287621660045, -0.21650513557965106), (1.5843743655705165, 0.68796601612512265, 0.53279106733541426, -0.84624681894089859, -0.35226023809265267, -0.22178093106252236), (1.6020345343012865, 0.65947020123078393, 0.52065635488289785, -0.85376633812774205, -0.37188463022164508, -0.22678852570730737), (1.6185639244576269, 0.63192742228662813, 0.50838674358661662, -0.86112886314731996, -0.39227952043050979, -0.23159139751917215), (1.6340880277208036, 0.60519768777296334, 0.49594100637973321, -0.868356216187261, -0.41330592927121557, -0.23605025316897368), (1.6487067026409843, 0.57916772325998889, 0.48328397846038601, -0.87546364639743945, -0.43479093110952094, -0.24001893904573279), (1.6625006419641934, 0.55374413183503834, 0.47038503527185527, -0.88246128447218319, -0.45715591769925784, -0.2436817242766752), (1.6755358637715485, 0.52884863988931285, 0.45721707379330323, -0.88935513009814537, -0.48005035022840359, -0.24679412703629519), (1.6878669166029825, 
0.50441469954774187, 0.44375581873319114, -0.89614774079971626, -0.50380737957408728, -0.24947659419788909), (1.6995392207123023, 0.48038500205371104, 0.42997935653089725, -0.90283871923908732, -0.52783372117172589, -0.25138272577381626), (1.7105908129416818, 0.45670961972392227, 0.41586782149756141, -0.90942506840468851, -0.55267961927927023, -0.25273346631273114), (1.7210536699519405, 0.43334459201815007, 0.40140320353386438, -0.91590145113584731, -0.57770009966870806, -0.25318354738409404), (1.7309547270317198, 0.41025083198931545, 0.38656923494570777, -0.92226038979969771, -0.60317645612010684, -0.25282447346857195), (1.7403166729890138, 0.38739326814551689, 0.37135135004406289, -0.92849242044318914, -0.62907036438628838, -0.25159784841893079), (1.7491585775728877, 0.36474016215239319, 0.35573669259875446, -0.93458622156486959, -0.6551663269414254, -0.24938020059543903), (1.7574963917487576, 0.34226255982896742, 0.33971416817223166, -0.94052872574050006, -0.68164592193873064, -0.24620769226541636), (1.7653433501187576, 0.31993384454025714, 0.32327453032164366, -0.94630522456833177, -0.70757451300020135, -0.24172061458903144), (1.7727102970907476, 0.29772937021195395, 0.30641049133414705, -0.95189947515500117, -0.73378453575677771, -0.23620138680600353), (1.7796059529520747, 0.27562615695138953, 0.28911685939500692, -0.95729381154041093, -0.75990897245769828, -0.22950448838605128), (1.7860371320859008, 0.25360263640618674, 0.27139068895919466, -0.9624692690918778, -0.78523056852177775, -0.22141489322699751), (1.7920089227108962, 0.23163843702152076, 0.25323144050682866, -0.96740572540110403, -0.81025448009679157, -0.21209581939563796), (1.7975248354162883, 0.20971420159976159, 0.2346411508301583, -0.97208205946674009, -0.83408589705170777, -0.20133249883457041), (1.8025869261905516, 0.187811431247703, 0.21562459548465329, -0.97647633551565383, -0.85752536967991499, -0.18935884543741083), (1.8071958984562269, 0.16591235107247332, 0.19618945367704119, 
-0.98056600913243175, -0.8793458743889202, -0.17593847769244828), (1.8113511877225346, 0.14399979396679621, 0.17634645105214913, -0.98432816133711831, -0.89983233183814704, -0.16120962647016274), (1.8150510317784807, 0.12205709958524935, 0.15610948821796866, -0.98773975706575867, -0.91852354093170219, -0.14517104001739675), (1.8182925288197413, 0.10006802621144681, 0.13549574094137548, -0.99077792879471627, -0.93602333307674279, -0.12800867856143744), (1.821071685494891, 0.078016673692446373, 0.11452572935697461, -0.99342028231522084, -0.95112385045254688, -0.10965064890920556), (1.8233834565427998, 0.055887416001137141, 0.093223341590826445, -0.99564522224667962, -0.96368362602021329, -0.090231797498176183), (1.8252217774528923, 0.03366484230234558, 0.071615815013953282, -0.99743229095507402, -0.97395276760288163, -0.069931055757802438), (1.8265795913984428, 0.011333705660752967, 0.049733665943173709, -0.99876251555204698, -0.98177893706111263, -0.048889055531294821), (1.8274488715589303, -0.011121121248675203, 0.02761056315188902, -0.99961875572762016, -0.98689335559342106, -0.027260179511112551), (1.8278206398523622, -0.033714683874300592, 0.0052831412013899706, -0.99998604411213976, -0.98885489268650051, -0.0052254487261787384), (1.8276849830361703, -0.056461977241467148, -0.017209244112191453, -0.99985190999321838, -0.98812791978941406, 0.017006330622021722), (1.8270310671011307, -0.079377971247817633, -0.039824819694542858, -0.99920667718760625, -0.98450390283226485, 0.039237693528591036), (1.8258471508733223, -0.10247762977900209, -0.062519753866286318, -0.99804372668560926, -0.97719363903338485, 0.061212532653782022), (1.8241205997516765, -0.12577592348843702, -0.085248569812080663, -0.99635971483445407, -0.9676884196650537, 0.08279433361404552), (1.8218379005412455, -0.14928783595476772, -0.10796459505798382, -0.99415473957224865, -0.95496997071328249, 0.103708042997466), (1.8189846783932371, -0.17302836280361969, -0.13062045461685196, 
-0.99143244693508337, -0.93935720014890423, 0.12375848445745616), (1.8155457169306066, -0.19701250325490266, -0.15316858191907523, -0.98820007362522466, -0.92129127811992539, 0.14279680293047756), (1.8115049827211411, -0.22125524343262917, -0.17556174489393769, -0.98446842190585071, -0.90059657633599033, 0.16060369204333666), (1.8068456553568204, -0.24577153065112339, -0.19775357131740673, -0.98025176614541798, -0.87754964758424281, 0.17703366424976713), (1.8015501645066898, -0.27057623777063011, -0.21969906367786421, -0.97556769186923664, -0.85256620853260556, 0.19199796793136439), (1.795600235427889, -0.29568411659857541, -0.24135508622397667, -0.97043687190554384, -0.82563764988942245, 0.20534143268497296), (1.7889769445422168, -0.32110973920321939, -0.26268082100774776, -0.96488278369690872, -0.79733324597202848, 0.2170659935022537), (1.7816607868090406, -0.34686742590848585, -0.2836381738799823, -0.95893137727265387, -0.76782873106500538, 0.22711183686998365), (1.7736317567432636, -0.37297115865829411, -0.30419213280652752, -0.95261070030659223, -0.73738784843863214, 0.23546528518662449), (1.7648694450316516, -0.39943447838330554, -0.32431106313855179, -0.94595049253433039, -0.70610753433104578, 0.2420821140887684), (1.7553531527820141, -0.42627036498226889, -0.34396694344403017, -0.93898175797923322, -0.67467492193224388, 0.24714550708779132), (1.7450620254853675, -0.45349109855564618, -0.36313553561943973, -0.93173632684916963, -0.64280482438985487, 0.25052642309609846), (1.7339752087663065, -0.48110810061485765, -0.38179649614837224, -0.92424641493966653, -0.61125522193929116, 0.2525024014306243), (1.7220720279237476, -0.50913175415228962, -0.39993342129901005, -0.91654419343972093, -0.58016017720123902, 0.25315181203742848), (1.7093321931028456, -0.53757120171300954, -0.41753384214477446, -0.90866137293483673, -0.54964898595174738, 0.25256545404213349), (1.6957360316664474, -0.56643412097840984, -0.43458916639025197, -0.9006288116955985, 
-0.51972307699036913, 0.25078642241782151), (1.6812647489267576, -0.59572647787451594, -0.45109457862962754, -0.89247615157546845, -0.49088885529812037, 0.24811508685641395), (1.6659007178300929, -0.62545225787260295, -0.46704890024189227, -0.88423148823305242, -0.46296888429931843, 0.24453849915066983), (1.6496277974361373, -0.65561317697342414, -0.48245442463761296, -0.87592107415428122, -0.43630053035835431, 0.24031239163872659), (1.6324316790779632, -0.68620837486997288, -0.49731672574028513, -0.86756906024763358, -0.41095471910587472, 0.23557116495526012), (1.6143002579172752, -0.7172340939700278, -0.51164445121259294, -0.85919727393850864, -0.38675403181117768, 0.23030820687574571), (1.5952240262184549, -0.748683349319481, -0.52544910775441078, -0.85082503204836046, -0.36408233991880601, 0.22484810685703166), (1.5751964830679697, -0.780545595975073, -0.53874483689504815, -0.84246899095392713, -0.34276938388236106, 0.21919491474821423), (1.5542145534957212, -0.81280640198469889, -0.55154819264623711, -0.83414302801658013, -0.32291038254364612, 0.21351295685970414), (1.5322790080695103, -0.84544713677503402, -0.56387791937564813, -0.82585815491559456, -0.30444498192901781, 0.20786805829508584), (1.509394872119298, -0.87844468632349793, -0.57575473429345714, -0.81762245929198296, -0.28737482236853346, 0.20236376870812761), (1.4855718119204786, -0.91177120788178356, -0.58720111439137757, -0.8094410733694728, -0.27173204177162957, 0.1971250636942595), (1.4608244835702884, -0.94539393807497152, -0.59824108887263983, -0.80131616705547515, -0.25741396251510884, 0.19217806498346204), (1.4351728290976418, -0.97927506876108006, -0.60890003734400222, -0.79324696313473042, -0.24447483076597848, 0.18765975242966434), (1.4086423037364302, -1.0133717049339268, -0.61920449072176709, -0.78522977444184905, -0.2328103488745005, 0.18358578028961331), (1.3812640184459468, -1.047635918033063, -0.62918193604047223, -0.77725805969469564, -0.22240924347971955, 0.18003762666126619), 
(1.3530747828369902, -1.0820149061676485, -0.63886062143319422, -0.76932249829443622, -0.21321450222713362, 0.17705731568712871), (1.324117035769417, -1.1164512699023377, -0.64826936036845306, -0.76141108240389876, -0.20523063810290823, 0.17473421668463424), (1.294438654066159, -1.1508834084076087, -0.6574373330314689, -0.75350922564788103, -0.19832793065660315, 0.17304104866571926), (1.2640926339871807, -1.1852460360561055, -0.66639388399919663, -0.74559988691553936, -0.1925723524639939, 0.17211494490833781), (1.2331366451651338, -1.2194708141674491, -0.67516831336894412, -0.73766370970960415, -0.18786875722059337, 0.17195217715824379), (1.2016324623545263, -1.2534870868845998, -0.68378966242702388, -0.72967917440333774, -0.18419866624510314, 0.17261421082264014), (1.1696452862242701, -1.2872227045046678, -0.6922864921531301, -0.72162276348679166, -0.18150423850780073, 0.17412530594411615), (1.1372429700984137, -1.3206049124326309, -0.70068665547523046, -0.71346913797229905, -0.17977253485932801, 0.17655152952198189), (1.104495174566031, -1.3535612797241663, -0.70901706362016093, -0.70519132403585671, -0.17898758721428376, 0.17995840096834861), (1.0714724758096574, -1.3860206383273435, -0.71730344800943324, -0.69676090839955884, -0.17907637813730631, 0.18435585053172451), (1.0382454559924987, -1.4179140029119093, -0.72557011960647111, -0.68814824095848082, -0.18003314612431659, 0.18982321746977993), (1.0048838048727911, -1.4491754417344611, -0.73383972725116353, -0.67932264404179699, -0.18184420373071258, 0.19643734513631667), (0.97145546090878643, -1.4797428713062153, -0.74213301743428883, -0.67025262732336799, -0.18446052990619061, 0.20424250843873848)]
+path_with_accelerations = [
+    (1.3583511559969876, 0.99753029519739866, 0.63708920330895369,
+     -0.77079007974101643, -0.21483375398380378, -0.17756921128311187),
+    (1.4037744780290744, 0.94141413786797179, 0.62102298265172207,
+     -0.78379235452915641, -0.23084099683729808, -0.18290283743090688),
+    (1.4401396868545593, 0.89467165253531666, 0.606891738771278,
+     -0.79478451004733031, -0.24685749210031543, -0.18849894149927623),
+    (1.4710837705397868, 0.85345617877170399, 0.59376006111610313,
+     -0.80464215016577489, -0.26311393698286972, -0.19415687958799988),
+    (1.4982967960378542, 0.81597755723156562, 0.58119389263733834,
+     -0.81376511301545618, -0.27984413029527577, -0.19986598102433009),
+    (1.5227269571172215, 0.78122893136122973, 0.56894801707769227,
+     -0.82237348805962973, -0.29704891442706877, -0.20550961547441812),
+    (1.5449685847030388, 0.74857685672553398, 0.55686625320351735,
+     -0.83060217676278458, -0.31482379304256025, -0.21106978462024978),
+    (1.5654226738527495, 0.71759174238125301, 0.54484048864732038,
+     -0.83853970802255351, -0.33321287621660045, -0.21650513557965106),
+    (1.5843743655705165, 0.68796601612512265, 0.53279106733541426,
+     -0.84624681894089859, -0.35226023809265267, -0.22178093106252236),
+    (1.6020345343012865, 0.65947020123078393, 0.52065635488289785,
+     -0.85376633812774205, -0.37188463022164508, -0.22678852570730737),
+    (1.6185639244576269, 0.63192742228662813, 0.50838674358661662,
+     -0.86112886314731996, -0.39227952043050979, -0.23159139751917215),
+    (1.6340880277208036, 0.60519768777296334, 0.49594100637973321,
+     -0.868356216187261, -0.41330592927121557, -0.23605025316897368),
+    (1.6487067026409843, 0.57916772325998889, 0.48328397846038601,
+     -0.87546364639743945, -0.43479093110952094, -0.24001893904573279),
+    (1.6625006419641934, 0.55374413183503834, 0.47038503527185527,
+     -0.88246128447218319, -0.45715591769925784, -0.2436817242766752),
+    (1.6755358637715485, 0.52884863988931285, 0.45721707379330323,
+     -0.88935513009814537, -0.48005035022840359, -0.24679412703629519),
+    (1.6878669166029825, 0.50441469954774187, 0.44375581873319114,
+     -0.89614774079971626, -0.50380737957408728, -0.24947659419788909),
+    (1.6995392207123023, 0.48038500205371104, 0.42997935653089725,
+     -0.90283871923908732, -0.52783372117172589, -0.25138272577381626),
+    (1.7105908129416818, 0.45670961972392227, 0.41586782149756141,
+     -0.90942506840468851, -0.55267961927927023, -0.25273346631273114),
+    (1.7210536699519405, 0.43334459201815007, 0.40140320353386438,
+     -0.91590145113584731, -0.57770009966870806, -0.25318354738409404),
+    (1.7309547270317198, 0.41025083198931545, 0.38656923494570777,
+     -0.92226038979969771, -0.60317645612010684, -0.25282447346857195),
+    (1.7403166729890138, 0.38739326814551689, 0.37135135004406289,
+     -0.92849242044318914, -0.62907036438628838, -0.25159784841893079),
+    (1.7491585775728877, 0.36474016215239319, 0.35573669259875446,
+     -0.93458622156486959, -0.6551663269414254, -0.24938020059543903),
+    (1.7574963917487576, 0.34226255982896742, 0.33971416817223166,
+     -0.94052872574050006, -0.68164592193873064, -0.24620769226541636),
+    (1.7653433501187576, 0.31993384454025714, 0.32327453032164366,
+     -0.94630522456833177, -0.70757451300020135, -0.24172061458903144),
+    (1.7727102970907476, 0.29772937021195395, 0.30641049133414705,
+     -0.95189947515500117, -0.73378453575677771, -0.23620138680600353),
+    (1.7796059529520747, 0.27562615695138953, 0.28911685939500692,
+     -0.95729381154041093, -0.75990897245769828, -0.22950448838605128),
+    (1.7860371320859008, 0.25360263640618674, 0.27139068895919466,
+     -0.9624692690918778, -0.78523056852177775, -0.22141489322699751),
+    (1.7920089227108962, 0.23163843702152076, 0.25323144050682866,
+     -0.96740572540110403, -0.81025448009679157, -0.21209581939563796),
+    (1.7975248354162883, 0.20971420159976159, 0.2346411508301583,
+     -0.97208205946674009, -0.83408589705170777, -0.20133249883457041),
+    (1.8025869261905516, 0.187811431247703, 0.21562459548465329,
+     -0.97647633551565383, -0.85752536967991499, -0.18935884543741083),
+    (1.8071958984562269, 0.16591235107247332, 0.19618945367704119,
+     -0.98056600913243175, -0.8793458743889202, -0.17593847769244828),
+    (1.8113511877225346, 0.14399979396679621, 0.17634645105214913,
+     -0.98432816133711831, -0.89983233183814704, -0.16120962647016274),
+    (1.8150510317784807, 0.12205709958524935, 0.15610948821796866,
+     -0.98773975706575867, -0.91852354093170219, -0.14517104001739675),
+    (1.8182925288197413, 0.10006802621144681, 0.13549574094137548,
+     -0.99077792879471627, -0.93602333307674279, -0.12800867856143744),
+    (1.821071685494891, 0.078016673692446373, 0.11452572935697461,
+     -0.99342028231522084, -0.95112385045254688, -0.10965064890920556),
+    (1.8233834565427998, 0.055887416001137141, 0.093223341590826445,
+     -0.99564522224667962, -0.96368362602021329, -0.090231797498176183),
+    (1.8252217774528923, 0.03366484230234558, 0.071615815013953282,
+     -0.99743229095507402, -0.97395276760288163, -0.069931055757802438),
+    (1.8265795913984428, 0.011333705660752967, 0.049733665943173709,
+     -0.99876251555204698, -0.98177893706111263, -0.048889055531294821),
+    (1.8274488715589303, -0.011121121248675203, 0.02761056315188902,
+     -0.99961875572762016, -0.98689335559342106, -0.027260179511112551),
+    (1.8278206398523622, -0.033714683874300592, 0.0052831412013899706,
+     -0.99998604411213976, -0.98885489268650051, -0.0052254487261787384),
+    (1.8276849830361703, -0.056461977241467148, -0.017209244112191453,
+     -0.99985190999321838, -0.98812791978941406, 0.017006330622021722),
+    (1.8270310671011307, -0.079377971247817633, -0.039824819694542858,
+     -0.99920667718760625, -0.98450390283226485, 0.039237693528591036),
+    (1.8258471508733223, -0.10247762977900209, -0.062519753866286318,
+     -0.99804372668560926, -0.97719363903338485, 0.061212532653782022),
+    (1.8241205997516765, -0.12577592348843702, -0.085248569812080663,
+     -0.99635971483445407, -0.9676884196650537, 0.08279433361404552),
+    (1.8218379005412455, -0.14928783595476772, -0.10796459505798382,
+     -0.99415473957224865, -0.95496997071328249, 0.103708042997466),
+    (1.8189846783932371, -0.17302836280361969, -0.13062045461685196,
+     -0.99143244693508337, -0.93935720014890423, 0.12375848445745616),
+    (1.8155457169306066, -0.19701250325490266, -0.15316858191907523,
+     -0.98820007362522466, -0.92129127811992539, 0.14279680293047756),
+    (1.8115049827211411, -0.22125524343262917, -0.17556174489393769,
+     -0.98446842190585071, -0.90059657633599033, 0.16060369204333666),
+    (1.8068456553568204, -0.24577153065112339, -0.19775357131740673,
+     -0.98025176614541798, -0.87754964758424281, 0.17703366424976713),
+    (1.8015501645066898, -0.27057623777063011, -0.21969906367786421,
+     -0.97556769186923664, -0.85256620853260556, 0.19199796793136439),
+    (1.795600235427889, -0.29568411659857541, -0.24135508622397667,
+     -0.97043687190554384, -0.82563764988942245, 0.20534143268497296),
+    (1.7889769445422168, -0.32110973920321939, -0.26268082100774776,
+     -0.96488278369690872, -0.79733324597202848, 0.2170659935022537),
+    (1.7816607868090406, -0.34686742590848585, -0.2836381738799823,
+     -0.95893137727265387, -0.76782873106500538, 0.22711183686998365),
+    (1.7736317567432636, -0.37297115865829411, -0.30419213280652752,
+     -0.95261070030659223, -0.73738784843863214, 0.23546528518662449),
+    (1.7648694450316516, -0.39943447838330554, -0.32431106313855179,
+     -0.94595049253433039, -0.70610753433104578, 0.2420821140887684),
+    (1.7553531527820141, -0.42627036498226889, -0.34396694344403017,
+     -0.93898175797923322, -0.67467492193224388, 0.24714550708779132),
+    (1.7450620254853675, -0.45349109855564618, -0.36313553561943973,
+     -0.93173632684916963, -0.64280482438985487, 0.25052642309609846),
+    (1.7339752087663065, -0.48110810061485765, -0.38179649614837224,
+     -0.92424641493966653, -0.61125522193929116, 0.2525024014306243),
+    (1.7220720279237476, -0.50913175415228962, -0.39993342129901005,
+     -0.91654419343972093, -0.58016017720123902, 0.25315181203742848),
+    (1.7093321931028456, -0.53757120171300954, -0.41753384214477446,
+     -0.90866137293483673, -0.54964898595174738, 0.25256545404213349),
+    (1.6957360316664474, -0.56643412097840984, -0.43458916639025197,
+     -0.9006288116955985, -0.51972307699036913, 0.25078642241782151),
+    (1.6812647489267576, -0.59572647787451594, -0.45109457862962754,
+     -0.89247615157546845, -0.49088885529812037, 0.24811508685641395),
+    (1.6659007178300929, -0.62545225787260295, -0.46704890024189227,
+     -0.88423148823305242, -0.46296888429931843, 0.24453849915066983),
+    (1.6496277974361373, -0.65561317697342414, -0.48245442463761296,
+     -0.87592107415428122, -0.43630053035835431, 0.24031239163872659),
+    (1.6324316790779632, -0.68620837486997288, -0.49731672574028513,
+     -0.86756906024763358, -0.41095471910587472, 0.23557116495526012),
+    (1.6143002579172752, -0.7172340939700278, -0.51164445121259294,
+     -0.85919727393850864, -0.38675403181117768, 0.23030820687574571),
+    (1.5952240262184549, -0.748683349319481, -0.52544910775441078,
+     -0.85082503204836046, -0.36408233991880601, 0.22484810685703166),
+    (1.5751964830679697, -0.780545595975073, -0.53874483689504815,
+     -0.84246899095392713, -0.34276938388236106, 0.21919491474821423),
+    (1.5542145534957212, -0.81280640198469889, -0.55154819264623711,
+     -0.83414302801658013, -0.32291038254364612, 0.21351295685970414),
+    (1.5322790080695103, -0.84544713677503402, -0.56387791937564813,
+     -0.82585815491559456, -0.30444498192901781, 0.20786805829508584),
+    (1.509394872119298, -0.87844468632349793, -0.57575473429345714,
+     -0.81762245929198296, -0.28737482236853346, 0.20236376870812761),
+    (1.4855718119204786, -0.91177120788178356, -0.58720111439137757,
+     -0.8094410733694728, -0.27173204177162957, 0.1971250636942595),
+    (1.4608244835702884, -0.94539393807497152, -0.59824108887263983,
+     -0.80131616705547515, -0.25741396251510884, 0.19217806498346204),
+    (1.4351728290976418, -0.97927506876108006, -0.60890003734400222,
+     -0.79324696313473042, -0.24447483076597848, 0.18765975242966434),
+    (1.4086423037364302, -1.0133717049339268, -0.61920449072176709,
+     -0.78522977444184905, -0.2328103488745005, 0.18358578028961331),
+    (1.3812640184459468, -1.047635918033063, -0.62918193604047223,
+     -0.77725805969469564, -0.22240924347971955, 0.18003762666126619),
+    (1.3530747828369902, -1.0820149061676485, -0.63886062143319422,
+     -0.76932249829443622, -0.21321450222713362, 0.17705731568712871),
+    (1.324117035769417, -1.1164512699023377, -0.64826936036845306,
+     -0.76141108240389876, -0.20523063810290823, 0.17473421668463424),
+    (1.294438654066159, -1.1508834084076087, -0.6574373330314689,
+     -0.75350922564788103, -0.19832793065660315, 0.17304104866571926),
+    (1.2640926339871807, -1.1852460360561055, -0.66639388399919663,
+     -0.74559988691553936, -0.1925723524639939, 0.17211494490833781),
+    (1.2331366451651338, -1.2194708141674491, -0.67516831336894412,
+     -0.73766370970960415, -0.18786875722059337, 0.17195217715824379),
+    (1.2016324623545263, -1.2534870868845998, -0.68378966242702388,
+     -0.72967917440333774, -0.18419866624510314, 0.17261421082264014),
+    (1.1696452862242701, -1.2872227045046678, -0.6922864921531301,
+     -0.72162276348679166, -0.18150423850780073, 0.17412530594411615),
+    (1.1372429700984137, -1.3206049124326309, -0.70068665547523046,
+     -0.71346913797229905, -0.17977253485932801, 0.17655152952198189),
+    (1.104495174566031, -1.3535612797241663, -0.70901706362016093,
+     -0.70519132403585671, -0.17898758721428376, 0.17995840096834861),
+    (1.0714724758096574, -1.3860206383273435, -0.71730344800943324,
+     -0.69676090839955884, -0.17907637813730631, 0.18435585053172451),
+    (1.0382454559924987, -1.4179140029119093, -0.72557011960647111,
+     -0.68814824095848082, -0.18003314612431659, 0.18982321746977993),
+    (1.0048838048727911, -1.4491754417344611, -0.73383972725116353,
+     -0.67932264404179699, -0.18184420373071258, 0.19643734513631667),
+    (0.97145546090878643, -1.4797428713062153, -0.74213301743428883,
+     -0.67025262732336799, -0.18446052990619061, 0.20424250843873848)
+]
diff --git a/y2018/control_loops/python/polydrivetrain.py b/y2018/control_loops/python/polydrivetrain.py
index c92e432..f406aa2 100644
--- a/y2018/control_loops/python/polydrivetrain.py
+++ b/y2018/control_loops/python/polydrivetrain.py
@@ -12,20 +12,22 @@
 FLAGS = gflags.FLAGS
 
 try:
-  gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
+    gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
 except gflags.DuplicateFlagError:
-  pass
+    pass
+
 
 def main(argv):
-  if FLAGS.plot:
-    polydrivetrain.PlotPolyDrivetrainMotions(drivetrain.kDrivetrain)
-  elif len(argv) != 7:
-    glog.fatal('Expected .h file name and .cc file name')
-  else:
-    polydrivetrain.WritePolyDrivetrain(argv[1:3], argv[3:5], argv[5:7], 'y2018',
-                                       drivetrain.kDrivetrain)
+    if FLAGS.plot:
+        polydrivetrain.PlotPolyDrivetrainMotions(drivetrain.kDrivetrain)
+    elif len(argv) != 7:
+        glog.fatal('Expected .h file name and .cc file name')
+    else:
+        polydrivetrain.WritePolyDrivetrain(argv[1:3], argv[3:5], argv[5:7],
+                                           'y2018', drivetrain.kDrivetrain)
+
 
 if __name__ == '__main__':
-  argv = FLAGS(sys.argv)
-  glog.init()
-  sys.exit(main(argv))
+    argv = FLAGS(sys.argv)
+    glog.init()
+    sys.exit(main(argv))
diff --git a/y2018/control_loops/python/polydrivetrain_test.py b/y2018/control_loops/python/polydrivetrain_test.py
index ccc3b77..3a1987a 100644
--- a/y2018/control_loops/python/polydrivetrain_test.py
+++ b/y2018/control_loops/python/polydrivetrain_test.py
@@ -10,73 +10,72 @@
 
 
 class TestVelocityDrivetrain(unittest.TestCase):
-  def MakeBox(self, x1_min, x1_max, x2_min, x2_max):
-    H = numpy.matrix([[1, 0],
-                      [-1, 0],
-                      [0, 1],
-                      [0, -1]])
-    K = numpy.matrix([[x1_max],
-                      [-x1_min],
-                      [x2_max],
-                      [-x2_min]])
-    return polytope.HPolytope(H, K)
 
-  def test_coerce_inside(self):
-    """Tests coercion when the point is inside the box."""
-    box = self.MakeBox(1, 2, 1, 2)
+    def MakeBox(self, x1_min, x1_max, x2_min, x2_max):
+        H = numpy.matrix([[1, 0], [-1, 0], [0, 1], [0, -1]])
+        K = numpy.matrix([[x1_max], [-x1_min], [x2_max], [-x2_min]])
+        return polytope.HPolytope(H, K)
 
-    # x1 = x2
-    K = numpy.matrix([[1, -1]])
-    w = 0
+    def test_coerce_inside(self):
+        """Tests coercion when the point is inside the box."""
+        box = self.MakeBox(1, 2, 1, 2)
 
-    assert_array_equal(polydrivetrain.CoerceGoal(box, K, w,
-                                                 numpy.matrix([[1.5], [1.5]])),
-                       numpy.matrix([[1.5], [1.5]]))
+        # x1 = x2
+        K = numpy.matrix([[1, -1]])
+        w = 0
 
-  def test_coerce_outside_intersect(self):
-    """Tests coercion when the line intersects the box."""
-    box = self.MakeBox(1, 2, 1, 2)
+        assert_array_equal(
+            polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[1.5], [1.5]])),
+            numpy.matrix([[1.5], [1.5]]))
 
-    # x1 = x2
-    K = numpy.matrix([[1, -1]])
-    w = 0
+    def test_coerce_outside_intersect(self):
+        """Tests coercion when the line intersects the box."""
+        box = self.MakeBox(1, 2, 1, 2)
 
-    assert_array_equal(polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
-                       numpy.matrix([[2.0], [2.0]]))
+        # x1 = x2
+        K = numpy.matrix([[1, -1]])
+        w = 0
 
-  def test_coerce_outside_no_intersect(self):
-    """Tests coercion when the line does not intersect the box."""
-    box = self.MakeBox(3, 4, 1, 2)
+        assert_array_equal(
+            polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
+            numpy.matrix([[2.0], [2.0]]))
 
-    # x1 = x2
-    K = numpy.matrix([[1, -1]])
-    w = 0
+    def test_coerce_outside_no_intersect(self):
+        """Tests coercion when the line does not intersect the box."""
+        box = self.MakeBox(3, 4, 1, 2)
 
-    assert_array_equal(polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
-                       numpy.matrix([[3.0], [2.0]]))
+        # x1 = x2
+        K = numpy.matrix([[1, -1]])
+        w = 0
 
-  def test_coerce_middle_of_edge(self):
-    """Tests coercion when the line intersects the middle of an edge."""
-    box = self.MakeBox(0, 4, 1, 2)
+        assert_array_equal(
+            polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
+            numpy.matrix([[3.0], [2.0]]))
 
-    # x1 = x2
-    K = numpy.matrix([[-1, 1]])
-    w = 0
+    def test_coerce_middle_of_edge(self):
+        """Tests coercion when the line intersects the middle of an edge."""
+        box = self.MakeBox(0, 4, 1, 2)
 
-    assert_array_equal(polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
-                       numpy.matrix([[2.0], [2.0]]))
+        # x1 = x2
+        K = numpy.matrix([[-1, 1]])
+        w = 0
 
-  def test_coerce_perpendicular_line(self):
-    """Tests coercion when the line does not intersect and is in quadrant 2."""
-    box = self.MakeBox(1, 2, 1, 2)
+        assert_array_equal(
+            polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
+            numpy.matrix([[2.0], [2.0]]))
 
-    # x1 = -x2
-    K = numpy.matrix([[1, 1]])
-    w = 0
+    def test_coerce_perpendicular_line(self):
+        """Tests coercion when the line does not intersect and is in quadrant 2."""
+        box = self.MakeBox(1, 2, 1, 2)
 
-    assert_array_equal(polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
-                       numpy.matrix([[1.0], [1.0]]))
+        # x1 = -x2
+        K = numpy.matrix([[1, 1]])
+        w = 0
+
+        assert_array_equal(
+            polydrivetrain.CoerceGoal(box, K, w, numpy.matrix([[5], [5]])),
+            numpy.matrix([[1.0], [1.0]]))
 
 
 if __name__ == '__main__':
-  unittest.main()
+    unittest.main()
diff --git a/y2018/control_loops/python/spline_generate.py b/y2018/control_loops/python/spline_generate.py
index cea3d7e..0dc5804 100644
--- a/y2018/control_loops/python/spline_generate.py
+++ b/y2018/control_loops/python/spline_generate.py
@@ -7,7 +7,7 @@
 # see yXXXX/control_loops/python/drivetrain.py for the current values

 

 kDrivetrain = drivetrain.DrivetrainParams(

-    J = 6.0,

+    J=6.0,

     mass=68.0,

     robot_radius=0.616 / 2.0,

     wheel_radius=0.127 / 2.0 * 120.0 / 118.0,

@@ -26,12 +26,15 @@
 

 drivetrain = drivetrain.Drivetrain(kDrivetrain)

 # set up coefficients for Hemrite basis function evaluation

-coeffs = np.array([[1, 0, 0, -10, 15, -6], [0, 1, 0, -6, 8, -3], [0, 0, 0.5, -1.5, 1.5, -0.5], [0, 0, 0, 0.5, -1, 0.5], [0, 0, 0, -4, 7, -3], [0, 0, 0, 10, -15, 6]])

+coeffs = np.array([[1, 0, 0, -10, 15, -6], [0, 1, 0, -6, 8, -3],

+                   [0, 0, 0.5, -1.5, 1.5, -0.5], [0, 0, 0, 0.5, -1, 0.5],

+                   [0, 0, 0, -4, 7, -3], [0, 0, 0, 10, -15, 6]])

 coeffs_prime = np.empty_like(coeffs)

 for ii in range(0, len(coeffs)):

     for jj in range(0, len(coeffs[ii]) - 1):

         coeffs_prime[ii][jj] = (jj + 1) * coeffs[ii][jj]

 

+

 def RungeKutta(f, x, dt):

     """4th order RungeKutta integration of F starting at X."""

     a = f(x)

@@ -41,24 +44,31 @@
 

     return x + dt * (a + 2.0 * b + 2.0 * c + d) / 6.0

 

+

 def normalize(v):

     norm = np.linalg.norm(v)

     return v / norm

 

+

 def theta(v):

     return np.arctan2(v[1], v[0])

 

+

 # evaluate Nth hermite basis function at t

 def nth_H(N, t):

-    return coeffs[N][0] + coeffs[N][1]*t + coeffs[N][2]*t**2 + coeffs[N][3]*t**3 + coeffs[N][4]*t**4 + coeffs[N][5]*t**5

+    return coeffs[N][0] + coeffs[N][1] * t + coeffs[N][2] * t**2 + coeffs[N][

+        3] * t**3 + coeffs[N][4] * t**4 + coeffs[N][5] * t**5

+

 

 def nth_H_prime(N, t):

-    return coeffs[N][0] + coeffs[N][1]*t + coeffs[N][2]*t**2 + coeffs[N][3]*t**3 + coeffs[N][4]*t**4 

+    return coeffs[N][0] + coeffs[N][1] * t + coeffs[N][2] * t**2 + coeffs[N][

+        3] * t**3 + coeffs[N][4] * t**4

+

 

 # class defining a quintic Hermite spline, with utilities for modification and plotting

 class Hermite_Spline:

     # init method given known parameters, ie savefile loading(if necessary)

-    def __init__(self, start, control1, control2, end, resolution = 200):

+    def __init__(self, start, control1, control2, end, resolution=200):

         self.start = start

         self.end = end

         self.control1 = control1

@@ -86,16 +96,18 @@
     # take paramters and compute coeffcicents for Hermite basis functions, to be called every time he change control points

     def compute_coefficients(self):

         self.coeffs = np.append(self.coeffs, np.array(self.start))

-        self.coeffs = np.append(self.coeffs, np.array(self.control1) - np.array(self.start))

-        self.coeffs = np.append(self.coeffs, [0,0])

-        self.coeffs = np.append(self.coeffs, [0,0])

-        self.coeffs = np.append(self.coeffs, np.array(self.end) - np.array(self.control2))

+        self.coeffs = np.append(self.coeffs,

+                                np.array(self.control1) - np.array(self.start))

+        self.coeffs = np.append(self.coeffs, [0, 0])

+        self.coeffs = np.append(self.coeffs, [0, 0])

+        self.coeffs = np.append(self.coeffs,

+                                np.array(self.end) - np.array(self.control2))

         self.coeffs = np.append(self.coeffs, np.array(self.end))

 

-        self.coeffs = np.reshape(self.coeffs, newshape = (6, 2))

+        self.coeffs = np.reshape(self.coeffs, newshape=(6, 2))

 

     # setters for control points, set coefficients

-    def set_positions(self, p1 = None, p2 = None):

+    def set_positions(self, p1=None, p2=None):

         if p1 != None:

             self.start = p1

         if p2 != None:

@@ -103,7 +115,7 @@
 

         self.compute_coefficients()

 

-    def set_controls(self, c1 = None, c2 = None):

+    def set_controls(self, c1=None, c2=None):

         if c1 != None:

             self.control1 = c1

         if c2 != None:

@@ -111,12 +123,12 @@
 

         self.compute_coefficients()

 

-    def set_velocities(self, v1 = None, v2 = None):

+    def set_velocities(self, v1=None, v2=None):

         if v1 != None:

             self.control1 = self.start + v1

         if v2 != None:

             self.control2 = self.end + v2

-        

+

         self.compute_coefficients()

 

     def get_smoothness(self):

@@ -125,14 +137,25 @@
 

     # given Basis functions and controls compute coordinate given t

     def spline_eval_hermite(self, t):

-        return np.array(self.coeffs[0]*nth_H(0, t) + self.coeffs[1]*nth_H(1, t)+ self.coeffs[2]*nth_H(2, t) + self.coeffs[3]*nth_H(3, t) + self.coeffs[4]* nth_H(4, t)+ self.coeffs[5]*nth_H(5, t))

-    

+        return np.array(self.coeffs[0] * nth_H(0, t) +

+                        self.coeffs[1] * nth_H(1, t) +

+                        self.coeffs[2] * nth_H(2, t) +

+                        self.coeffs[3] * nth_H(3, t) +

+                        self.coeffs[4] * nth_H(4, t) +

+                        self.coeffs[5] * nth_H(5, t))

+

     # given Basis functions and controls compute velocity given t

     def spline_eval_hermite_v(self, t):

-         return normalize(np.array(self.coeffs[0]*nth_H_prime(0, t) + self.coeffs[1]*nth_H_prime(1, t)+ self.coeffs[2]*nth_H_prime(2, t) + self.coeffs[3]*nth_H_prime(3, t) + self.coeffs[4]* nth_H_prime(4, t)+ self.coeffs[5]*nth_H_prime(5, t)))

+        return normalize(

+            np.array(self.coeffs[0] * nth_H_prime(0, t) +

+                     self.coeffs[1] * nth_H_prime(1, t) +

+                     self.coeffs[2] * nth_H_prime(2, t) +

+                     self.coeffs[3] * nth_H_prime(3, t) +

+                     self.coeffs[4] * nth_H_prime(4, t) +

+                     self.coeffs[5] * nth_H_prime(5, t)))

 

     # take coefficients and compute spline points/properties

-    def setup(self, resolution_multiplier = 10, dt = .000001):

+    def setup(self, resolution_multiplier=10, dt=.000001):

         points = []

         velocities = []

         accelerations = []

@@ -145,7 +168,7 @@
         distance = 0

 

         # iterate through interim points and compute pos_vectors, and at predefined points arc length,

-        # velocity, and acceleration vectors and store them at their associated index 

+        # velocity, and acceleration vectors and store them at their associated index

         for i in range(0, self.resolution * resolution_multiplier):

             t = i / (1.0 * self.resolution * resolution_multiplier)

 

@@ -157,7 +180,7 @@
 

             distance = current_s + distance

             # at important points compute important values and store

-            if i % resolution_multiplier  == 0:

+            if i % resolution_multiplier == 0:

                 s.append(distance)

                 points.append(current_point)

 

@@ -171,7 +194,8 @@
                 if np.linalg.norm(v) == 0:

                     curvature = 0

                 else:

-                    curvature = np.linalg.det(np.column_stack((v, a)) / (np.linalg.norm(v)**(3/2)))

+                    curvature = np.linalg.det(

+                        np.column_stack((v, a)) / (np.linalg.norm(v)**(3 / 2)))

 

                 velocities.append(v)

                 accelerations.append(a)

@@ -181,18 +205,17 @@
                     curvatures.append(0.0001)

                 else:

                     curvatures.append(curvature)

- 

+

             last_point = current_point

 

         self.arc_lengths = np.array(s)

-        self.points = np.reshape(points, newshape = (-1, 2))

-        self.velocities = np.reshape(velocities, newshape = (-1, 2))

-        self.accelerations = np.reshape(accelerations, newshape = (-1, 2))

+        self.points = np.reshape(points, newshape=(-1, 2))

+        self.velocities = np.reshape(velocities, newshape=(-1, 2))

+        self.accelerations = np.reshape(accelerations, newshape=(-1, 2))

         self.thetas = np.array(thetas)

         self.omegas = np.array(omegas)

         self.curvatures = np.array(curvatures)

 

-

     def plot_diagnostics(self):

         plt.figure("Spline")

         plt.title('Spline')

@@ -201,38 +224,48 @@
 

         plt.figure("Diagnostics")

 

-        plt.subplot(2, 2, 1)        

+        plt.subplot(2, 2, 1)

         plt.title('theta')

         plt.xlabel('arc_length')

         plt.ylabel('theta')

-        theta, = plt.plot(self.arc_lengths, self.thetas, label = 'theta')

-        plt.legend(handles = [theta])

+        theta, = plt.plot(self.arc_lengths, self.thetas, label='theta')

+        plt.legend(handles=[theta])

 

         plt.subplot(2, 2, 2)

         plt.title('omegas')

         plt.xlabel('arc_length')

         plt.ylabel('omega')

-        omega, = plt.plot(self.arc_lengths, self.omegas, label = 'omega')

-        plt.legend(handles = [omega])

+        omega, = plt.plot(self.arc_lengths, self.omegas, label='omega')

+        plt.legend(handles=[omega])

 

         plt.subplot(2, 2, 3)

         plt.title('Velocities')

         plt.xlabel('arc_length')

         plt.ylabel('velocity')

-        dxds, = plt.plot(self.arc_lengths, self.velocities[:, 0], label = 'dx/ds')

-        dyds, = plt.plot(self.arc_lengths, self.velocities[:, 1], label = 'dy/ds')

-        plt.legend(handles = [dxds, dyds])

+        dxds, = plt.plot(self.arc_lengths,

+                         self.velocities[:, 0],

+                         label='dx/ds')

+        dyds, = plt.plot(self.arc_lengths,

+                         self.velocities[:, 1],

+                         label='dy/ds')

+        plt.legend(handles=[dxds, dyds])

 

         plt.subplot(2, 2, 4)

         plt.title('Accelerations')

         plt.xlabel('arc_length')

         plt.ylabel('acceleration')

-        dx2ds2, = plt.plot(self.arc_lengths, self.accelerations[:, 0], label = 'd^2x/ds^2')

-        dy2ds2, = plt.plot(self.arc_lengths, self.accelerations[:, 1], label = 'd^2x/ds^2')

-        plt.legend(handles = [dx2ds2, dy2ds2])

+        dx2ds2, = plt.plot(self.arc_lengths,

+                           self.accelerations[:, 0],

+                           label='d^2x/ds^2')

+        dy2ds2, = plt.plot(self.arc_lengths,

+                           self.accelerations[:, 1],

+                           label='d^2x/ds^2')

+        plt.legend(handles=[dx2ds2, dy2ds2])

+

 

 # class defining a number of splines with convinience methods

 class Path:

+

     def __init__(self):

         self.splines = []

         self.knot_accels = []

@@ -270,7 +303,7 @@
         else:

             print("index %f out of bounds, no spline of that index" % i)

 

-    def join(self, first_priority = False):

+    def join(self, first_priority=False):

         for i in range(0, len(self.splines)):

             if first_priority & i != len(self.splines):

                 print("unfinished")

@@ -278,7 +311,12 @@
 

 # class which takes a Path object along with constraints and reparamterizes it with respect to time

 class Trajectory:

-    def __init__(self, path, max_angular_accel=3, max_voltage=11, max_normal_accel = .2):

+

+    def __init__(self,

+                 path,

+                 max_angular_accel=3,

+                 max_voltage=11,

+                 max_normal_accel=.2):

         self.path = path

         self.A = drivetrain.A_continuous

         self.B = drivetrain.B_continuous

@@ -291,43 +329,46 @@
 

         self.max_velocities_adhering_to_normal_accel = []

         self.max_velocities_adhering_to_voltage = []

-        self.path.splines[0].setup(resolution_multiplier = 100)

+        self.path.splines[0].setup(resolution_multiplier=100)

 

         self.set_max_v_adhering_to_normal_accel()

         self.max_voltageK_pass()

 

     def set_max_v_adhering_to_normal_accel(self):

         Ks = self.path.get_K()

-        accels = np.full_like(Ks, fill_value = self.max_normal_accel)

+        accels = np.full_like(Ks, fill_value=self.max_normal_accel)

         max_velocities = np.sqrt(np.abs(accels / Ks))

         self.max_velocities_adhering_to_normal_accel = max_velocities

 

     def max_voltageK_pass(self):

         max_ds_dt = []

         Ks = self.path.get_K()

-        turning_radii = np.full_like(Ks, fill_value = 1) / np.abs(Ks)

+        turning_radii = np.full_like(Ks, fill_value=1) / np.abs(Ks)

 

-    

-

-        # compute max steady-state velocity given voltage constraints  

+        # compute max steady-state velocity given voltage constraints

         for i in range(0, len(Ks)):

-            v_ratio = (turning_radii[i] + self.robot_radius) / (turning_radii[i] - self.robot_radius)

-            matrix = np.array([[self.A[1, 1], self.A[1, 3], self.B[1, 1]], [self.A[3, 1] - 1, self.A[3, 3], self.B[3, 1]], [-1, v_ratio, 0]])

-            sols = np.array([-1 * self.max_voltage * self.B[1, 0], -1 * self.max_voltage * self.B[3, 0], 0])

+            v_ratio = (turning_radii[i] + self.robot_radius) / (

+                turning_radii[i] - self.robot_radius)

+            matrix = np.array([[self.A[1, 1], self.A[1, 3], self.B[1, 1]],

+                               [self.A[3, 1] - 1, self.A[3, 3], self.B[3, 1]],

+                               [-1, v_ratio, 0]])

+            sols = np.array([

+                -1 * self.max_voltage * self.B[1, 0],

+                -1 * self.max_voltage * self.B[3, 0], 0

+            ])

             Vs = np.dot(np.linalg.inv(matrix), sols)

             max_ds_dt.append((Vs[0] + Vs[1]) / 2)

-            

+

         self.max_velocities_adhering_to_voltage = max_ds_dt

+

     # compute the maximum acceleration we can ask for given voltage and, ya know, staying on the path.

-

-

     '''

     These methods use the continuous form of our drivetrain state equation

     in order to compute the maximum acceleration which adheres to the path

     and voltage constraints, as well as any arbitary set of constraints

     on velocity as a function of arc_length

     '''

-    

+

     def forward_accel_pass(self):

         points = self.path.get_points()

         velocities = self.path.get_velocities()

@@ -335,27 +376,37 @@
         arc_lenghts = self.path.get_S()

 

         for i in range(0, len(points)):

-            Xn1 =   

-        

-    

+            #Xn1 =

+            pass

+

     def backward_accelaration_pass(self):

 

-        print("max backward accel pass")    

+        print("max backward accel pass")

 

-    

-    def plot_diagnostics(self, i = 0):

+    def plot_diagnostics(self, i=0):

 

         plt.figure('max velocity')

         plt.title('max_v_normal_accel')

         plt.xlabel('arc_length')

         plt.ylabel('max V')

-        max_v_normal = plt.plot(self.path.get_S(), self.max_velocities_adhering_to_normal_accel, label = 'ds/dt (normal)')#   , label = 'ds/dt')

-        curvature = plt.plot(self.path.get_S(), 1000 * np.abs(self.path.get_K()), label = 'K')

-        max_v_K_V = plt.plot(self.path.get_S(), self.max_velocities_adhering_to_voltage, label = 'ds/dt (voltage)')

-        plt.legend(handles = [max_v_normal[0], curvature[0], max_v_K_V[0]])

+        max_v_normal = plt.plot(self.path.get_S(),

+                                self.max_velocities_adhering_to_normal_accel,

+                                label='ds/dt (normal)')  #   , label = 'ds/dt')

+        curvature = plt.plot(self.path.get_S(),

+                             1000 * np.abs(self.path.get_K()),

+                             label='K')

+        max_v_K_V = plt.plot(self.path.get_S(),

+                             self.max_velocities_adhering_to_voltage,

+                             label='ds/dt (voltage)')

+        plt.legend(handles=[max_v_normal[0], curvature[0], max_v_K_V[0]])

+

 

 def main():

-    A = Hermite_Spline(np.array([0,0]), np.array([0,400]), np.array([200,300]), np.array([200,200]), resolution = 200)

+    A = Hermite_Spline(np.array([0, 0]),

+                       np.array([0, 400]),

+                       np.array([200, 300]),

+                       np.array([200, 200]),

+                       resolution=200)

     A.plot_diagnostics()

     path = Path()

     path.add_spline(A)

@@ -363,4 +414,5 @@
     trajectory.plot_diagnostics()

     plt.show()

 

+

 main()

diff --git a/y2019/control_loops/python/drivetrain.py b/y2019/control_loops/python/drivetrain.py
index 9c4db49..0ef54ef 100644
--- a/y2019/control_loops/python/drivetrain.py
+++ b/y2019/control_loops/python/drivetrain.py
@@ -47,5 +47,6 @@
         # Write the generated constants out to a file.
         drivetrain.WriteDrivetrain(argv[1:3], argv[3:5], 'y2019', kDrivetrain)
 
+
 if __name__ == '__main__':
     sys.exit(main(sys.argv))
diff --git a/y2019/control_loops/python/elevator.py b/y2019/control_loops/python/elevator.py
index b7fc74b..546ead5 100755
--- a/y2019/control_loops/python/elevator.py
+++ b/y2019/control_loops/python/elevator.py
@@ -18,18 +18,18 @@
 first_stage_mass = 0.7957
 carriage_mass = 2.754
 
-kElevator = linear_system.LinearSystemParams(
-    name='Elevator',
-    motor=control_loop.Vex775Pro(),
-    G=(8.0 / 82.0),
-    radius=2.25 * 0.0254 / 2.0,
-    mass=first_stage_mass + carriage_mass,
-    q_pos=0.070,
-    q_vel=1.35,
-    kalman_q_pos=0.12,
-    kalman_q_vel=2.00,
-    kalman_q_voltage=35.0,
-    kalman_r_position=0.05)
+kElevator = linear_system.LinearSystemParams(name='Elevator',
+                                             motor=control_loop.Vex775Pro(),
+                                             G=(8.0 / 82.0),
+                                             radius=2.25 * 0.0254 / 2.0,
+                                             mass=first_stage_mass +
+                                             carriage_mass,
+                                             q_pos=0.070,
+                                             q_vel=1.35,
+                                             kalman_q_pos=0.12,
+                                             kalman_q_vel=2.00,
+                                             kalman_q_voltage=35.0,
+                                             kalman_r_position=0.05)
 
 kElevatorBall = copy.copy(kElevator)
 kElevatorBall.q_pos = 0.15
@@ -43,8 +43,10 @@
     if FLAGS.plot:
         R = numpy.matrix([[1.5], [0.0]])
         linear_system.PlotKick(kElevatorBall, R, plant_params=kElevatorModel)
-        linear_system.PlotMotion(
-            kElevatorBall, R, max_velocity=5.0, plant_params=kElevatorModel)
+        linear_system.PlotMotion(kElevatorBall,
+                                 R,
+                                 max_velocity=5.0,
+                                 plant_params=kElevatorModel)
 
     # Write the generated constants out to a file.
     if len(argv) != 5:
diff --git a/y2019/control_loops/python/intake.py b/y2019/control_loops/python/intake.py
index 828f54e..36d737c 100755
--- a/y2019/control_loops/python/intake.py
+++ b/y2019/control_loops/python/intake.py
@@ -20,7 +20,7 @@
 kIntake = angular_system.AngularSystemParams(
     name='Intake',
     motor=control_loop.BAG(),
-    G=(1.0 / 7.0) * (1.0 / 4.0) * (1.0 / 4.0)* (18.0 / 38.0),
+    G=(1.0 / 7.0) * (1.0 / 4.0) * (1.0 / 4.0) * (18.0 / 38.0),
     # Suneel: Sampled moment of inertia at 6 different positions
     # J = the average of the six.
     # 1. 0.686
diff --git a/y2019/control_loops/python/polydrivetrain.py b/y2019/control_loops/python/polydrivetrain.py
index eaccb92..a045af1 100644
--- a/y2019/control_loops/python/polydrivetrain.py
+++ b/y2019/control_loops/python/polydrivetrain.py
@@ -12,20 +12,22 @@
 FLAGS = gflags.FLAGS
 
 try:
-  gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
+    gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
 except gflags.DuplicateFlagError:
-  pass
+    pass
+
 
 def main(argv):
-  if FLAGS.plot:
-    polydrivetrain.PlotPolyDrivetrainMotions(drivetrain.kDrivetrain)
-  elif len(argv) != 7:
-    glog.fatal('Expected .h file name and .cc file name')
-  else:
-    polydrivetrain.WritePolyDrivetrain(argv[1:3], argv[3:5], argv[5:7], 'y2019',
-                                       drivetrain.kDrivetrain)
+    if FLAGS.plot:
+        polydrivetrain.PlotPolyDrivetrainMotions(drivetrain.kDrivetrain)
+    elif len(argv) != 7:
+        glog.fatal('Expected .h file name and .cc file name')
+    else:
+        polydrivetrain.WritePolyDrivetrain(argv[1:3], argv[3:5], argv[5:7],
+                                           'y2019', drivetrain.kDrivetrain)
+
 
 if __name__ == '__main__':
-  argv = FLAGS(sys.argv)
-  glog.init()
-  sys.exit(main(argv))
+    argv = FLAGS(sys.argv)
+    glog.init()
+    sys.exit(main(argv))
diff --git a/y2019/control_loops/python/wrist.py b/y2019/control_loops/python/wrist.py
index c983a1d..f20975c 100755
--- a/y2019/control_loops/python/wrist.py
+++ b/y2019/control_loops/python/wrist.py
@@ -25,17 +25,17 @@
 # Wrist with hatch
 #  0.446
 
-kWrist = angular_system.AngularSystemParams(
-    name='Wrist',
-    motor=control_loop.BAG(),
-    G=(6.0 / 60.0) * (20.0 / 100.0) * (24.0 / 84.0),
-    J=0.30,
-    q_pos=0.20,
-    q_vel=5.0,
-    kalman_q_pos=0.12,
-    kalman_q_vel=2.0,
-    kalman_q_voltage=4.0,
-    kalman_r_position=0.05)
+kWrist = angular_system.AngularSystemParams(name='Wrist',
+                                            motor=control_loop.BAG(),
+                                            G=(6.0 / 60.0) * (20.0 / 100.0) *
+                                            (24.0 / 84.0),
+                                            J=0.30,
+                                            q_pos=0.20,
+                                            q_vel=5.0,
+                                            kalman_q_pos=0.12,
+                                            kalman_q_vel=2.0,
+                                            kalman_q_voltage=4.0,
+                                            kalman_r_position=0.05)
 
 kWristBall = copy.copy(kWrist)
 kWristBall.J = 0.4007
diff --git a/y2019/vision/undistort.py b/y2019/vision/undistort.py
index 8e1cbd2..feeb7c8 100755
--- a/y2019/vision/undistort.py
+++ b/y2019/vision/undistort.py
@@ -74,8 +74,9 @@
                 cv2.destroyAllWindows()
         fd.close()
 
-    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
-        objpoints, imgpoints, grey.shape[::-1], None, None)
+    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints,
+                                                       grey.shape[::-1], None,
+                                                       None)
     newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (rows, cols),
                                                       1, (rows, cols))
 
diff --git a/y2020/control_loops/python/accelerator.py b/y2020/control_loops/python/accelerator.py
index bb6871c..54ec9d3 100644
--- a/y2020/control_loops/python/accelerator.py
+++ b/y2020/control_loops/python/accelerator.py
@@ -20,20 +20,19 @@
 # Gear ratio to the final wheel.
 G = (30.0 / 40.0) * numpy.power(G_per_wheel, 3.0)
 # Overall flywheel inertia.
-J = J_wheel * (
-    1.0 + numpy.power(G_per_wheel, -2.0) + numpy.power(G_per_wheel, -4.0) + numpy.power(G_per_wheel, -6.0))
+J = J_wheel * (1.0 + numpy.power(G_per_wheel, -2.0) +
+               numpy.power(G_per_wheel, -4.0) + numpy.power(G_per_wheel, -6.0))
 
 # The position and velocity are measured for the final wheel.
-kAccelerator = flywheel.FlywheelParams(
-    name='Accelerator',
-    motor=control_loop.Falcon(),
-    G=G,
-    J=J * 1.3,
-    q_pos=0.01,
-    q_vel=40.0,
-    q_voltage=1.0,
-    r_pos=0.03,
-    controller_poles=[.89])
+kAccelerator = flywheel.FlywheelParams(name='Accelerator',
+                                       motor=control_loop.Falcon(),
+                                       G=G,
+                                       J=J * 1.3,
+                                       q_pos=0.01,
+                                       q_vel=40.0,
+                                       q_voltage=1.0,
+                                       r_pos=0.03,
+                                       controller_poles=[.89])
 
 
 def main(argv):
diff --git a/y2020/control_loops/python/control_panel.py b/y2020/control_loops/python/control_panel.py
index 7a5bdf9..6bda841 100644
--- a/y2020/control_loops/python/control_panel.py
+++ b/y2020/control_loops/python/control_panel.py
@@ -17,17 +17,16 @@
 except gflags.DuplicateFlagError:
     pass
 
-kControlPanel = angular_system.AngularSystemParams(
-    name='ControlPanel',
-    motor=control_loop.BAG(),
-    G=1.0,
-    J=0.000009,
-    q_pos=0.20,
-    q_vel=5.0,
-    kalman_q_pos=0.12,
-    kalman_q_vel=2.0,
-    kalman_q_voltage=4.0,
-    kalman_r_position=0.05)
+kControlPanel = angular_system.AngularSystemParams(name='ControlPanel',
+                                                   motor=control_loop.BAG(),
+                                                   G=1.0,
+                                                   J=0.000009,
+                                                   q_pos=0.20,
+                                                   q_vel=5.0,
+                                                   kalman_q_pos=0.12,
+                                                   kalman_q_vel=2.0,
+                                                   kalman_q_voltage=4.0,
+                                                   kalman_r_position=0.05)
 
 
 def main(argv):
@@ -42,7 +41,9 @@
             'Expected .h file name and .cc file name for the control_panel and integral control_panel.'
         )
     else:
-        namespaces = ['y2020', 'control_loops', 'superstructure', 'control_panel']
+        namespaces = [
+            'y2020', 'control_loops', 'superstructure', 'control_panel'
+        ]
         angular_system.WriteAngularSystem(kControlPanel, argv[1:3], argv[3:5],
                                           namespaces)
 
diff --git a/y2020/control_loops/python/finisher.py b/y2020/control_loops/python/finisher.py
index 8c33cc3..7e703fc 100644
--- a/y2020/control_loops/python/finisher.py
+++ b/y2020/control_loops/python/finisher.py
@@ -22,14 +22,17 @@
 J = 0.00507464
 J = 0.0035
 
+
 def AddResistance(motor, resistance):
     motor.resistance += resistance
     return motor
 
+
 def ScaleKv(motor, scale):
     motor.Kv *= scale
     return motor
 
+
 # The position and velocity are measured for the final wheel.
 kFinisher = flywheel.FlywheelParams(
     name='Finisher',
diff --git a/y2020/control_loops/python/flywheel.py b/y2020/control_loops/python/flywheel.py
index 6f41a17..9153bc6 100755
--- a/y2020/control_loops/python/flywheel.py
+++ b/y2020/control_loops/python/flywheel.py
@@ -7,6 +7,7 @@
 
 
 class FlywheelParams(object):
+
     def __init__(self,
                  name,
                  motor,
@@ -31,6 +32,7 @@
 
 
 class VelocityFlywheel(control_loop.HybridControlLoop):
+
     def __init__(self, params, name="Flywheel"):
         super(VelocityFlywheel, self).__init__(name=name)
         self.params = params
@@ -76,6 +78,7 @@
 
 
 class Flywheel(VelocityFlywheel):
+
     def __init__(self, params, name="Flywheel"):
         super(Flywheel, self).__init__(params, name=name)
 
@@ -112,6 +115,7 @@
 
 
 class IntegralFlywheel(Flywheel):
+
     def __init__(self, params, name="IntegralFlywheel"):
         super(IntegralFlywheel, self).__init__(params, name=name)
 
@@ -125,7 +129,6 @@
         self.B_continuous = numpy.matrix(numpy.zeros((3, 1)))
         self.B_continuous[0:2, 0] = self.B_continuous_unaugmented
 
-
         # states
         # [position, velocity, voltage_error]
         self.C_unaugmented = self.C
@@ -146,24 +149,26 @@
         q_vel = self.params.q_vel
         q_voltage = self.params.q_voltage
         self.Q_continuous = numpy.matrix([[(q_pos**2.0), 0.0, 0.0],
-                               [0.0, (q_vel**2.0), 0.0],
-                               [0.0, 0.0, (q_voltage**2.0)]])
+                                          [0.0, (q_vel**2.0), 0.0],
+                                          [0.0, 0.0, (q_voltage**2.0)]])
 
         r_pos = self.params.r_pos
         self.R_continuous = numpy.matrix([[(r_pos**2.0)]])
 
-        _, _, self.Q, self.R = controls.kalmd(
-            A_continuous=self.A_continuous,
-            B_continuous=self.B_continuous,
-            Q_continuous=self.Q_continuous,
-            R_continuous=self.R_continuous,
-            dt=self.dt)
+        _, _, self.Q, self.R = controls.kalmd(A_continuous=self.A_continuous,
+                                              B_continuous=self.B_continuous,
+                                              Q_continuous=self.Q_continuous,
+                                              R_continuous=self.R_continuous,
+                                              dt=self.dt)
 
         glog.debug('Q_discrete %s' % (str(self.Q)))
         glog.debug('R_discrete %s' % (str(self.R)))
 
-        self.KalmanGain, self.P_steady_state = controls.kalman(
-            A=self.A, B=self.B, C=self.C, Q=self.Q, R=self.R)
+        self.KalmanGain, self.P_steady_state = controls.kalman(A=self.A,
+                                                               B=self.B,
+                                                               C=self.C,
+                                                               Q=self.Q,
+                                                               R=self.R)
         self.L = self.A * self.KalmanGain
 
         self.K_unaugmented = self.K
@@ -283,34 +288,32 @@
         for index, param in enumerate(params):
             flywheels.append(Flywheel(param, name=param.name + str(index)))
             integral_flywheels.append(
-                IntegralFlywheel(
-                    param, name='Integral' + param.name + str(index)))
+                IntegralFlywheel(param,
+                                 name='Integral' + param.name + str(index)))
     else:
         name = params.name
         flywheels.append(Flywheel(params, params.name))
         integral_flywheels.append(
             IntegralFlywheel(params, name='Integral' + params.name))
 
-    loop_writer = control_loop.ControlLoopWriter(
-        name, flywheels, namespaces=namespace)
+    loop_writer = control_loop.ControlLoopWriter(name,
+                                                 flywheels,
+                                                 namespaces=namespace)
     loop_writer.AddConstant(
-        control_loop.Constant('kOutputRatio', '%f',
-                              flywheels[0].G))
+        control_loop.Constant('kOutputRatio', '%f', flywheels[0].G))
     loop_writer.AddConstant(
         control_loop.Constant('kFreeSpeed', '%f',
                               flywheels[0].motor.free_speed))
     loop_writer.AddConstant(
-        control_loop.Constant(
-            'kBemf',
-            '%f',
-            flywheels[0].motor.Kv * flywheels[0].G,
-            comment="// Radians/sec / volt"))
+        control_loop.Constant('kBemf',
+                              '%f',
+                              flywheels[0].motor.Kv * flywheels[0].G,
+                              comment="// Radians/sec / volt"))
     loop_writer.AddConstant(
-        control_loop.Constant(
-            'kResistance',
-            '%f',
-            flywheels[0].motor.resistance,
-            comment="// Ohms"))
+        control_loop.Constant('kResistance',
+                              '%f',
+                              flywheels[0].motor.resistance,
+                              comment="// Ohms"))
     loop_writer.Write(plant_files[0], plant_files[1])
 
     integral_loop_writer = control_loop.ControlLoopWriter(
diff --git a/y2020/control_loops/python/hood.py b/y2020/control_loops/python/hood.py
index eacb7fd..8125f8b 100644
--- a/y2020/control_loops/python/hood.py
+++ b/y2020/control_loops/python/hood.py
@@ -29,17 +29,17 @@
 
 radians_per_turn_of_motor = 12.0 / 60.0 * radians_per_turn
 
-kHood = angular_system.AngularSystemParams(
-    name='Hood',
-    motor=control_loop.BAG(),
-    G=radians_per_turn_of_motor / (2.0 * numpy.pi),
-    J=0.1,
-    q_pos=0.15,
-    q_vel=10.0,
-    kalman_q_pos=0.12,
-    kalman_q_vel=10.0,
-    kalman_q_voltage=30.0,
-    kalman_r_position=0.02)
+kHood = angular_system.AngularSystemParams(name='Hood',
+                                           motor=control_loop.BAG(),
+                                           G=radians_per_turn_of_motor /
+                                           (2.0 * numpy.pi),
+                                           J=0.1,
+                                           q_pos=0.15,
+                                           q_vel=10.0,
+                                           kalman_q_pos=0.12,
+                                           kalman_q_vel=10.0,
+                                           kalman_q_voltage=30.0,
+                                           kalman_r_position=0.02)
 
 
 def main(argv):
diff --git a/y2020/control_loops/python/intake.py b/y2020/control_loops/python/intake.py
index 1035070..87896dd 100644
--- a/y2020/control_loops/python/intake.py
+++ b/y2020/control_loops/python/intake.py
@@ -17,17 +17,17 @@
 except gflags.DuplicateFlagError:
     pass
 
-kIntake = angular_system.AngularSystemParams(
-    name='Intake',
-    motor=control_loop.BAG(),
-    G=(12.0 / 24.0) * (1.0 / 5.0) * (1.0 / 5.0) * (16.0 / 32.0),
-    J=8 * 0.139 * 0.139,
-    q_pos=0.40,
-    q_vel=20.0,
-    kalman_q_pos=0.12,
-    kalman_q_vel=2.0,
-    kalman_q_voltage=4.0,
-    kalman_r_position=0.05)
+kIntake = angular_system.AngularSystemParams(name='Intake',
+                                             motor=control_loop.BAG(),
+                                             G=(12.0 / 24.0) * (1.0 / 5.0) *
+                                             (1.0 / 5.0) * (16.0 / 32.0),
+                                             J=8 * 0.139 * 0.139,
+                                             q_pos=0.40,
+                                             q_vel=20.0,
+                                             kalman_q_pos=0.12,
+                                             kalman_q_vel=2.0,
+                                             kalman_q_voltage=4.0,
+                                             kalman_r_position=0.05)
 
 
 def main(argv):
diff --git a/y2020/control_loops/python/polydrivetrain.py b/y2020/control_loops/python/polydrivetrain.py
index dad06b0..dbd5268 100644
--- a/y2020/control_loops/python/polydrivetrain.py
+++ b/y2020/control_loops/python/polydrivetrain.py
@@ -12,20 +12,22 @@
 FLAGS = gflags.FLAGS
 
 try:
-  gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
+    gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
 except gflags.DuplicateFlagError:
-  pass
+    pass
+
 
 def main(argv):
-  if FLAGS.plot:
-    polydrivetrain.PlotPolyDrivetrainMotions(drivetrain.kDrivetrain)
-  elif len(argv) != 7:
-    glog.fatal('Expected .h file name and .cc file name')
-  else:
-    polydrivetrain.WritePolyDrivetrain(argv[1:3], argv[3:5], argv[5:7], 'y2020',
-                                       drivetrain.kDrivetrain)
+    if FLAGS.plot:
+        polydrivetrain.PlotPolyDrivetrainMotions(drivetrain.kDrivetrain)
+    elif len(argv) != 7:
+        glog.fatal('Expected .h file name and .cc file name')
+    else:
+        polydrivetrain.WritePolyDrivetrain(argv[1:3], argv[3:5], argv[5:7],
+                                           'y2020', drivetrain.kDrivetrain)
+
 
 if __name__ == '__main__':
-  argv = FLAGS(sys.argv)
-  glog.init()
-  sys.exit(main(argv))
+    argv = FLAGS(sys.argv)
+    glog.init()
+    sys.exit(main(argv))
diff --git a/y2020/control_loops/python/turret.py b/y2020/control_loops/python/turret.py
index 6f15057..5f77f84 100644
--- a/y2020/control_loops/python/turret.py
+++ b/y2020/control_loops/python/turret.py
@@ -18,17 +18,18 @@
 except gflags.DuplicateFlagError:
     pass
 
-kTurret = angular_system.AngularSystemParams(
-    name='Turret',
-    motor=control_loop.MiniCIM(),
-    G=(26.0 / 150.0) * (14.0 / 60.0) * (20.0 / 60.0),
-    J=0.20,
-    q_pos=0.30,
-    q_vel=4.5,
-    kalman_q_pos=0.12,
-    kalman_q_vel=10.0,
-    kalman_q_voltage=20.0,
-    kalman_r_position=0.05)
+kTurret = angular_system.AngularSystemParams(name='Turret',
+                                             motor=control_loop.MiniCIM(),
+                                             G=(26.0 / 150.0) * (14.0 / 60.0) *
+                                             (20.0 / 60.0),
+                                             J=0.20,
+                                             q_pos=0.30,
+                                             q_vel=4.5,
+                                             kalman_q_pos=0.12,
+                                             kalman_q_vel=10.0,
+                                             kalman_q_voltage=20.0,
+                                             kalman_r_position=0.05)
+
 
 def main(argv):
     if FLAGS.plot:
@@ -38,13 +39,11 @@
 
     # Write the generated constants out to a file.
     if len(argv) != 5:
-        glog.fatal(
-            'Expected .h file name and .cc file name for the turret.'
-        )
+        glog.fatal('Expected .h file name and .cc file name for the turret.')
     else:
         namespaces = ['y2020', 'control_loops', 'superstructure', 'turret']
-        angular_system.WriteAngularSystem([kTurret],
-                                          argv[1:3], argv[3:5], namespaces)
+        angular_system.WriteAngularSystem([kTurret], argv[1:3], argv[3:5],
+                                          namespaces)
 
 
 if __name__ == '__main__':
diff --git a/y2020/vision/galactic_search_config.py b/y2020/vision/galactic_search_config.py
index 90988e3..870e039 100755
--- a/y2020/vision/galactic_search_config.py
+++ b/y2020/vision/galactic_search_config.py
@@ -20,7 +20,7 @@
 import os
 import sys
 
-_num_rects = 3 # can be 3 or 2, can be specified in commang line arg
+_num_rects = 3  # can be 3 or 2, can be specified in command-line arg
 
 _path = Path(Letter.kA, Alliance.kRed, [Rect(None, None, None, None)])
 
@@ -29,20 +29,24 @@
 
 _fig, _img_ax = plt.subplots()
 
-_txt = _img_ax.text(0, 0, "", size = 10, backgroundcolor = "white")
+_txt = _img_ax.text(0, 0, "", size=10, backgroundcolor="white")
 
 _confirm = Button(plt.axes([0.7, 0.05, 0.1, 0.075]), "Confirm")
 _cancel = Button(plt.axes([0.81, 0.05, 0.1, 0.075]), "Cancel")
 _submit = Button(plt.axes([0.4, 0.05, 0.1, 0.1]), "Submit")
 
+
 def draw_txt(txt):
-    txt.set_text("Click on top left point and bottom right point for rect #%u" % (_rect_index + 1))
+    txt.set_text(
+        "Click on top left point and bottom right point for rect #%u" %
+        (_rect_index + 1))
     txt.set_color(_path.alliance.value)
 
 
 def on_confirm(event):
     global _rect_index
-    if _path.rects[_rect_index].x1 != None and _path.rects[_rect_index].x2 != None:
+    if _path.rects[_rect_index].x1 != None and _path.rects[
+            _rect_index].x2 != None:
         _confirm.ax.set_visible(False)
         _cancel.ax.set_visible(False)
         _rect_index += 1
@@ -54,6 +58,7 @@
             _path.rects.append(Rect(None, None, None, None))
         plt.show()
 
+
 def on_cancel(event):
     global _rect_index
     if _rect_index < _num_rects:
@@ -66,6 +71,7 @@
         _path.rects[_rect_index].y2 = None
         plt.show()
 
+
 def on_submit(event):
     plt.close("all")
     dict = None
@@ -74,9 +80,12 @@
         dict[_path.letter.name] = {}
     if _path.alliance.name not in dict[_path.letter.name]:
         dict[_path.letter.name][_path.alliance.name] = []
-    dict[_path.letter.name][_path.alliance.name] = [rect.to_list() for rect in _path.rects]
+    dict[_path.letter.name][_path.alliance.name] = [
+        rect.to_list() for rect in _path.rects
+    ]
     with open(RECTS_JSON_PATH, 'w') as rects_json:
-        json.dump(dict, rects_json, indent = 2)
+        json.dump(dict, rects_json, indent=2)
+
 
 # Clears rect on screen
 def clear_rect():
@@ -85,6 +94,7 @@
     else:
         _img_ax.patches[-1].remove()
 
+
 def on_click(event):
     # This gets called for each click of the rectangle corners,
     # but also gets called when the user clicks on the Submit button.
@@ -94,9 +104,11 @@
     # the bounds of the axis
     if _rect_index < _num_rects and event.xdata != None and event.ydata != None:
         if _path.rects[_rect_index].x1 == None:
-            _path.rects[_rect_index].x1, _path.rects[_rect_index].y1 = int(event.xdata), int(event.ydata)
+            _path.rects[_rect_index].x1, _path.rects[_rect_index].y1 = int(
+                event.xdata), int(event.ydata)
         elif _path.rects[_rect_index].x2 == None:
-            _path.rects[_rect_index].x2, _path.rects[_rect_index].y2 = int(event.xdata), int(event.ydata)
+            _path.rects[_rect_index].x2, _path.rects[_rect_index].y2 = int(
+                event.xdata), int(event.ydata)
             if _path.rects[_rect_index].x2 < _path.rects[_rect_index].x1:
                 tmp = _path.rects[_rect_index].x1
                 _path.rects[_rect_index].x1 = _path.rects[_rect_index].x2
@@ -106,20 +118,27 @@
                 _path.rects[_rect_index].y1 = _path.rects[_rect_index].y2
                 _path.rects[_rect_index].y2 = tmp
 
-            _img_ax.add_patch(patches.Rectangle((_path.rects[_rect_index].x1, _path.rects[_rect_index].y1),
-                _path.rects[_rect_index].x2 - _path.rects[_rect_index].x1,
-                _path.rects[_rect_index].y2 - _path.rects[_rect_index].y1,
-                edgecolor = 'r', linewidth = 1, facecolor="none"))
+            _img_ax.add_patch(
+                patches.Rectangle(
+                    (_path.rects[_rect_index].x1, _path.rects[_rect_index].y1),
+                    _path.rects[_rect_index].x2 - _path.rects[_rect_index].x1,
+                    _path.rects[_rect_index].y2 - _path.rects[_rect_index].y1,
+                    edgecolor='r',
+                    linewidth=1,
+                    facecolor="none"))
             _confirm.ax.set_visible(True)
             _cancel.ax.set_visible(True)
             plt.show()
     else:
-        glog.info("Either submitted or user pressed out of the bounds of the axis")
+        glog.info(
+            "Either submitted or user pressed out of the bounds of the axis")
+
 
 def setup_button(button, on_clicked):
     button.on_clicked(on_clicked)
     button.ax.set_visible(False)
 
+
 def setup_ui():
     _img_ax.imshow(capture_img())
     release_stream()
@@ -131,11 +150,12 @@
     draw_txt(_txt)
     plt.show()
 
+
 def view_rects():
 
     rects_dict = load_json()
-    if (_path.letter.name in rects_dict and
-        _path.alliance.name in rects_dict[_path.letter.name]):
+    if (_path.letter.name in rects_dict
+            and _path.alliance.name in rects_dict[_path.letter.name]):
         _confirm.ax.set_visible(False)
         _cancel.ax.set_visible(False)
         _submit.ax.set_visible(False)
@@ -143,20 +163,26 @@
 
         for rect_list in rects_dict[_path.letter.name][_path.alliance.name]:
             rect = Rect.from_list(rect_list)
-            _img_ax.add_patch(patches.Rectangle((rect.x1, rect.y1),
-                rect.x2 - rect.x1, rect.y2 - rect.y1,
-                edgecolor = 'r', linewidth = 1, facecolor="none"))
+            _img_ax.add_patch(
+                patches.Rectangle((rect.x1, rect.y1),
+                                  rect.x2 - rect.x1,
+                                  rect.y2 - rect.y1,
+                                  edgecolor='r',
+                                  linewidth=1,
+                                  facecolor="none"))
         plt.show()
     else:
         glog.error("Could not find path %s %s in rects.json",
-            _path.alliance.value, _path.letter.value)
+                   _path.alliance.value, _path.letter.value)
+
 
 def main(argv):
     global _num_rects
 
     glog.setLevel("INFO")
-    opts = getopt.getopt(argv[1 : ], "a:l:n:",
-                        ["alliance = ", "letter = ", "_num_rects = ", "view"])[0]
+    opts = getopt.getopt(
+        argv[1:], "a:l:n:",
+        ["alliance = ", "letter = ", "_num_rects = ", "view"])[0]
     view = False
     for opt, arg in opts:
         if opt in ["-a", "--alliance"]:
diff --git a/y2020/vision/galactic_search_path.py b/y2020/vision/galactic_search_path.py
index 00b572a..11ec73d 100755
--- a/y2020/vision/galactic_search_path.py
+++ b/y2020/vision/galactic_search_path.py
@@ -8,6 +8,7 @@
 import numpy as np
 import os
 
+
 class Rect:
 
     # x1 and y1 are top left corner, x2 and y2 are bottom right
@@ -41,11 +42,14 @@
 
     @staticmethod
     def from_value(value):
-        return (Alliance.kRed if value == Alliance.kRed.value else Alliance.kBlue)
+        return (Alliance.kRed
+                if value == Alliance.kRed.value else Alliance.kBlue)
 
     @staticmethod
     def from_name(name):
-        return (Alliance.kRed if name == Alliance.kRed.name else Alliance.kBlue)
+        return (Alliance.kRed
+                if name == Alliance.kRed.name else Alliance.kBlue)
+
 
 class Letter(Enum):
     kA = 'A'
@@ -59,6 +63,7 @@
     def from_name(name):
         return (Letter.kA if name == Letter.kA.name else Letter.kB)
 
+
 class Path:
 
     def __init__(self, letter, alliance, rects):
@@ -72,15 +77,18 @@
     def to_dict(self):
         return {"alliance": self.alliance.name, "letter": self.letter.name}
 
+
 RECTS_JSON_PATH = "rects.json"
 
 AOS_SEND_PATH = "bazel-bin/aos/aos_send"
 
+
 def setup_if_pi():
     if os.path.isdir("/home/pi/bin"):
         AOS_SEND_PATH = "/home/pi/bin/aos_send.stripped"
         os.system("./starter_cmd stop camera_reader")
 
+
 setup_if_pi()
 
 # The minimum percentage of yellow for a region of a image to
@@ -89,12 +97,14 @@
 
 _paths = []
 
+
 def load_json():
     rects_dict = None
     with open(RECTS_JSON_PATH, 'r') as rects_json:
         rects_dict = json.load(rects_json)
     return rects_dict
 
+
 def _run_detection_loop():
     global img_fig, rects_dict
 
@@ -104,7 +114,9 @@
             rects = []
             for rect_list in rects_dict[letter][alliance]:
                 rects.append(Rect.from_list(rect_list))
-            _paths.append(Path(Letter.from_name(letter), Alliance.from_name(alliance), rects))
+            _paths.append(
+                Path(Letter.from_name(letter), Alliance.from_name(alliance),
+                     rects))
 
     plt.ion()
     img_fig = plt.figure()
@@ -113,6 +125,7 @@
     while running:
         _detect_path()
 
+
 def _detect_path():
     img = capture_img()
     img_fig.figimage(img)
@@ -138,7 +151,8 @@
                 current_path = path
                 num_current_paths += 1
         else:
-            glog.error("Error: len of pcts (%u) != len of rects: (%u)", len(pcts), len(rects))
+            glog.error("Error: len of pcts (%u) != len of rects: (%u)",
+                       len(pcts), len(rects))
 
     if num_current_paths != 1:
         if num_current_paths == 0:
@@ -147,24 +161,27 @@
         glog.warn("Expected 1 path but detected %u", num_current_paths)
         return
 
-
     path_dict = current_path.to_dict()
     glog.info("Path is %s", path_dict)
     os.system(AOS_SEND_PATH +
-              " /pi2/camera y2020.vision.GalacticSearchPath '" + json.dumps(path_dict) + "'")
+              " /pi2/camera y2020.vision.GalacticSearchPath '" +
+              json.dumps(path_dict) + "'")
+
 
 KERNEL = np.ones((5, 5), np.uint8)
 
+
 def _create_mask(img):
     hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)
-    lower_yellow = np.array([23, 100, 75], dtype = np.uint8)
-    higher_yellow = np.array([40, 255, 255], dtype = np.uint8)
+    lower_yellow = np.array([23, 100, 75], dtype=np.uint8)
+    higher_yellow = np.array([40, 255, 255], dtype=np.uint8)
     mask = cv.inRange(hsv, lower_yellow, higher_yellow)
-    mask = cv.erode(mask, KERNEL, iterations = 1)
-    mask = cv.dilate(mask, KERNEL, iterations = 3)
+    mask = cv.erode(mask, KERNEL, iterations=1)
+    mask = cv.dilate(mask, KERNEL, iterations=3)
 
     return mask
 
+
 # This function finds the percentage of yellow pixels in the rectangles
 # given that are regions of the given image. This allows us to determine
 # whether there is a ball in those rectangles
@@ -172,25 +189,30 @@
     pcts = np.zeros(len(rects))
     for i in range(len(rects)):
         rect = rects[i]
-        slice = mask[rect.y1 : rect.y2, rect.x1 : rect.x2]
+        slice = mask[rect.y1:rect.y2, rect.x1:rect.x2]
         yellow_px = np.count_nonzero(slice)
         pcts[i] = yellow_px / (slice.shape[0] * slice.shape[1])
 
     return pcts
 
+
 _video_stream = cv.VideoCapture(0)
 
+
 def capture_img():
     global _video_stream
     return _video_stream.read()[1]
 
+
 def release_stream():
     global _video_stream
     _video_stream.release()
 
+
 def main():
     _run_detection_loop()
     release_stream()
 
+
 if __name__ == "__main__":
     main()
diff --git a/y2020/vision/sift/demo_sift_training.py b/y2020/vision/sift/demo_sift_training.py
index 67874c9..4e9c306 100644
--- a/y2020/vision/sift/demo_sift_training.py
+++ b/y2020/vision/sift/demo_sift_training.py
@@ -8,87 +8,89 @@
 import frc971.vision.sift.TrainingData as TrainingData
 import frc971.vision.sift.Feature as Feature
 
+
 def main():
-  image4_cleaned_path = sys.argv[1]
-  output_path = sys.argv[2]
+    image4_cleaned_path = sys.argv[1]
+    output_path = sys.argv[2]
 
-  image4_cleaned = cv2.imread(image4_cleaned_path)
+    image4_cleaned = cv2.imread(image4_cleaned_path)
 
-  image = cv2.cvtColor(image4_cleaned, cv2.COLOR_BGR2GRAY)
-  image = cv2.resize(image, (640, 480))
-  sift = cv2.SIFT_create()
-  keypoints, descriptors = sift.detectAndCompute(image, None)
+    image = cv2.cvtColor(image4_cleaned, cv2.COLOR_BGR2GRAY)
+    image = cv2.resize(image, (640, 480))
+    sift = cv2.SIFT_create()
+    keypoints, descriptors = sift.detectAndCompute(image, None)
 
-  fbb = flatbuffers.Builder(0)
+    fbb = flatbuffers.Builder(0)
 
-  features_vector = []
+    features_vector = []
 
-  for keypoint, descriptor in zip(keypoints, descriptors):
-    Feature.FeatureStartDescriptorVector(fbb, len(descriptor))
-    for n in reversed(descriptor):
-      assert n == round(n)
-      fbb.PrependUint8(int(round(n)))
-    descriptor_vector = fbb.EndVector()
+    for keypoint, descriptor in zip(keypoints, descriptors):
+        Feature.FeatureStartDescriptorVector(fbb, len(descriptor))
+        for n in reversed(descriptor):
+            assert n == round(n)
+            fbb.PrependUint8(int(round(n)))
+        descriptor_vector = fbb.EndVector()
 
-    Feature.FeatureStart(fbb)
+        Feature.FeatureStart(fbb)
 
-    Feature.FeatureAddDescriptor(fbb, descriptor_vector)
-    Feature.FeatureAddX(fbb, keypoint.pt[0])
-    Feature.FeatureAddY(fbb, keypoint.pt[1])
-    Feature.FeatureAddSize(fbb, keypoint.size)
-    Feature.FeatureAddAngle(fbb, keypoint.angle)
-    Feature.FeatureAddResponse(fbb, keypoint.response)
-    Feature.FeatureAddOctave(fbb, keypoint.octave)
+        Feature.FeatureAddDescriptor(fbb, descriptor_vector)
+        Feature.FeatureAddX(fbb, keypoint.pt[0])
+        Feature.FeatureAddY(fbb, keypoint.pt[1])
+        Feature.FeatureAddSize(fbb, keypoint.size)
+        Feature.FeatureAddAngle(fbb, keypoint.angle)
+        Feature.FeatureAddResponse(fbb, keypoint.response)
+        Feature.FeatureAddOctave(fbb, keypoint.octave)
 
-    features_vector.append(Feature.FeatureEnd(fbb))
+        features_vector.append(Feature.FeatureEnd(fbb))
 
-  TrainingImage.TrainingImageStartFeaturesVector(fbb, len(features_vector))
-  for feature in reversed(features_vector):
-    fbb.PrependUOffsetTRelative(feature)
-  features_vector_table = fbb.EndVector()
+    TrainingImage.TrainingImageStartFeaturesVector(fbb, len(features_vector))
+    for feature in reversed(features_vector):
+        fbb.PrependUOffsetTRelative(feature)
+    features_vector_table = fbb.EndVector()
 
-  TrainingImage.TrainingImageStart(fbb)
-  TrainingImage.TrainingImageAddFeatures(fbb, features_vector_table)
-  # TODO(Brian): Fill out the transformation matrices.
-  training_image_offset = TrainingImage.TrainingImageEnd(fbb)
+    TrainingImage.TrainingImageStart(fbb)
+    TrainingImage.TrainingImageAddFeatures(fbb, features_vector_table)
+    # TODO(Brian): Fill out the transformation matrices.
+    training_image_offset = TrainingImage.TrainingImageEnd(fbb)
 
-  TrainingData.TrainingDataStartImagesVector(fbb, 1)
-  fbb.PrependUOffsetTRelative(training_image_offset)
-  images_offset = fbb.EndVector()
+    TrainingData.TrainingDataStartImagesVector(fbb, 1)
+    fbb.PrependUOffsetTRelative(training_image_offset)
+    images_offset = fbb.EndVector()
 
-  TrainingData.TrainingDataStart(fbb)
-  TrainingData.TrainingDataAddImages(fbb, images_offset)
-  fbb.Finish(TrainingData.TrainingDataEnd(fbb))
+    TrainingData.TrainingDataStart(fbb)
+    TrainingData.TrainingDataAddImages(fbb, images_offset)
+    fbb.Finish(TrainingData.TrainingDataEnd(fbb))
 
-  bfbs = fbb.Output()
+    bfbs = fbb.Output()
 
-  output_prefix = [
-      b'#ifndef Y2020_VISION_SIFT_DEMO_SIFT_H_',
-      b'#define Y2020_VISION_SIFT_DEMO_SIFT_H_',
-      b'#include <string_view>',
-      b'namespace frc971 {',
-      b'namespace vision {',
-      b'inline std::string_view DemoSiftData() {',
-  ]
-  output_suffix = [
-      b'  return std::string_view(kData, sizeof(kData));',
-      b'}',
-      b'}  // namespace vision',
-      b'}  // namespace frc971',
-      b'#endif  // Y2020_VISION_SIFT_DEMO_SIFT_H_',
-  ]
+    output_prefix = [
+        b'#ifndef Y2020_VISION_SIFT_DEMO_SIFT_H_',
+        b'#define Y2020_VISION_SIFT_DEMO_SIFT_H_',
+        b'#include <string_view>',
+        b'namespace frc971 {',
+        b'namespace vision {',
+        b'inline std::string_view DemoSiftData() {',
+    ]
+    output_suffix = [
+        b'  return std::string_view(kData, sizeof(kData));',
+        b'}',
+        b'}  // namespace vision',
+        b'}  // namespace frc971',
+        b'#endif  // Y2020_VISION_SIFT_DEMO_SIFT_H_',
+    ]
 
-  with open(output_path, 'wb') as output:
-    for line in output_prefix:
-      output.write(line)
-      output.write(b'\n')
-    output.write(b'alignas(64) static constexpr char kData[] = "')
-    for byte in fbb.Output():
-      output.write(b'\\x' + (b'%x' % byte).zfill(2))
-    output.write(b'";\n')
-    for line in output_suffix:
-      output.write(line)
-      output.write(b'\n')
+    with open(output_path, 'wb') as output:
+        for line in output_prefix:
+            output.write(line)
+            output.write(b'\n')
+        output.write(b'alignas(64) static constexpr char kData[] = "')
+        for byte in fbb.Output():
+            output.write(b'\\x' + (b'%x' % byte).zfill(2))
+        output.write(b'";\n')
+        for line in output_suffix:
+            output.write(line)
+            output.write(b'\n')
+
 
 if __name__ == '__main__':
-  main()
+    main()
diff --git a/y2020/vision/sift/fast_gaussian_runner.py b/y2020/vision/sift/fast_gaussian_runner.py
index d812f3f..9767300 100755
--- a/y2020/vision/sift/fast_gaussian_runner.py
+++ b/y2020/vision/sift/fast_gaussian_runner.py
@@ -8,190 +8,211 @@
 
 from bazel_tools.tools.python.runfiles import runfiles
 
+
 def main(params):
-  r = runfiles.Create()
-  generator = r.Rlocation('org_frc971/y2020/vision/sift/fast_gaussian_generator')
+    r = runfiles.Create()
+    generator = r.Rlocation(
+        'org_frc971/y2020/vision/sift/fast_gaussian_generator')
 
-  ruledir = sys.argv[2]
-  target_cpu = sys.argv[3]
+    ruledir = sys.argv[2]
+    target_cpu = sys.argv[3]
 
-  target = {
-      'aarch64': 'arm-64-linux-no_asserts',
-      'armv7': 'arm-32-linux-no_asserts',
-      'k8': 'x86-64-linux-no_asserts',
-  }[target_cpu]
+    target = {
+        'aarch64': 'arm-64-linux-no_asserts',
+        'armv7': 'arm-32-linux-no_asserts',
+        'k8': 'x86-64-linux-no_asserts',
+    }[target_cpu]
 
-  commands = []
+    commands = []
 
-  amd64_debian_sysroot = r.Rlocation('amd64_debian_sysroot/usr/lib/x86_64-linux-gnu/libc.so.6').rsplit('/', 4)[0]
-  env = os.environ.copy()
-  env['LD_LIBRARY_PATH'] = ':'.join([
-      # TODO(brian): Figure this out again.  It is a bit aggressive.
-      #amd64_debian_sysroot + '/lib/x86_64-linux-gnu',
-      #amd64_debian_sysroot + '/lib',
-      #amd64_debian_sysroot + '/usr/lib/x86_64-linux-gnu',
-      #amd64_debian_sysroot + '/usr/lib',
-  ])
+    amd64_debian_sysroot = r.Rlocation(
+        'amd64_debian_sysroot/usr/lib/x86_64-linux-gnu/libc.so.6').rsplit(
+            '/', 4)[0]
+    env = os.environ.copy()
+    env['LD_LIBRARY_PATH'] = ':'.join([
+        # TODO(brian): Figure this out again.  It is a bit aggressive.
+        #amd64_debian_sysroot + '/lib/x86_64-linux-gnu',
+        #amd64_debian_sysroot + '/lib',
+        #amd64_debian_sysroot + '/usr/lib/x86_64-linux-gnu',
+        #amd64_debian_sysroot + '/usr/lib',
+    ])
 
-  all_header = [
-      '#ifndef Y2020_VISION_SIFT_FAST_GAUSSIAN_ALL_H_',
-      '#define Y2020_VISION_SIFT_FAST_GAUSSIAN_ALL_H_',
-      '#include "HalideBuffer.h"',
-  ]
+    all_header = [
+        '#ifndef Y2020_VISION_SIFT_FAST_GAUSSIAN_ALL_H_',
+        '#define Y2020_VISION_SIFT_FAST_GAUSSIAN_ALL_H_',
+        '#include "HalideBuffer.h"',
+    ]
 
-  for cols, rows in params['sizes']:
-    for sigma, sigma_name, filter_width in params['sigmas']:
-      name = "fast_gaussian_%dx%d_%s" % (cols, rows, sigma_name)
+    for cols, rows in params['sizes']:
+        for sigma, sigma_name, filter_width in params['sigmas']:
+            name = "fast_gaussian_%dx%d_%s" % (cols, rows, sigma_name)
 
-      commands.append([
-          generator,
-          '-g', 'gaussian_generator',
-          '-o', ruledir,
-          '-f', name,
-          '-e', 'o,h,html',
-          'target=%s-no_runtime' % target,
-          'cols=%s' % cols,
-          'rows=%s' % rows,
-          'sigma=%s' % sigma,
-          'filter_width=%s' % filter_width,
-      ])
-      all_header += [
-          '#include "y2020/vision/sift/%s.h"' % name,
-      ]
+            commands.append([
+                generator,
+                '-g',
+                'gaussian_generator',
+                '-o',
+                ruledir,
+                '-f',
+                name,
+                '-e',
+                'o,h,html',
+                'target=%s-no_runtime' % target,
+                'cols=%s' % cols,
+                'rows=%s' % rows,
+                'sigma=%s' % sigma,
+                'filter_width=%s' % filter_width,
+            ])
+            all_header += [
+                '#include "y2020/vision/sift/%s.h"' % name,
+            ]
 
-      name = "fast_gaussian_subtract_%dx%d_%s" % (cols, rows, sigma_name)
+            name = "fast_gaussian_subtract_%dx%d_%s" % (cols, rows, sigma_name)
 
-      commands.append([
-          generator,
-          '-g', 'gaussian_and_subtract_generator',
-          '-o', ruledir,
-          '-f', name,
-          '-e', 'o,h,html',
-          'target=%s-no_runtime' % target,
-          'cols=%s' % cols,
-          'rows=%s' % rows,
-          'sigma=%s' % sigma,
-          'filter_width=%s' % filter_width,
-      ])
-      all_header += [
-          '#include "y2020/vision/sift/%s.h"' % name,
-      ]
+            commands.append([
+                generator,
+                '-g',
+                'gaussian_and_subtract_generator',
+                '-o',
+                ruledir,
+                '-f',
+                name,
+                '-e',
+                'o,h,html',
+                'target=%s-no_runtime' % target,
+                'cols=%s' % cols,
+                'rows=%s' % rows,
+                'sigma=%s' % sigma,
+                'filter_width=%s' % filter_width,
+            ])
+            all_header += [
+                '#include "y2020/vision/sift/%s.h"' % name,
+            ]
 
-    name = 'fast_subtract_%dx%d' % (cols, rows)
+        name = 'fast_subtract_%dx%d' % (cols, rows)
+        commands.append([
+            generator,
+            '-g',
+            'subtract_generator',
+            '-o',
+            ruledir,
+            '-f',
+            name,
+            '-e',
+            'o,h,html',
+            'target=%s-no_runtime' % target,
+            'cols=%s' % cols,
+            'rows=%s' % rows,
+        ])
+        all_header += [
+            '#include "y2020/vision/sift/%s.h"' % name,
+        ]
     commands.append([
         generator,
-        '-g', 'subtract_generator',
-        '-o', ruledir,
-        '-f', name,
-        '-e', 'o,h,html',
-        'target=%s-no_runtime' % target,
-        'cols=%s' % cols,
-        'rows=%s' % rows,
+        '-r',
+        'fast_gaussian_runtime',
+        '-o',
+        ruledir,
+        '-e',
+        'o',
+        'target=%s' % target,
     ])
+
     all_header += [
-        '#include "y2020/vision/sift/%s.h"' % name,
+        'namespace frc971 {',
+        'namespace vision {',
+        '// 0 is success. 1 is non-implemented size. Negative is a Halide error.',
+        'inline int DoGeneratedFastGaussian(',
+        '    Halide::Runtime::Buffer<const int16_t, 2> input,',
+        '    Halide::Runtime::Buffer<int16_t, 2> output,',
+        '    double sigma) {',
     ]
-  commands.append([
-      generator,
-      '-r', 'fast_gaussian_runtime',
-      '-o', ruledir,
-      '-e', 'o',
-      'target=%s' % target,
-  ])
 
-  all_header += [
-      'namespace frc971 {',
-      'namespace vision {',
-      '// 0 is success. 1 is non-implemented size. Negative is a Halide error.',
-      'inline int DoGeneratedFastGaussian(',
-      '    Halide::Runtime::Buffer<const int16_t, 2> input,',
-      '    Halide::Runtime::Buffer<int16_t, 2> output,',
-      '    double sigma) {',
-  ]
+    for sigma, sigma_name, filter_width in params['sigmas']:
+        for cols, rows in params['sizes']:
+            name = "fast_gaussian_%dx%d_%s" % (cols, rows, sigma_name)
+            all_header += [
+                '  if (input.dim(0).extent() == %s' % cols,
+                '      && input.dim(1).extent() == %s' % rows,
+                '      && sigma == %s) {' % sigma,
+                '    return %s(input, output);' % name,
+                '  }',
+            ]
 
-  for sigma, sigma_name, filter_width in params['sigmas']:
-    for cols, rows in params['sizes']:
-      name = "fast_gaussian_%dx%d_%s" % (cols, rows, sigma_name)
-      all_header += [
-          '  if (input.dim(0).extent() == %s' % cols,
-          '      && input.dim(1).extent() == %s' % rows,
-          '      && sigma == %s) {' % sigma,
-          '    return %s(input, output);' % name,
-          '  }',
-      ]
-
-  all_header += [
-      '  return 1;',
-      '}',
-      'inline int DoGeneratedFastGaussianAndSubtract(',
-      '    Halide::Runtime::Buffer<const int16_t, 2> input,',
-      '    Halide::Runtime::Buffer<int16_t, 2> blurred,',
-      '    Halide::Runtime::Buffer<int16_t, 2> difference,',
-      '    double sigma) {',
-  ]
-
-  for sigma, sigma_name, filter_width in params['sigmas']:
-    for cols, rows in params['sizes']:
-      name = "fast_gaussian_subtract_%dx%d_%s" % (cols, rows, sigma_name)
-      all_header += [
-          '  if (input.dim(0).extent() == %s' % cols,
-          '      && input.dim(1).extent() == %s' % rows,
-          '      && sigma == %s) {' % sigma,
-          '    return %s(input, blurred, difference);' % name,
-          '  }',
-      ]
-
-  all_header += [
-      '  return 1;',
-      '}',
-      'inline int DoGeneratedFastSubtract('
-      '    Halide::Runtime::Buffer<const int16_t, 2> input_a,',
-      '    Halide::Runtime::Buffer<const int16_t, 2> input_b,',
-      '    Halide::Runtime::Buffer<int16_t, 2> output) {',
-  ]
-  for cols, rows in params['sizes']:
-    name = 'fast_subtract_%dx%d' % (cols, rows)
     all_header += [
-        '  if (input_a.dim(0).extent() == %s' % cols,
-        '      && input_a.dim(1).extent() == %s) {' % rows,
-        '    return %s(input_a, input_b, output);' % name,
-        '  }',
+        '  return 1;',
+        '}',
+        'inline int DoGeneratedFastGaussianAndSubtract(',
+        '    Halide::Runtime::Buffer<const int16_t, 2> input,',
+        '    Halide::Runtime::Buffer<int16_t, 2> blurred,',
+        '    Halide::Runtime::Buffer<int16_t, 2> difference,',
+        '    double sigma) {',
     ]
-  all_header += [
-      '  return 1;',
-      '}',
-      '}  // namespace vision',
-      '}  // namespace frc971',
-      '#endif  // Y2020_VISION_SIFT_FAST_GAUSSIAN_ALL_H_',
-  ]
 
-  with open(os.path.join(ruledir, 'fast_gaussian_all.h'), 'w') as f:
-    f.writelines([line + '\n' for line in all_header])
+    for sigma, sigma_name, filter_width in params['sigmas']:
+        for cols, rows in params['sizes']:
+            name = "fast_gaussian_subtract_%dx%d_%s" % (cols, rows, sigma_name)
+            all_header += [
+                '  if (input.dim(0).extent() == %s' % cols,
+                '      && input.dim(1).extent() == %s' % rows,
+                '      && sigma == %s) {' % sigma,
+                '    return %s(input, blurred, difference);' % name,
+                '  }',
+            ]
 
-  commands_lock = threading.Lock()
-  success = [True]
+    all_header += [
+        '  return 1;',
+        '}',
+        'inline int DoGeneratedFastSubtract('
+        '    Halide::Runtime::Buffer<const int16_t, 2> input_a,',
+        '    Halide::Runtime::Buffer<const int16_t, 2> input_b,',
+        '    Halide::Runtime::Buffer<int16_t, 2> output) {',
+    ]
+    for cols, rows in params['sizes']:
+        name = 'fast_subtract_%dx%d' % (cols, rows)
+        all_header += [
+            '  if (input_a.dim(0).extent() == %s' % cols,
+            '      && input_a.dim(1).extent() == %s) {' % rows,
+            '    return %s(input_a, input_b, output);' % name,
+            '  }',
+        ]
+    all_header += [
+        '  return 1;',
+        '}',
+        '}  // namespace vision',
+        '}  // namespace frc971',
+        '#endif  // Y2020_VISION_SIFT_FAST_GAUSSIAN_ALL_H_',
+    ]
 
-  def run_commands():
-    while True:
-      with commands_lock:
-        if not commands:
-          return
-        if not success[0]:
-          return
-        command = commands.pop()
-      try:
-        subprocess.check_call(command, env=env)
-      except:
-        with commands_lock:
-          success[0] = False
-        raise
-  threads = [threading.Thread(target=run_commands) for _ in range(4)]
-  for thread in threads:
-    thread.start()
-  for thread in threads:
-    thread.join()
-  if not success[0]:
-    sys.exit(1)
+    with open(os.path.join(ruledir, 'fast_gaussian_all.h'), 'w') as f:
+        f.writelines([line + '\n' for line in all_header])
+
+    commands_lock = threading.Lock()
+    success = [True]
+
+    def run_commands():
+        while True:
+            with commands_lock:
+                if not commands:
+                    return
+                if not success[0]:
+                    return
+                command = commands.pop()
+            try:
+                subprocess.check_call(command, env=env)
+            except:
+                with commands_lock:
+                    success[0] = False
+                raise
+
+    threads = [threading.Thread(target=run_commands) for _ in range(4)]
+    for thread in threads:
+        thread.start()
+    for thread in threads:
+        thread.join()
+    if not success[0]:
+        sys.exit(1)
+
 
 if __name__ == '__main__':
-  main(json.loads(sys.argv[1]))
+    main(json.loads(sys.argv[1]))
diff --git a/y2020/vision/tools/python_code/calibrate_intrinsics.py b/y2020/vision/tools/python_code/calibrate_intrinsics.py
index fb95599..46c275c 100644
--- a/y2020/vision/tools/python_code/calibrate_intrinsics.py
+++ b/y2020/vision/tools/python_code/calibrate_intrinsics.py
@@ -10,6 +10,7 @@
 
 # From: https://pynative.com/python-serialize-numpy-ndarray-into-json/
 class NumpyArrayEncoder(json.JSONEncoder):
+
     def default(self, obj):
         if isinstance(obj, np.integer):
             return int(obj)
@@ -24,8 +25,8 @@
 def get_robot_info(hostname):
     hostname_split = hostname.split("-")
     if hostname_split[0] != "pi":
-        print(
-            "ERROR: expected hostname to start with pi!  Got '%s'" % hostname)
+        print("ERROR: expected hostname to start with pi!  Got '%s'" %
+              hostname)
         quit()
 
     team_number = int(hostname_split[1])
diff --git a/y2020/vision/tools/python_code/camera_definition.py b/y2020/vision/tools/python_code/camera_definition.py
index 3e3623d..50e3ea4 100644
--- a/y2020/vision/tools/python_code/camera_definition.py
+++ b/y2020/vision/tools/python_code/camera_definition.py
@@ -11,6 +11,7 @@
 
 
 class CameraIntrinsics:
+
     def __init__(self):
         self.camera_matrix = []
         self.dist_coeffs = []
@@ -19,12 +20,14 @@
 
 
 class CameraExtrinsics:
+
     def __init__(self):
         self.R = []
         self.T = []
 
 
 class CameraParameters:
+
     def __init__(self):
         self.camera_int = CameraIntrinsics()
         self.camera_ext = CameraExtrinsics()
diff --git a/y2020/vision/tools/python_code/define_training_data.py b/y2020/vision/tools/python_code/define_training_data.py
index 76e73da..0435f95 100644
--- a/y2020/vision/tools/python_code/define_training_data.py
+++ b/y2020/vision/tools/python_code/define_training_data.py
@@ -28,8 +28,10 @@
         image = cv2.circle(image, (point[0], point[1]), 5, (255, 0, 0), -1)
     if (len(polygon) > 1):
         np_poly = np.array(polygon)
-        image = cv2.polylines(
-            image, [np_poly], close_polygon, color, thickness=3)
+        image = cv2.polylines(image, [np_poly],
+                              close_polygon,
+                              color,
+                              thickness=3)
     return image
 
 
@@ -249,7 +251,8 @@
     try:
         from bazel_tools.tools.python.runfiles import runfiles
         r = runfiles.Create()
-        ret_name = r.Rlocation('org_frc971/y2020/vision/tools/python_code/' + filename)
+        ret_name = r.Rlocation('org_frc971/y2020/vision/tools/python_code/' +
+                               filename)
     except:
         pass
 
diff --git a/y2020/vision/tools/python_code/target_definition.py b/y2020/vision/tools/python_code/target_definition.py
index 0266997..a3aea4c 100644
--- a/y2020/vision/tools/python_code/target_definition.py
+++ b/y2020/vision/tools/python_code/target_definition.py
@@ -22,6 +22,7 @@
 
 
 class TargetData:
+
     def __init__(self, filename=None):
         if filename:
             self.image_filename = dtd.bazel_name_fix(filename)
diff --git a/y2020/vision/tools/python_code/transform_projection_test.py b/y2020/vision/tools/python_code/transform_projection_test.py
index 01fe695..166b5ec 100644
--- a/y2020/vision/tools/python_code/transform_projection_test.py
+++ b/y2020/vision/tools/python_code/transform_projection_test.py
@@ -23,10 +23,12 @@
 
 # Create fake set of points relative to camera capture (target) frame
 # at +/- 1 meter in x, 5 meter depth, and every 1 meter in y from 0 to 4m (above the camera, so negative y values)
-pts_3d_target = np.array(
-    [[-1., 0., depth], [1., 0., depth], [-1., -1., depth], [1., -1., depth],
-     [-1., -2., depth], [0., -2., depth], [1., -2., depth], [-1., -3., depth],
-     [1., -3., depth], [-1., -4., depth], [1., -4., depth]])
+pts_3d_target = np.array([[-1., 0., depth], [1., 0., depth], [-1., -1., depth],
+                          [1., -1., depth], [-1., -2.,
+                                             depth], [0., -2., depth],
+                          [1., -2., depth], [-1., -3.,
+                                             depth], [1., -3., depth],
+                          [-1., -4., depth], [1., -4., depth]])
 
 # Ground truth shift of camera from (cam) to (cam2), to compute projections
 R_cam_cam2_gt = np.array([[0., 0.2, 0.2]]).T
@@ -46,9 +48,10 @@
 #pts_proj_3d = cam_mat.dot(pts_3d_target_shifted.T).T
 #pts_proj_2d = np.divide(pts_proj_3d[:,0:2],(pts_proj_3d[:,2].reshape(-1,1)))
 
-pts_proj_2d_cam2, jac_2d = cv2.projectPoints(
-    pts_3d_target, R_cam_cam2_gt_mat_inv, T_cam_cam2_gt_inv, cam_mat,
-    dist_coeffs)
+pts_proj_2d_cam2, jac_2d = cv2.projectPoints(pts_3d_target,
+                                             R_cam_cam2_gt_mat_inv,
+                                             T_cam_cam2_gt_inv, cam_mat,
+                                             dist_coeffs)
 
 # Now, solve for the pose using the original 3d points (pts_3d_T_t) and the projections from the new location
 retval, R_cam2_cam_est, T_cam2_cam_est, inliers = cv2.solvePnPRansac(
diff --git a/y2021_bot3/control_loops/python/drivetrain.py b/y2021_bot3/control_loops/python/drivetrain.py
index fdd08c2..f3d4682 100644
--- a/y2021_bot3/control_loops/python/drivetrain.py
+++ b/y2021_bot3/control_loops/python/drivetrain.py
@@ -16,10 +16,10 @@
     J=6.0,
     mass=58.0,
     # TODO(austin): Measure radius a bit better.
-    robot_radius= 0.39,
-    wheel_radius= 3/39.37,
+    robot_radius=0.39,
+    wheel_radius=3 / 39.37,
     motor_type=control_loop.Falcon(),
-    num_motors = 3,
+    num_motors=3,
     G=8.0 / 80.0,
     q_pos=0.24,
     q_vel=2.5,
@@ -40,7 +40,8 @@
         print("Expected .h file name and .cc file name")
     else:
         # Write the generated constants out to a file.
-        drivetrain.WriteDrivetrain(argv[1:3], argv[3:5], 'y2021_bot3', kDrivetrain)
+        drivetrain.WriteDrivetrain(argv[1:3], argv[3:5], 'y2021_bot3',
+                                   kDrivetrain)
 
 
 if __name__ == '__main__':
diff --git a/y2021_bot3/control_loops/python/polydrivetrain.py b/y2021_bot3/control_loops/python/polydrivetrain.py
index 83576cc..f8c3417 100644
--- a/y2021_bot3/control_loops/python/polydrivetrain.py
+++ b/y2021_bot3/control_loops/python/polydrivetrain.py
@@ -12,20 +12,23 @@
 FLAGS = gflags.FLAGS
 
 try:
-  gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
+    gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
 except gflags.DuplicateFlagError:
-  pass
+    pass
+
 
 def main(argv):
-  if FLAGS.plot:
-    polydrivetrain.PlotPolyDrivetrainMotions(drivetrain.kDrivetrain)
-  elif len(argv) != 7:
-    glog.fatal('Expected .h file name and .cc file name')
-  else:
-    polydrivetrain.WritePolyDrivetrain(argv[1:3], argv[3:5], argv[5:7], 'y2021_bot3',
-                                       drivetrain.kDrivetrain)
+    if FLAGS.plot:
+        polydrivetrain.PlotPolyDrivetrainMotions(drivetrain.kDrivetrain)
+    elif len(argv) != 7:
+        glog.fatal('Expected .h file name and .cc file name')
+    else:
+        polydrivetrain.WritePolyDrivetrain(argv[1:3], argv[3:5], argv[5:7],
+                                           'y2021_bot3',
+                                           drivetrain.kDrivetrain)
+
 
 if __name__ == '__main__':
-  argv = FLAGS(sys.argv)
-  glog.init()
-  sys.exit(main(argv))
+    argv = FLAGS(sys.argv)
+    glog.init()
+    sys.exit(main(argv))
diff --git a/y2022/control_loops/python/catapult.py b/y2022/control_loops/python/catapult.py
index 9e9adf7..e984478 100755
--- a/y2022/control_loops/python/catapult.py
+++ b/y2022/control_loops/python/catapult.py
@@ -40,7 +40,6 @@
 M_cup = (1750 * 0.0254 * 0.04 * 2 * math.pi * (ball_diameter / 2.)**2.0)
 J_cup = M_cup * lever**2.0 + M_cup * (ball_diameter / 2.)**2.0
 
-
 J = (0.0 * J_ball + J_bar + J_cup * 0.0)
 JEmpty = (J_bar + J_cup * 0.0)
 
@@ -83,6 +82,7 @@
 # We really want our cost function to be robust so that we can tolerate the
 # battery not delivering like we want at the end.
 
+
 def quadratic_cost(catapult, X_initial, X_final, horizon):
     Q_final = numpy.matrix([[10000.0, 0.0], [0.0, 10000.0]])
 
@@ -98,28 +98,32 @@
 
     P_final = 2.0 * Bf.transpose() * Q_final * Bf
     q_final = (2.0 * (Af * X_initial - X_final).transpose() * Q_final *
-         Bf).transpose()
+               Bf).transpose()
 
     constant_final = (Af * X_initial - X_final).transpose() * Q_final * (
         Af * X_initial - X_final)
 
-    m = numpy.matrix([[catapult.A[1, 1] ** (n + 1)] for n in range(horizon)])
+    m = numpy.matrix([[catapult.A[1, 1]**(n + 1)] for n in range(horizon)])
     M = Bs[1:horizon * 2:2, :]
 
-    W = numpy.matrix(numpy.identity(horizon) - numpy.eye(horizon, horizon, -1)) / catapult.dt
+    W = numpy.matrix(
+        numpy.identity(horizon) -
+        numpy.eye(horizon, horizon, -1)) / catapult.dt
     w = -numpy.matrix(numpy.eye(horizon, 1, 0)) / catapult.dt
 
-
     Pi = numpy.diag([
-        (0.01 ** 2.0) + (0.02 * max(0.0, 20 - (horizon - n)) / 20.0) ** 2.0 for n in range(horizon)
+        (0.01**2.0) + (0.02 * max(0.0, 20 - (horizon - n)) / 20.0)**2.0
+        for n in range(horizon)
     ])
 
     P_accel = 2.0 * M.transpose() * W.transpose() * Pi * W * M
-    q_accel = 2.0 * (((W * m + w) * X_initial[1, 0]).transpose() * Pi * W * M).transpose()
+    q_accel = 2.0 * ((
+        (W * m + w) * X_initial[1, 0]).transpose() * Pi * W * M).transpose()
     constant_accel = ((W * m + w) * X_initial[1, 0]).transpose() * Pi * (
         (W * m + w) * X_initial[1, 0])
 
-    return ((P_accel + P_final), (q_accel + q_final), (constant_accel + constant_final))
+    return ((P_accel + P_final), (q_accel + q_final),
+            (constant_accel + constant_final))
 
 
 def new_cost(catapult, X_initial, X_final, u):
@@ -138,25 +142,28 @@
 
     P_final = 2.0 * Bf.transpose() * Q_final * Bf
     q_final = (2.0 * (Af * X_initial - X_final).transpose() * Q_final *
-         Bf).transpose()
+               Bf).transpose()
 
     constant_final = (Af * X_initial - X_final).transpose() * Q_final * (
         Af * X_initial - X_final)
 
-    m = numpy.matrix([[catapult.A[1, 1] ** (n + 1)] for n in range(len(u))])
+    m = numpy.matrix([[catapult.A[1, 1]**(n + 1)] for n in range(len(u))])
     M = Bs[1:len(u) * 2:2, :]
 
-    W = numpy.matrix(numpy.identity(len(u)) - numpy.eye(len(u), len(u), -1)) / catapult.dt
+    W = numpy.matrix(numpy.identity(len(u)) -
+                     numpy.eye(len(u), len(u), -1)) / catapult.dt
     w = -numpy.matrix(numpy.eye(len(u), 1, 0)) * X_initial[1, 0] / catapult.dt
 
     accel = W * (M * u_matrix + m * X_initial[1, 0]) + w
 
     Pi = numpy.diag([
-        (0.01 ** 2.0) + (0.02 * max(0.0, 20 - (len(u) - n)) / 20.0) ** 2.0 for n in range(len(u))
+        (0.01**2.0) + (0.02 * max(0.0, 20 - (len(u) - n)) / 20.0)**2.0
+        for n in range(len(u))
     ])
 
     P_accel = 2.0 * M.transpose() * W.transpose() * Pi * W * M
-    q_accel = 2.0 * ((W * m * X_initial[1, 0] + w).transpose() * Pi * W * M).transpose()
+    q_accel = 2.0 * (
+        (W * m * X_initial[1, 0] + w).transpose() * Pi * W * M).transpose()
     constant_accel = (W * m * X_initial[1, 0] +
                       w).transpose() * Pi * (W * m * X_initial[1, 0] + w)
 
@@ -205,6 +212,7 @@
 
 def SolveCatapult(catapult, X_initial, X_final, u):
     """ Solves for the optimal action given a seed, state, and target """
+
     def vbat_constraint(z, i):
         return 12.0 - z[i]
 
@@ -213,13 +221,11 @@
 
     P, q, c = quadratic_cost(catapult, X_initial, X_final, len(u))
 
-
     def mpc_cost2(u_solver):
         u_matrix = numpy.matrix(u_solver).transpose()
         cost = mpc_cost(catapult, X_initial, X_final, u_solver)
         return cost
 
-
     def mpc_cost3(u_solver):
         u_matrix = numpy.matrix(u_solver).transpose()
         return (0.5 * u_matrix.transpose() * P * u_matrix +
@@ -268,7 +274,6 @@
     X_initial = numpy.matrix([[0.0], [0.0]])
     X_final = numpy.matrix([[2.0], [25.0]])
 
-
     X_initial = numpy.matrix([[0.0], [0.0]])
     X = X_initial.copy()
 
@@ -382,7 +387,8 @@
         )
     else:
         namespaces = ['y2022', 'control_loops', 'superstructure', 'catapult']
-        catapult_lib.WriteCatapult([kCatapultWithBall, kCatapultEmpty], argv[1:3], argv[3:5], namespaces)
+        catapult_lib.WriteCatapult([kCatapultWithBall, kCatapultEmpty],
+                                   argv[1:3], argv[3:5], namespaces)
     return 0
 
 
diff --git a/y2022/control_loops/python/catapult_lib.py b/y2022/control_loops/python/catapult_lib.py
index 4a88b14..eceb2d2 100644
--- a/y2022/control_loops/python/catapult_lib.py
+++ b/y2022/control_loops/python/catapult_lib.py
@@ -13,6 +13,7 @@
 
 # TODO(austin): This is mostly the same as angular_system.  Can we either wrap an angular_system or assign it?
 class Catapult(angular_system.AngularSystem):
+
     def __init__(self, params, name="Catapult"):
         super(Catapult, self).__init__(params, name)
         # Signal that we have a 2 cycle output delay to compensate for in
@@ -23,6 +24,7 @@
 
 
 class IntegralCatapult(angular_system.IntegralAngularSystem):
+
     def __init__(self, params, name="IntegralCatapult"):
         super(IntegralCatapult, self).__init__(params, name=name)
         # Signal that we have a 2 cycle output delay to compensate for in
diff --git a/y2022/control_loops/python/climber.py b/y2022/control_loops/python/climber.py
index 5f5ae38..12d9cd0 100755
--- a/y2022/control_loops/python/climber.py
+++ b/y2022/control_loops/python/climber.py
@@ -27,12 +27,15 @@
     kalman_q_voltage=35.0,
     kalman_r_position=0.05)
 
+
 def main(argv):
     if FLAGS.plot:
         R = numpy.matrix([[0.2], [0.0]])
         linear_system.PlotKick(kClimber, R, plant_params=kClimber)
-        linear_system.PlotMotion(
-            kClimber, R, max_velocity=5.0, plant_params=kClimber)
+        linear_system.PlotMotion(kClimber,
+                                 R,
+                                 max_velocity=5.0,
+                                 plant_params=kClimber)
 
     # Write the generated constants out to a file.
     if len(argv) != 5:
@@ -41,8 +44,8 @@
         )
     else:
         namespaces = ['y2022', 'control_loops', 'superstructure', 'climber']
-        linear_system.WriteLinearSystem(kClimber,
-                                        argv[1:3], argv[3:5], namespaces)
+        linear_system.WriteLinearSystem(kClimber, argv[1:3], argv[3:5],
+                                        namespaces)
 
 
 if __name__ == '__main__':
diff --git a/y2022/control_loops/python/polydrivetrain.py b/y2022/control_loops/python/polydrivetrain.py
index d749580..cf54470 100644
--- a/y2022/control_loops/python/polydrivetrain.py
+++ b/y2022/control_loops/python/polydrivetrain.py
@@ -12,20 +12,22 @@
 FLAGS = gflags.FLAGS
 
 try:
-  gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
+    gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
 except gflags.DuplicateFlagError:
-  pass
+    pass
+
 
 def main(argv):
-  if FLAGS.plot:
-    polydrivetrain.PlotPolyDrivetrainMotions(drivetrain.kDrivetrain)
-  elif len(argv) != 7:
-    glog.fatal('Expected .h file name and .cc file name')
-  else:
-    polydrivetrain.WritePolyDrivetrain(argv[1:3], argv[3:5], argv[5:7], 'y2022',
-                                       drivetrain.kDrivetrain)
+    if FLAGS.plot:
+        polydrivetrain.PlotPolyDrivetrainMotions(drivetrain.kDrivetrain)
+    elif len(argv) != 7:
+        glog.fatal('Expected .h file name and .cc file name')
+    else:
+        polydrivetrain.WritePolyDrivetrain(argv[1:3], argv[3:5], argv[5:7],
+                                           'y2022', drivetrain.kDrivetrain)
+
 
 if __name__ == '__main__':
-  argv = FLAGS(sys.argv)
-  glog.init()
-  sys.exit(main(argv))
+    argv = FLAGS(sys.argv)
+    glog.init()
+    sys.exit(main(argv))
diff --git a/y2022/vision/camera_definition.py b/y2022/vision/camera_definition.py
index dbb23ad..911418b 100644
--- a/y2022/vision/camera_definition.py
+++ b/y2022/vision/camera_definition.py
@@ -28,6 +28,7 @@
 
 
 class CameraIntrinsics:
+
     def __init__(self):
         self.camera_matrix = []
         self.dist_coeffs = []
@@ -36,12 +37,14 @@
 
 
 class CameraExtrinsics:
+
     def __init__(self):
         self.R = []
         self.T = []
 
 
 class CameraParameters:
+
     def __init__(self):
         self.camera_int = CameraIntrinsics()
         self.camera_ext = CameraExtrinsics()