Merge changes Id649bda2,Ic5eb0887

* changes:
  Update Spline UI to graph multiple multisplines
  Add multiple multisplines to Spline UI
diff --git a/Cargo.toml b/Cargo.toml
index fe957f9..8f57539 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -47,6 +47,11 @@
 uuid = "1.0"
 toml = "0.5"
 anyhow = "1.0"
+futures = "0.3"
+once_cell = "1.13"
+thiserror = "1.0"
+bitflags = "1.3"
+smallvec = "1.9"
 
 # For bindgen.
 bindgen = "0.58.1"
diff --git a/WORKSPACE b/WORKSPACE
index e33527c..6afd00c 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -297,7 +297,9 @@
     "//tools/rust:rust-toolchain-x86",
     "//tools/rust:rust-toolchain-armv7",
     "//tools/rust:rust-toolchain-arm64",
-    "//tools/rust:rust-toolchain-roborio",
+    # TODO(Brian): Make this work. See the comment on
+    # //tools/platforms:linux_roborio for details.
+    #"//tools/rust:rust-toolchain-roborio",
     "//tools/rust:noop_rust_toolchain",
     "//tools/ts:noop_node_toolchain",
 )
@@ -849,9 +851,9 @@
         "armv7-unknown-linux-gnueabihf",
         "aarch64-unknown-linux-gnu",
     ],
-    rustfmt_version = "1.58.1",
+    rustfmt_version = "1.62.0",
     toolchain_name_prefix = "toolchain_for",
-    version = "1.58.1",
+    version = "1.62.0",
 )
 
 load("@io_bazel_rules_webtesting//web:repositories.bzl", "web_test_repositories")
diff --git a/aos/BUILD b/aos/BUILD
index b04bb3d..2bd0ec8 100644
--- a/aos/BUILD
+++ b/aos/BUILD
@@ -396,7 +396,7 @@
     ],
     data = [
         "//aos/events:pingpong_config",
-        "//aos/events:pong.bfbs",
+        "//aos/events:pong_fbs_reflection_out",
         "//aos/testdata:test_configs",
     ],
     target_compatible_with = ["@platforms//os:linux"],
diff --git a/aos/README.md b/aos/README.md
index b99f51d..356c259 100644
--- a/aos/README.md
+++ b/aos/README.md
@@ -14,6 +14,46 @@
 aos_graph_nodes | dot -Tx11
 ```
 
+### Rust
+
+AOS has experimental Rust support. This involves creating Rust wrappers for all of the relevant
+C++ types. There must be exactly one wrapper for each type, or you will get confusing errors about
+converting between Rust types with very similar names (but from different crates) when you try to
+use them together. To standardize this, we have some conventions.
+
+We use autocxx to generate the raw wrappers. Sometimes autocxx needs tweaked C++ signatures to
+generate usable Rust bindings. Those tweaked signatures go in a separate C++ file with a
+`_for_rust` suffix, and have functions with `ForRust` suffixes.
+
+We want to pass around pointers and references to the autocxx-generated flatbuffers types so we can
+create byte slices to use with the Rust versions, but we ignore many of the flatbuffers types needed
+to wrap individual methods. Some of them are tricky to wrap.
+
+Converting between the autocxx-generated and rustc-generated flatbuffers types is tricky. The Rust
+flatbuffers API is based on slices, but the C++ API that autocxx is wrapping just uses pointers. We
+can convert from a Rust flatbuffer to its C++ equivalent pretty easily, but going the other way
+doesn't work. To maximize flexibility, each C++ wrapper module exposes APIs that take
+autocxx-generated types and provide convenient conversions for the types belonging to that module.
+Flatbuffers returned from C++ by value (typically in an `aos::Flatbuffer`) get returned as Rust
+`aos_flatbuffers::Flatbuffer` objects, while ones being returned from C++ by pointer (or reference)
+are exposed as the autocxx types.
+
+For the file `aos/xyz.fbs`, Rust wrappers go in `aos/xyz.rs`. The generated Rust flatbuffers
+code will be in `aos/xyz_fbs.rs`.
+
+For the file `aos/abc.h`, Rust wrappers go in `aos/abc.rs`. These wrappers may be more sophisticated
+than simple unsafe wrappers, but they should avoid adding additional functionality. Any additional
+C++ code goes in `aos/abc_for_rust.h`/`aos/abc_for_rust.cc`.
+
+All Rust functions intended to be called from other files get exported outside of the `ffi`
+module. In some cases, this is just giving the raw autocxx wrappers a new name. In other cases,
+these wrappers can attach lifetimes, etc., and be safe. This makes it clear which functions and
+types are being exported, because autocxx generates a lot of wrappers. Do not just make the
+entire `ffi` module, or any of its submodules, public.
+
+Rust modules map to Bazel rules. This means we end up with lots of Rust modules. We name them like
+`aos_events_event_loop` for all the code in `aos/events/event_loop.rs`.
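+
+As a purely illustrative sketch (the module layout follows the conventions above, but
+`RawEventLoop`, `EventLoop`, and their methods are made-up stand-ins rather than actual AOS
+APIs), a wrapper file looks roughly like this:
+
+```rust
+// Hypothetical contents of aos/events/event_loop.rs (module aos_events_event_loop).
+mod ffi {
+    // In the real wrappers, the autocxx-generated bindings live here and stay private.
+    // A stand-in type is used so this sketch stands on its own.
+    pub struct RawEventLoop {
+        pub name: String,
+    }
+}
+
+/// Safe wrapper that gets re-exported; the `ffi` module itself is never made public.
+pub struct EventLoop {
+    inner: ffi::RawEventLoop,
+}
+
+impl EventLoop {
+    pub fn name(&self) -> &str {
+        &self.inner.name
+    }
+}
+```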
+
 ### NOTES
 
 Some functions need to be in separate translation units in order for them to be guaranteed to work. As the C standard says,
diff --git a/aos/configuration.cc b/aos/configuration.cc
index d05cba8..db00ce7 100644
--- a/aos/configuration.cc
+++ b/aos/configuration.cc
@@ -181,11 +181,11 @@
   // instead.  It is much faster to load .bfbs files than .json files.
   if (!binary_path_exists && !util::PathExists(raw_path)) {
     const bool path_is_absolute = raw_path.size() > 0 && raw_path[0] == '/';
-    if (path_is_absolute && !extra_import_paths.empty()) {
-      LOG(ERROR)
-          << "Can't specify extra import paths if attempting to read a config "
-             "file from an absolute path (path is "
-          << raw_path << ").";
+    if (path_is_absolute) {
+      // Nowhere else to look up an absolute path, so fail now. Note that we
+      // always have at least one extra import path based on /proc/self/exe, so
+      // warning about those paths existing isn't helpful.
+      LOG(ERROR) << ": Failed to find file " << path << ".";
       return std::nullopt;
     }
 
diff --git a/aos/events/logging/BUILD b/aos/events/logging/BUILD
index b00d2ad..9f6234a 100644
--- a/aos/events/logging/BUILD
+++ b/aos/events/logging/BUILD
@@ -497,3 +497,13 @@
     gen_reflections = 1,
     target_compatible_with = ["@platforms//os:linux"],
 )
+
+cc_binary(
+    name = "timestamp_plot",
+    srcs = ["timestamp_plot.cc"],
+    deps = [
+        "//aos:init",
+        "//frc971/analysis:in_process_plotter",
+        "@com_google_absl//absl/strings",
+    ],
+)
diff --git a/aos/events/logging/boot_timestamp.h b/aos/events/logging/boot_timestamp.h
index d0fde73..bd9b357 100644
--- a/aos/events/logging/boot_timestamp.h
+++ b/aos/events/logging/boot_timestamp.h
@@ -4,6 +4,7 @@
 #include <iostream>
 
 #include "aos/time/time.h"
+#include "glog/logging.h"
 
 namespace aos::logger {
 
@@ -24,6 +25,18 @@
     return {boot, duration - d};
   }
 
+  BootDuration operator-(BootDuration d) const {
+    CHECK_EQ(d.boot, boot);
+    return {boot, duration - d.duration};
+  }
+
+  BootDuration operator+(BootDuration d) const {
+    CHECK_EQ(d.boot, boot);
+    return {boot, duration + d.duration};
+  }
+
+  BootDuration operator/(int x) const { return {boot, duration / x}; }
+
   bool operator==(const BootDuration &m2) const {
     return boot == m2.boot && duration == m2.duration;
   }
@@ -77,12 +90,22 @@
   BootTimestamp operator+(monotonic_clock::duration d) const {
     return {boot, time + d};
   }
+
+  BootTimestamp operator+=(monotonic_clock::duration d) {
+    time += d;
+    return *this;
+  }
   BootTimestamp operator-(monotonic_clock::duration d) const {
     return {boot, time - d};
   }
   BootTimestamp operator+(BootDuration d) const {
     return {boot, time + d.duration};
   }
+
+  BootDuration operator-(BootTimestamp t) const {
+    CHECK_EQ(t.boot, boot);
+    return {boot, time - t.time};
+  }
 };
 
 // Structure to hold both a boot and queue index.  Queue indices reset after
diff --git a/aos/events/logging/logger_test.cc b/aos/events/logging/logger_test.cc
index bb5b41e..5f265e2 100644
--- a/aos/events/logging/logger_test.cc
+++ b/aos/events/logging/logger_test.cc
@@ -3482,9 +3482,9 @@
 }
 
 constexpr std::string_view kCombinedConfigSha1(
-    "158a244107a7dc637fc5934ac161cb9e6c26195930fd8f82bb351c3ad7cce349");
+    "bcc66bc13a90a4a268649744e244129c5d024f5abd67587dcfbd7158d8abfc44");
 constexpr std::string_view kSplitConfigSha1(
-    "c73aa7913a9e116ee0a793d8280fac170b7eeea8e7350f45c6ac5bfc4ab018e1");
+    "d97e998164a6f1bf078aad77ef127329728ac9198a13a5ab8d5f30d84a932662");
 
 INSTANTIATE_TEST_SUITE_P(
     All, MultinodeLoggerTest,
diff --git a/aos/events/logging/timestamp_plot.cc b/aos/events/logging/timestamp_plot.cc
new file mode 100644
index 0000000..9c5f2dd
--- /dev/null
+++ b/aos/events/logging/timestamp_plot.cc
@@ -0,0 +1,331 @@
+#include "absl/strings/str_cat.h"
+#include "absl/strings/str_split.h"
+#include "aos/init.h"
+#include "aos/util/file.h"
+#include "frc971/analysis/in_process_plotter.h"
+
+using frc971::analysis::Plotter;
+
+DEFINE_bool(all, false, "If true, plot *all* the nodes at once");
+DEFINE_bool(bounds, false, "If true, plot the noncausal bounds too.");
+DEFINE_bool(samples, true, "If true, plot the samples too.");
+
+DEFINE_string(offsets, "",
+              "Offsets to add to the monotonic clock for each node.  Use the "
+              "format of node=offset,node=offest");
+
+// Simple C++ application to read the CSV files and use the in process plotter
+// to plot them.  This smokes the pants off gnuplot in terms of interactivity.
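+//
+// Example invocations (the node names here are just illustrative):
+//   timestamp_plot pi1 pi2
+//   timestamp_plot --all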
+
+namespace aos {
+
+// Returns all the nodes.
+std::vector<std::string> Nodes() {
+  const std::string start_time_file = aos::util::ReadFileToStringOrDie(
+      "/tmp/timestamp_noncausal_starttime.csv");
+  std::vector<std::string_view> nodes = absl::StrSplit(start_time_file, '\n');
+
+  std::vector<std::string> formatted_nodes;
+  for (const std::string_view n : nodes) {
+    if (n == "") {
+      continue;
+    }
+
+    std::vector<std::string_view> l = absl::StrSplit(n, ", ");
+    CHECK_EQ(l.size(), 2u) << "'" << n << "'";
+    formatted_nodes.emplace_back(l[0]);
+  }
+
+  return formatted_nodes;
+}
+
+std::string SampleFile(std::string_view node1, std::string_view node2) {
+  return absl::StrCat("/tmp/timestamp_noncausal_", node1, "_", node2,
+                      "_samples.csv");
+}
+
+std::pair<std::vector<double>, std::vector<double>> ReadSamples(
+    std::string_view node1, std::string_view node2, bool flip) {
+  std::vector<double> samplefile12_t;
+  std::vector<double> samplefile12_o;
+
+  const std::string file =
+      aos::util::ReadFileToStringOrDie(SampleFile(node1, node2));
+  bool first = true;
+  std::vector<std::string_view> lines = absl::StrSplit(file, '\n');
+  samplefile12_t.reserve(lines.size());
+  for (const std::string_view n : lines) {
+    if (first) {
+      first = false;
+      continue;
+    }
+    if (n == "") {
+      continue;
+    }
+
+    std::vector<std::string_view> l = absl::StrSplit(n, ", ");
+    CHECK_EQ(l.size(), 4u);
+    double t;
+    double o;
+    CHECK(absl::SimpleAtod(l[0], &t));
+    CHECK(absl::SimpleAtod(l[1], &o));
+    samplefile12_t.emplace_back(t);
+    samplefile12_o.emplace_back(flip ? -o : o);
+  }
+  return std::make_pair(samplefile12_t, samplefile12_o);
+}
+
+void Offset(std::vector<double> *v, double offset) {
+  for (double &x : *v) {
+    x += offset;
+  }
+}
+
+// Returns all the nodes which talk to each other.
+std::vector<std::pair<std::string, std::string>> NodeConnections() {
+  const std::vector<std::string> nodes = Nodes();
+  std::vector<std::pair<std::string, std::string>> result;
+  for (size_t i = 1; i < nodes.size(); ++i) {
+    for (size_t j = 0; j < i; ++j) {
+      const std::string_view node1 = nodes[j];
+      const std::string_view node2 = nodes[i];
+      if (aos::util::PathExists(SampleFile(node1, node2))) {
+        result.emplace_back(node1, node2);
+        LOG(INFO) << "Found pairing " << node1 << ", " << node2;
+      }
+    }
+  }
+  return result;
+}
+
+// Class to encapsulate the plotter state to make it easy to plot multiple
+// connections.
+class NodePlotter {
+ public:
+  NodePlotter() : nodes_(Nodes()) {
+    plotter_.AddFigure("Time");
+    if (!FLAGS_offsets.empty()) {
+      for (std::string_view nodeoffset : absl::StrSplit(FLAGS_offsets, ',')) {
+        std::vector<std::string_view> node_offset =
+            absl::StrSplit(nodeoffset, '=');
+        CHECK_EQ(node_offset.size(), 2u);
+        double o;
+        CHECK(absl::SimpleAtod(node_offset[1], &o));
+        offset_.emplace(std::string(node_offset[0]), o);
+      }
+    }
+  }
+
+  void AddNodes(std::string_view node1, std::string_view node2);
+
+  void Serve() {
+    plotter_.Publish();
+    plotter_.Spin();
+  }
+
+ private:
+  std::pair<std::vector<double>, std::vector<double>> ReadLines(
+      std::string_view node1, std::string_view node2, bool flip);
+
+  std::pair<std::vector<double>, std::vector<double>> ReadOffset(
+      std::string_view node1, std::string_view node2);
+
+  double TimeOffset(std::string_view node) {
+    auto it = offset_.find(std::string(node));
+    if (it == offset_.end()) {
+      return 0.0;
+    } else {
+      return it->second;
+    }
+  }
+
+  std::map<std::string, double> offset_;
+
+  Plotter plotter_;
+
+  std::vector<std::string> nodes_;
+};
+
+std::pair<std::vector<double>, std::vector<double>> NodePlotter::ReadLines(
+    std::string_view node1, std::string_view node2, bool flip) {
+  std::vector<double> samplefile12_t;
+  std::vector<double> samplefile12_o;
+
+  const std::string file = aos::util::ReadFileToStringOrDie(
+      absl::StrCat("/tmp/timestamp_noncausal_", node1, "_", node2, ".csv"));
+  bool first = true;
+  std::vector<std::string_view> lines = absl::StrSplit(file, '\n');
+  samplefile12_t.reserve(lines.size());
+  for (const std::string_view n : lines) {
+    if (first) {
+      first = false;
+      continue;
+    }
+    if (n == "") {
+      continue;
+    }
+
+    std::vector<std::string_view> l = absl::StrSplit(n, ", ");
+    CHECK_EQ(l.size(), 3u);
+    double t;
+    double o;
+    CHECK(absl::SimpleAtod(l[0], &t));
+    CHECK(absl::SimpleAtod(l[2], &o));
+    samplefile12_t.emplace_back(t);
+    samplefile12_o.emplace_back(flip ? -o : o);
+  }
+  return std::make_pair(samplefile12_t, samplefile12_o);
+}
+
+std::pair<std::vector<double>, std::vector<double>> NodePlotter::ReadOffset(
+    std::string_view node1, std::string_view node2) {
+  int node1_index = -1;
+  int node2_index = -1;
+
+  {
+    int index = 0;
+    for (const std::string &n : nodes_) {
+      if (n == node1) {
+        node1_index = index;
+      }
+      if (n == node2) {
+        node2_index = index;
+      }
+      ++index;
+    }
+  }
+  CHECK_NE(node1_index, -1) << ": Unknown node " << node1;
+  CHECK_NE(node2_index, -1) << ": Unknown node " << node2;
+  std::vector<double> offsetfile_t;
+  std::vector<double> offsetfile_o;
+
+  const std::string file =
+      aos::util::ReadFileToStringOrDie("/tmp/timestamp_noncausal_offsets.csv");
+  bool first = true;
+  std::vector<std::string_view> lines = absl::StrSplit(file, '\n');
+  offsetfile_t.reserve(lines.size());
+  for (const std::string_view n : lines) {
+    if (first) {
+      first = false;
+      continue;
+    }
+    if (n == "") {
+      continue;
+    }
+
+    std::vector<std::string_view> l = absl::StrSplit(n, ", ");
+    CHECK_LT(static_cast<size_t>(node1_index + 1), l.size());
+    CHECK_LT(static_cast<size_t>(node2_index + 1), l.size());
+    double t;
+    double o1;
+    double o2;
+    CHECK(absl::SimpleAtod(l[0], &t));
+    CHECK(absl::SimpleAtod(l[1 + node1_index], &o1));
+    CHECK(absl::SimpleAtod(l[1 + node2_index], &o2));
+    offsetfile_t.emplace_back(t);
+    offsetfile_o.emplace_back(o2 - o1);
+  }
+  return std::make_pair(offsetfile_t, offsetfile_o);
+}
+
+void NodePlotter::AddNodes(std::string_view node1, std::string_view node2) {
+  const double offset1 = TimeOffset(node1);
+  const double offset2 = TimeOffset(node2);
+
+  std::pair<std::vector<double>, std::vector<double>> samplefile12 =
+      ReadSamples(node1, node2, false);
+  std::pair<std::vector<double>, std::vector<double>> samplefile21 =
+      ReadSamples(node2, node1, true);
+
+  std::pair<std::vector<double>, std::vector<double>> noncausalfile12 =
+      ReadLines(node1, node2, false);
+  std::pair<std::vector<double>, std::vector<double>> noncausalfile21 =
+      ReadLines(node2, node1, true);
+
+  std::pair<std::vector<double>, std::vector<double>> offsetfile =
+      ReadOffset(node1, node2);
+
+  Offset(&samplefile12.second, offset2 - offset1);
+  Offset(&samplefile21.second, offset2 - offset1);
+  Offset(&noncausalfile12.second, offset2 - offset1);
+  Offset(&noncausalfile21.second, offset2 - offset1);
+  Offset(&offsetfile.second, offset2 - offset1);
+
+  CHECK_EQ(samplefile12.first.size(), samplefile12.second.size());
+  CHECK_EQ(samplefile21.first.size(), samplefile21.second.size());
+  CHECK_EQ(noncausalfile12.first.size(), noncausalfile12.second.size());
+  CHECK_EQ(noncausalfile21.first.size(), noncausalfile21.second.size());
+
+  LOG(INFO) << samplefile12.first.size() + samplefile21.first.size() +
+                   noncausalfile12.first.size() + noncausalfile21.first.size()
+            << " points";
+
+  plotter_.AddLine(offsetfile.first, offsetfile.second,
+                   Plotter::LineOptions{
+                       .label = absl::StrCat("filter ", node2, " ", node1),
+                       // TODO(austin): roboRIO compiler wants all the fields
+                       // filled out, but other compilers don't...  Sigh.
+                       .line_style = "*-",
+                       .color = "yellow",
+                       .point_size = 2.0});
+
+  if (FLAGS_samples) {
+    plotter_.AddLine(samplefile12.first, samplefile12.second,
+                     Plotter::LineOptions{
+                         .label = absl::StrCat("sample ", node1, " ", node2),
+                         .line_style = "*",
+                         .color = "purple",
+                     });
+    plotter_.AddLine(samplefile21.first, samplefile21.second,
+                     Plotter::LineOptions{
+                         .label = absl::StrCat("sample ", node2, " ", node1),
+                         .line_style = "*",
+                         .color = "green",
+                     });
+  }
+
+  if (FLAGS_bounds) {
+    plotter_.AddLine(
+        noncausalfile12.first, noncausalfile12.second,
+        Plotter::LineOptions{.label = absl::StrCat("nc ", node1, " ", node2),
+                             .line_style = "-",
+                             .color = "blue"});
+    plotter_.AddLine(
+        noncausalfile21.first, noncausalfile21.second,
+        Plotter::LineOptions{.label = absl::StrCat("nc ", node2, " ", node1),
+                             .line_style = "-",
+                             .color = "orange"});
+  }
+}
+
+int Main(int argc, const char *const *argv) {
+  NodePlotter plotter;
+
+  if (FLAGS_all) {
+    for (std::pair<std::string, std::string> ab : NodeConnections()) {
+      plotter.AddNodes(ab.first, ab.second);
+    }
+  } else {
+    CHECK_EQ(argc, 3);
+
+    LOG(INFO) << argv[1];
+    LOG(INFO) << argv[2];
+
+    const std::string_view node1 = argv[1];
+    const std::string_view node2 = argv[2];
+
+    plotter.AddNodes(node1, node2);
+  }
+
+  plotter.Serve();
+
+  return 0;
+}
+
+}  // namespace aos
+
+int main(int argc, char **argv) {
+  aos::InitGoogle(&argc, &argv);
+
+  aos::Main(argc, argv);
+}
diff --git a/aos/network/multinode_timestamp_filter.cc b/aos/network/multinode_timestamp_filter.cc
index b87d5b9..81fc03f 100644
--- a/aos/network/multinode_timestamp_filter.cc
+++ b/aos/network/multinode_timestamp_filter.cc
@@ -20,6 +20,11 @@
 DEFINE_int32(max_invalid_distance_ns, 0,
              "The max amount of time we will let the solver go backwards.");
 
+DEFINE_bool(bounds_offset_error, false,
+            "If true, use the offset to the bounds for solving instead of to "
+            "the interpolation lines.  This seems to make startup a bit "
+            "better, but won't track the middle as well.");
+
 namespace aos {
 namespace message_bridge {
 namespace {
@@ -130,7 +135,16 @@
 TimestampProblem::Derivitives TimestampProblem::ComputeDerivitives(
       const Eigen::Ref<Eigen::VectorXd> time_offsets) {
   Derivitives result;
+
+  // We get back both integer and double remainders for the gradient.  We then
+  // add them all up.  Rather than doing that purely as doubles, let's save up
+  // both, compute the result, then convert the remainder to doubles.  This is a
+  // bigger issue at the start when we are extrapolating a lot, and the offsets
+  // can be quite large in each direction.
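+  // As an illustrative example, a per-pair gradient contribution of
+  // 2 * (1'000'000'000 ns + 0.25) is accumulated as 2'000'000'000 ns in
+  // intgrad and 0.5 in result.gradient; the two are only combined as doubles
+  // once at the very end.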
+  Eigen::Matrix<chrono::nanoseconds, Eigen::Dynamic, 1> intgrad =
+      Eigen::Matrix<chrono::nanoseconds, Eigen::Dynamic, 1>::Zero(live_nodes_);
   result.gradient = Eigen::VectorXd::Zero(live_nodes_);
+
   result.hessian = Eigen::MatrixXd::Zero(live_nodes_, live_nodes_);
 
   for (size_t i = 0; i < clock_offset_filter_for_node_.size(); ++i) {
@@ -167,30 +181,61 @@
       // extra factor, the solution will be the same (or close enough).
       constexpr double kMinNetworkDelay = 2.0;
 
-      const std::pair<NoncausalTimestampFilter::Pointer, double> offset_error =
-          filter.filter->OffsetError(
-              filter.b_filter, filter.pointer, base_clock_[i],
-              time_offsets(a_solution_index), base_clock_[filter.b_index],
-              time_offsets(b_solution_index));
-      const double error = 2.0 * (offset_error.second - kMinNetworkDelay);
+      const std::pair<NoncausalTimestampFilter::Pointer,
+                      std::pair<chrono::nanoseconds, double>>
+          offset_error =
+              FLAGS_bounds_offset_error
+                  ? filter.filter->BoundsOffsetError(
+                        filter.b_filter, filter.pointer, base_clock_[i],
+                        time_offsets(a_solution_index),
+                        base_clock_[filter.b_index],
+                        time_offsets(b_solution_index))
+                  : filter.filter->OffsetError(filter.b_filter, filter.pointer,
+                                               base_clock_[i],
+                                               time_offsets(a_solution_index),
+                                               base_clock_[filter.b_index],
+                                               time_offsets(b_solution_index));
       filter.pointer = offset_error.first;
 
-      result.gradient(a_solution_index) += -error;
-      result.gradient(b_solution_index) += error;
+      const std::pair<chrono::nanoseconds, double> error =
+          std::make_pair(offset_error.second.first,
+                         offset_error.second.second - kMinNetworkDelay);
+
+      std::pair<chrono::nanoseconds, double> grad;
+      double hess;
+
+      grad = std::make_pair(2 * error.first, 2 * error.second);
+      hess = 2.0;
+
+      intgrad(a_solution_index) += -grad.first;
+      intgrad(b_solution_index) += grad.first;
+      result.gradient(a_solution_index) += -grad.second;
+      result.gradient(b_solution_index) += grad.second;
+
+      VLOG(2) << "  Filter pair "
+              << filter.filter->node_a()->name()->string_view() << "("
+              << a_solution_index << ") -> "
+              << filter.filter->node_b()->name()->string_view() << "("
+              << b_solution_index << "): " << std::setprecision(12)
+              << error.first.count() << " + " << error.second;
 
       // Reminder, our cost function has the following form.
       //   ((tb - (1 + ma) ta - ba)^2
       // We are ignoring the slope when taking the derivative and applying the
-      // chain rule to keep the gradient smooth.  This means that the Hessian is
-      // 2 for d^2 cost/dta^2 and d^2 cost/dtb^2
-      result.hessian(a_solution_index, a_solution_index) += 2;
-      result.hessian(b_solution_index, a_solution_index) += -2;
+      // chain rule to keep the gradient smooth.  This means that the Hessian
+      // is 2 for d^2 cost/dta^2 and d^2 cost/dtb^2
+      result.hessian(a_solution_index, a_solution_index) += hess;
+      result.hessian(b_solution_index, a_solution_index) += -hess;
       result.hessian(a_solution_index, b_solution_index) =
           result.hessian(b_solution_index, a_solution_index);
-      result.hessian(b_solution_index, b_solution_index) += 2;
+      result.hessian(b_solution_index, b_solution_index) += hess;
     }
   }
 
+  for (int i = 0; i < intgrad.rows(); ++i) {
+    result.gradient(i) += static_cast<double>(intgrad(i).count());
+  }
+
   return result;
 }
 
@@ -307,9 +352,9 @@
                                              solution_node);
 }
 
-std::tuple<std::vector<BootTimestamp>, size_t> TimestampProblem::SolveNewton(
-    const std::vector<logger::BootTimestamp> &points) {
-  constexpr int kMaxIterations = 200;
+std::tuple<std::vector<BootTimestamp>, size_t, size_t>
+TimestampProblem::SolveNewton(const std::vector<logger::BootTimestamp> &points,
+                              const size_t max_iterations) {
   MaybeUpdateNodeMapping();
   for (size_t i = 0; i < points.size(); ++i) {
     if (points[i] != logger::BootTimestamp::max_time()) {
@@ -318,7 +363,7 @@
   }
   Eigen::VectorXd data = Eigen::VectorXd::Zero(live_nodes_);
 
-  int solution_number = 0;
+  size_t iteration = 0;
   size_t solution_node;
   while (true) {
     Eigen::VectorXd step;
@@ -335,12 +380,12 @@
           ComputeDerivitives(data).gradient +
           step(live_nodes_) * constraint_jacobian.transpose();
 
-      VLOG(2) << "Adjusted grad " << solution_number << " -> "
+      VLOG(2) << "Adjusted grad " << iteration << " -> "
               << std::setprecision(12) << std::fixed << std::setfill(' ')
               << adjusted_grad.transpose().format(kHeavyFormat);
     }
 
-    VLOG(2) << "Step " << solution_number << " -> " << std::setprecision(12)
+    VLOG(2) << "Step " << iteration << " -> " << std::setprecision(12)
             << std::fixed << std::setfill(' ')
             << step.transpose().format(kHeavyFormat);
     // We got there if the max step is small (this is strongly correlated to the
@@ -357,7 +402,7 @@
 
     data += step.block(0, 0, live_nodes_, 1);
 
-    ++solution_number;
+    ++iteration;
 
     // We are doing all our math with both an int64 base and a double offset.
     // This lets us handle large offsets while retaining precision down to the
@@ -376,11 +421,23 @@
         base_clock_[j].time += chrono::nanoseconds(dsolution);
         data(solution_index) -= dsolution;
       }
+      if (live(j)) {
+        VLOG(2) << "    live  "
+                << base_clock_[j].time +
+                       std::chrono::nanoseconds(static_cast<int64_t>(
+                           std::round(data(NodeToFullSolutionIndex(j)))))
+                << " "
+                << (data(NodeToFullSolutionIndex(j)) -
+                    std::round(data(NodeToFullSolutionIndex(j))))
+                << " (unrounded: " << data(NodeToFullSolutionIndex(j)) << ")";
+      } else {
+        VLOG(2) << "    dead  " << aos::monotonic_clock::min_time;
+      }
     }
 
     // And finally, don't let us iterate forever.  If it isn't converging,
     // report back.
-    if (solution_number > kMaxIterations) {
+    if (iteration > max_iterations) {
       break;
     }
   }
@@ -388,7 +445,7 @@
   for (size_t i = 0; i < points.size(); ++i) {
     if (points[i] != logger::BootTimestamp::max_time()) {
       VLOG(2) << "Solving for node " << i << " of " << points[i] << " in "
-              << solution_number << " cycles";
+              << iteration << " cycles";
     }
   }
   std::vector<BootTimestamp> result(size());
@@ -398,20 +455,24 @@
       result[i].time = base_clock(i).time +
                        std::chrono::nanoseconds(static_cast<int64_t>(
                            std::round(data(NodeToFullSolutionIndex(i)))));
-      VLOG(2) << "live  " << result[i] << " "
-              << (data(NodeToFullSolutionIndex(i)) -
-                  std::round(data(NodeToFullSolutionIndex(i))))
-              << " (unrounded: " << data(NodeToFullSolutionIndex(i)) << ")";
+      if (VLOG_IS_ON(2) || iteration > max_iterations) {
+        LOG(INFO) << "live  " << result[i] << " "
+                  << (data(NodeToFullSolutionIndex(i)) -
+                      std::round(data(NodeToFullSolutionIndex(i))))
+                  << " (unrounded: " << data(NodeToFullSolutionIndex(i)) << ")";
+      }
     } else {
       result[i] = BootTimestamp::min_time();
-      VLOG(2) << "dead  " << result[i];
+      if (VLOG_IS_ON(2) || iteration > max_iterations) {
+        LOG(INFO) << "dead  " << result[i];
+      }
     }
   }
-  if (solution_number > kMaxIterations) {
-    LOG(FATAL) << "Failed to converge.";
+  if (iteration > max_iterations) {
+    LOG(ERROR) << "Failed to converge.";
   }
 
-  return std::make_pair(std::move(result), solution_node);
+  return std::make_tuple(std::move(result), solution_node, iteration);
 }
 
 void TimestampProblem::MaybeUpdateNodeMapping() {
@@ -796,12 +857,31 @@
   }
 }
 
-MultiNodeNoncausalOffsetEstimator::~MultiNodeNoncausalOffsetEstimator() {
+void MultiNodeNoncausalOffsetEstimator::FlushAndClose(bool destructor) {
+  // Write out all the data in our filters.
   FlushAllSamples(true);
   if (fp_) {
     fclose(fp_);
     fp_ = NULL;
   }
+
+  if (filter_fps_.size() != 0 && !destructor) {
+    size_t node_a_index = 0;
+    for (const auto &filters : filters_per_node_) {
+      for (const auto &filter : filters) {
+        while (true) {
+          std::optional<std::tuple<logger::BootTimestamp, logger::BootDuration>>
+              sample = filter.filter->Consume();
+          if (!sample) {
+            break;
+          }
+          WriteFilter(filter.filter, *sample);
+        }
+      }
+      ++node_a_index;
+    }
+  }
+
   if (filter_fps_.size() != 0) {
     for (std::vector<FILE *> &filter_fp : filter_fps_) {
       for (FILE *&fp : filter_fp) {
@@ -820,6 +900,7 @@
       }
     }
   }
+
   if (all_done_) {
     size_t node_a_index = 0;
     for (const auto &filters : filters_per_node_) {
@@ -850,6 +931,10 @@
   }
 }
 
+MultiNodeNoncausalOffsetEstimator::~MultiNodeNoncausalOffsetEstimator() {
+  FlushAndClose(true);
+}
+
 UUID MultiNodeNoncausalOffsetEstimator::boot_uuid(size_t node_index,
                                                   size_t boot_count) {
   CHECK(boots_);
@@ -1554,8 +1639,14 @@
       problem->set_base_clock(node_index, {base_times[node_index].boot,
                                            base_times[node_index].time + dt});
     }
-    std::tuple<std::vector<BootTimestamp>, size_t> solution =
-        problem->SolveNewton(points);
+    std::tuple<std::vector<BootTimestamp>, size_t, size_t> solution =
+        problem->SolveNewton(points, kMaxIterations);
+
+    if (std::get<2>(solution) > kMaxIterations) {
+      UpdateSolution(std::move(std::get<0>(solution)));
+      FlushAndClose(false);
+      LOG(FATAL) << "Failed to converge.";
+    }
 
     if (!problem->ValidateSolution(std::get<0>(solution))) {
       LOG(WARNING) << "Invalid solution, constraints not met.";
@@ -1564,6 +1655,8 @@
       }
       problem->Debug();
       if (!skip_order_validation_) {
+        UpdateSolution(std::move(std::get<0>(solution)));
+        FlushAndClose(false);
         LOG(FATAL) << "Bailing, use --skip_order_validation to continue.  "
                       "Use at your own risk.";
       }
@@ -1607,6 +1700,8 @@
   if (skip_order_validation_) {
     LOG(ERROR) << "Skipping because --skip_order_validation";
   } else {
+    UpdateSolution(solution);
+    FlushAndClose(false);
     LOG(FATAL) << "Please investigate.  Use --max_invalid_distance_ns="
                << invalid_distance.count() << " to ignore this.";
   }
@@ -1701,8 +1796,14 @@
       continue;
     }
 
-    std::tuple<std::vector<BootTimestamp>, size_t> solution =
-        problem->SolveNewton(points);
+    std::tuple<std::vector<BootTimestamp>, size_t, size_t> solution =
+        problem->SolveNewton(points, kMaxIterations);
+
+    if (std::get<2>(solution) > kMaxIterations) {
+      UpdateSolution(std::move(std::get<0>(solution)));
+      FlushAndClose(false);
+      LOG(FATAL) << "Failed to converge.";
+    }
 
     // Bypass checking if order validation is turned off.  This lets us dump a
     // CSV file so we can view the problem and figure out what to do.  The
@@ -1714,6 +1815,8 @@
       }
       problem->Debug();
       if (!skip_order_validation_) {
+        UpdateSolution(std::get<0>(solution));
+        FlushAndClose(false);
         LOG(FATAL) << "Bailing, use --skip_order_validation to continue.  "
                       "Use at your own risk.";
       }
@@ -1799,6 +1902,72 @@
   return std::make_tuple(next_filter, std::move(result_times), solution_index);
 }
 
+void MultiNodeNoncausalOffsetEstimator::UpdateSolution(
+    std::vector<BootTimestamp> result_times) {
+  // Now, figure out what distributed should be.  It should move at the rate of
+  // the max elapsed time so that conversions to and from it don't round to bad
+  // values.
+  const chrono::nanoseconds dt = MaxElapsedTime(last_monotonics_, result_times);
+  last_distributed_ += dt;
+  for (size_t i = 0; i < result_times.size(); ++i) {
+    if (result_times[i] == BootTimestamp::min_time()) {
+      // Found an unknown node.  Move its time along by the amount the
+      // distributed clock moved.
+      result_times[i] = last_monotonics_[i] + dt;
+    }
+  }
+  last_monotonics_ = std::move(result_times);
+
+  if (fp_) {
+    fprintf(
+        fp_, "%.9f",
+        chrono::duration<double>(last_distributed_.time_since_epoch()).count());
+    for (const BootTimestamp t : last_monotonics_) {
+      fprintf(fp_, ", %.9f",
+              chrono::duration<double>(t.time.time_since_epoch()).count());
+    }
+    fprintf(fp_, "\n");
+  }
+}
+
+void MultiNodeNoncausalOffsetEstimator::WriteFilter(
+    NoncausalTimestampFilter *next_filter,
+    std::tuple<logger::BootTimestamp, logger::BootDuration> sample) {
+  if (filter_fps_.size() > 0 && next_filter) {
+    const int node_a_index =
+        configuration::GetNodeIndex(configuration(), next_filter->node_a());
+    const int node_b_index =
+        configuration::GetNodeIndex(configuration(), next_filter->node_b());
+
+    FILE *fp = filter_fps_[node_a_index][node_b_index];
+    if (fp == nullptr) {
+      fp = filter_fps_[node_a_index][node_b_index] = fopen(
+          absl::StrCat("/tmp/timestamp_noncausal_",
+                       next_filter->node_a()->name()->string_view(), "_",
+                       next_filter->node_b()->name()->string_view(), ".csv")
+              .c_str(),
+          "w");
+      fprintf(fp, "time_since_start,sample_ns,filtered_offset\n");
+    }
+
+    if (last_monotonics_[node_a_index].boot == std::get<0>(sample).boot) {
+      fprintf(fp, "%.9f, %.9f, %.9f\n",
+              std::chrono::duration_cast<std::chrono::duration<double>>(
+                  last_distributed_.time_since_epoch() +
+                  std::get<0>(sample).time - last_monotonics_[node_a_index].time)
+                  .count(),
+              std::chrono::duration_cast<std::chrono::duration<double>>(
+                  std::get<0>(sample).time.time_since_epoch())
+                  .count(),
+              std::chrono::duration_cast<std::chrono::duration<double>>(
+                  std::get<1>(sample).duration)
+                  .count());
+    } else {
+      LOG(WARNING) << "Not writing point, mismatched boot.";
+    }
+  }
+}
+
 std::optional<
     std::tuple<distributed_clock::time_point, std::vector<BootTimestamp>>>
 MultiNodeNoncausalOffsetEstimator::NextTimestamp() {
@@ -1920,19 +2089,8 @@
     }
   }
 
-  // Now, figure out what distributed should be.  It should move at the rate of
-  // the max elapsed time so that conversions to and from it don't round to bad
-  // values.
-  const chrono::nanoseconds dt = MaxElapsedTime(last_monotonics_, result_times);
-  last_distributed_ += dt;
-  for (size_t i = 0; i < result_times.size(); ++i) {
-    if (result_times[i] == BootTimestamp::min_time()) {
-      // Found an unknown node.  Move its time along by the amount the
-      // distributed clock moved.
-      result_times[i] = last_monotonics_[i] + dt;
-    }
-  }
-  last_monotonics_ = std::move(result_times);
+  UpdateSolution(std::move(result_times));
+  WriteFilter(next_filter, sample);
 
   // And freeze everything.
   {
@@ -1946,45 +2104,6 @@
     }
   }
 
-  if (filter_fps_.size() > 0 && next_filter) {
-    const int node_a_index =
-        configuration::GetNodeIndex(configuration(), next_filter->node_a());
-    const int node_b_index =
-        configuration::GetNodeIndex(configuration(), next_filter->node_b());
-
-    FILE *fp = filter_fps_[node_a_index][node_b_index];
-    if (fp == nullptr) {
-      fp = filter_fps_[node_a_index][node_b_index] = fopen(
-          absl::StrCat("/tmp/timestamp_noncausal_",
-                       next_filter->node_a()->name()->string_view(), "_",
-                       next_filter->node_b()->name()->string_view(), ".csv")
-              .c_str(),
-          "w");
-      fprintf(fp, "time_since_start,sample_ns,filtered_offset\n");
-    }
-
-    fprintf(fp, "%.9f, %.9f, %.9f\n",
-            std::chrono::duration_cast<std::chrono::duration<double>>(
-                last_distributed_.time_since_epoch())
-                .count(),
-            std::chrono::duration_cast<std::chrono::duration<double>>(
-                std::get<0>(sample).time.time_since_epoch())
-                .count(),
-            std::chrono::duration_cast<std::chrono::duration<double>>(
-                std::get<1>(sample).duration)
-                .count());
-  }
-
-  if (fp_) {
-    fprintf(
-        fp_, "%.9f",
-        chrono::duration<double>(last_distributed_.time_since_epoch()).count());
-    for (const BootTimestamp t : last_monotonics_) {
-      fprintf(fp_, ", %.9f",
-              chrono::duration<double>(t.time.time_since_epoch()).count());
-    }
-    fprintf(fp_, "\n");
-  }
   FlushAllSamples(false);
   return std::make_tuple(last_distributed_, last_monotonics_);
 }
diff --git a/aos/network/multinode_timestamp_filter.h b/aos/network/multinode_timestamp_filter.h
index eff6179..0954984 100644
--- a/aos/network/multinode_timestamp_filter.h
+++ b/aos/network/multinode_timestamp_filter.h
@@ -57,8 +57,8 @@
   // solver and returns the optimal time on each node, along with the node which
   // constrained the problem.  points is the list of potential constraint
   // points, and the solver uses the earliest point.
-  std::tuple<std::vector<logger::BootTimestamp>, size_t> SolveNewton(
-      const std::vector<logger::BootTimestamp> &points);
+  std::tuple<std::vector<logger::BootTimestamp>, size_t, size_t> SolveNewton(
+      const std::vector<logger::BootTimestamp> &points, size_t max_iterations);
 
   // Validates the solution, returning true if it meets all the constraints, and
   // false otherwise.
@@ -345,6 +345,8 @@
   const aos::Configuration *configuration() const { return configuration_; }
 
  private:
+  static constexpr int kMaxIterations = 400;
+
   struct CandidateTimes {
     logger::BootTimestamp next_node_time = logger::BootTimestamp::max_time();
     logger::BootDuration next_node_duration = logger::BootDuration::max_time();
@@ -389,6 +391,18 @@
   // Writes all samples to disk.
   void FlushAllSamples(bool finish);
 
+  // Adds the solution to last_distributed_.
+  void UpdateSolution(
+      std::vector<logger::BootTimestamp> result_times);
+
+  void WriteFilter(
+      NoncausalTimestampFilter *next_filter,
+      std::tuple<logger::BootTimestamp, logger::BootDuration> sample);
+
+  // Writes everything to disk and closes it all out in preparation for either
+  // destruction or crashing.
+  void FlushAndClose(bool destructor);
+
   const Configuration *configuration_;
   const Configuration *logged_configuration_;
 
diff --git a/aos/network/multinode_timestamp_filter_test.cc b/aos/network/multinode_timestamp_filter_test.cc
index 841ff4d..dab8e06 100644
--- a/aos/network/multinode_timestamp_filter_test.cc
+++ b/aos/network/multinode_timestamp_filter_test.cc
@@ -372,15 +372,18 @@
   std::vector<BootTimestamp> points1(problem.size(), BootTimestamp::max_time());
   points1[0] = e + chrono::seconds(1);
 
-  std::tuple<std::vector<BootTimestamp>, size_t> result1 =
-      problem.SolveNewton(points1);
+  constexpr size_t kMaxIterations = 200u;
+  std::tuple<std::vector<BootTimestamp>, size_t, size_t> result1 =
+      problem.SolveNewton(points1, kMaxIterations);
+  EXPECT_LT(std::get<2>(result1), kMaxIterations);
   EXPECT_EQ(std::get<1>(result1), 0u);
   EXPECT_TRUE(problem.ValidateSolution(std::get<0>(result1)));
 
   std::vector<BootTimestamp> points2(problem.size(), BootTimestamp::max_time());
   points2[1] = std::get<0>(result1)[1];
-  std::tuple<std::vector<BootTimestamp>, size_t> result2 =
-      problem.SolveNewton(points2);
+  std::tuple<std::vector<BootTimestamp>, size_t, size_t> result2 =
+      problem.SolveNewton(points2, kMaxIterations);
+  EXPECT_LT(std::get<2>(result2), kMaxIterations);
   EXPECT_EQ(std::get<1>(result2), 1u);
   EXPECT_TRUE(problem.ValidateSolution(std::get<0>(result2)));
 
@@ -390,15 +393,17 @@
 
   // Confirm that the error is almost equal for both directions.  The solution
   // is an integer solution, so there will be a little bit of error left over.
-  EXPECT_NEAR(
+  std::pair<chrono::nanoseconds, double> a_error =
       a.OffsetError(nullptr, NoncausalTimestampFilter::Pointer(),
                     std::get<0>(result1)[0], 0.0, std::get<0>(result1)[1], 0.0)
-              .second -
-          b.OffsetError(nullptr, NoncausalTimestampFilter::Pointer(),
-                        std::get<0>(result1)[1], 0.0, std::get<0>(result1)[0],
-                        0.0)
-              .second,
-      0.0, 0.5);
+          .second;
+  std::pair<chrono::nanoseconds, double> b_error =
+      b.OffsetError(nullptr, NoncausalTimestampFilter::Pointer(),
+                    std::get<0>(result1)[1], 0.0, std::get<0>(result1)[0], 0.0)
+          .second;
+  EXPECT_NEAR(static_cast<double>((a_error.first - b_error.first).count()) +
+                  (a_error.second - b_error.second),
+              0.0, 0.5);
 }
 
 }  // namespace testing
diff --git a/aos/network/timestamp_filter.cc b/aos/network/timestamp_filter.cc
index c1cf612..8d1c9fe 100644
--- a/aos/network/timestamp_filter.cc
+++ b/aos/network/timestamp_filter.cc
@@ -491,15 +491,16 @@
 std::pair<Pointer, std::pair<std::tuple<BootTimestamp, BootDuration>,
                              std::tuple<BootTimestamp, BootDuration>>>
 NoncausalTimestampFilter::FindTimestamps(const NoncausalTimestampFilter *other,
-                                         Pointer pointer, BootTimestamp ta_base,
-                                         double ta, size_t sample_boot) const {
+                                         bool use_other, Pointer pointer,
+                                         BootTimestamp ta_base, double ta,
+                                         size_t sample_boot) const {
   CHECK_GE(ta, 0.0);
   CHECK_LT(ta, 1.0);
 
   // Since ta is less than an integer, and timestamps should be at least 1 ns
   // apart, we can ignore ta if we make sure that the end of the segment is
   // strictly > than ta_base.
-  return FindTimestamps(other, pointer, ta_base, sample_boot);
+  return FindTimestamps(other, use_other, pointer, ta_base, sample_boot);
 }
 
 std::pair<
@@ -507,7 +508,7 @@
     std::pair<std::tuple<monotonic_clock::time_point, chrono::nanoseconds>,
               std::tuple<monotonic_clock::time_point, chrono::nanoseconds>>>
 NoncausalTimestampFilter::SingleFilter::FindTimestamps(
-    const SingleFilter *other, Pointer pointer,
+    const SingleFilter *other, bool use_other, Pointer pointer,
     monotonic_clock::time_point ta_base, double ta) const {
   CHECK_GE(ta, 0.0);
   CHECK_LT(ta, 1.0);
@@ -515,7 +516,7 @@
   // Since ta is less than an integer, and timestamps should be at least 1 ns
   // apart, we can ignore ta if we make sure that the end of the segment is
   // strictly > than ta_base.
-  return FindTimestamps(other, pointer, ta_base);
+  return FindTimestamps(other, use_other, pointer, ta_base);
 }
 
 std::pair<
@@ -523,9 +524,12 @@
     std::pair<std::tuple<monotonic_clock::time_point, chrono::nanoseconds>,
               std::tuple<monotonic_clock::time_point, chrono::nanoseconds>>>
 NoncausalTimestampFilter::InterpolateWithOtherFilter(
-    Pointer pointer, monotonic_clock::time_point ta,
+    Pointer pointer, bool use_other, monotonic_clock::time_point ta,
     std::tuple<monotonic_clock::time_point, chrono::nanoseconds> t0,
     std::tuple<monotonic_clock::time_point, chrono::nanoseconds> t1) {
+  if (!use_other) {
+    return std::make_pair(pointer, std::make_pair(t0, t1));
+  }
   // We have 2 timestamps bookending everything, and a list of points in the
   // middle.
   //
@@ -576,7 +580,7 @@
     std::pair<std::tuple<monotonic_clock::time_point, chrono::nanoseconds>,
               std::tuple<monotonic_clock::time_point, chrono::nanoseconds>>>
 NoncausalTimestampFilter::SingleFilter::FindTimestamps(
-    const SingleFilter *other, Pointer pointer,
+    const SingleFilter *other, bool use_other, Pointer pointer,
     monotonic_clock::time_point ta) const {
   CHECK_GT(timestamps_size(), 1u);
 
@@ -638,7 +642,7 @@
               << ": Cache changed";
         }
 
-        return InterpolateWithOtherFilter(pointer, ta, t0, t1);
+        return InterpolateWithOtherFilter(pointer, use_other, ta, t0, t1);
       }
     }
   }
@@ -707,7 +711,7 @@
         }
 
         if (pointer.other_points_.size() > 0) {
-          return InterpolateWithOtherFilter(pointer, ta, t0, t1);
+          return InterpolateWithOtherFilter(pointer, use_other, ta, t0, t1);
         }
       }
     }
@@ -769,43 +773,24 @@
 chrono::nanoseconds NoncausalTimestampFilter::ExtrapolateOffset(
     std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds> p0,
     monotonic_clock::time_point ta) {
-  const chrono::nanoseconds dt = ta - std::get<0>(p0);
-  if (dt <= std::chrono::nanoseconds(0)) {
-    // Extrapolate backwards, using the (positive) MaxVelocity slope
-    // We've been asked to extrapolate the offset to a time before our first
-    // sample point.  To be conservative, we'll return an extrapolated
-    // offset that is less than (less tight an estimate of the network delay)
-    // than our sample offset, bound by the max slew velocity we allow
-    //       p0
-    //      /
-    //     /
-    //   ta
-    // Since dt < 0, we shift by dt * slope to get that value
-    return std::get<1>(p0) +
-           chrono::nanoseconds(static_cast<int64_t>(
-               (absl::int128(dt.count() - MaxVelocityRatio::den / 2) *
-                absl::int128(MaxVelocityRatio::num)) /
-               absl::int128(MaxVelocityRatio::den)));
-  } else {
-    // Extrapolate forwards, using the (negative) MaxVelocity slope
-    // Same concept, except going foward past our last (most recent) sample:
-    //       pN
-    //         |
-    //          |
-    //           ta
-    // Since dt > 0, we shift by - dt * slope to get that value
-    return std::get<1>(p0) -
-           chrono::nanoseconds(static_cast<int64_t>(
-               (absl::int128(dt.count() + MaxVelocityRatio::den / 2) *
-                absl::int128(MaxVelocityRatio::num)) /
-               absl::int128(MaxVelocityRatio::den)));
-  }
+  return ExtrapolateOffset(p0, ta, 0.0).first;
 }
 
 chrono::nanoseconds NoncausalTimestampFilter::InterpolateOffset(
     std::tuple<monotonic_clock::time_point, chrono::nanoseconds> p0,
     std::tuple<monotonic_clock::time_point, chrono::nanoseconds> p1,
     monotonic_clock::time_point ta) {
+  return InterpolateOffset(p0, p1, ta, 0.0).first;
+}
+
+std::pair<chrono::nanoseconds, double>
+NoncausalTimestampFilter::InterpolateOffset(
+    std::tuple<monotonic_clock::time_point, chrono::nanoseconds> p0,
+    std::tuple<monotonic_clock::time_point, chrono::nanoseconds> p1,
+    monotonic_clock::time_point ta_base, double ta) {
+  DCHECK_GE(ta, 0.0);
+  DCHECK_LT(ta, 1.0);
+
   // Given 2 points defining a line and the time along that line, interpolate.
   //
   // ta may be massive, but the points will be close, so compute everything
@@ -817,82 +802,123 @@
   //
   // Add (or subtract, integer division rounds towards 0...) 0.5 ((dt / 2) / dt)
   // to the numerator to round to the nearest number rather than round down.
-  const chrono::nanoseconds time_in = ta - std::get<0>(p0);
+  const chrono::nanoseconds time_in = ta_base - std::get<0>(p0);
   const chrono::nanoseconds dt = std::get<0>(p1) - std::get<0>(p0);
+  const chrono::nanoseconds doffset = std::get<1>(p1) - std::get<1>(p0);
 
   absl::int128 numerator =
-      absl::int128(time_in.count()) *
-      absl::int128((std::get<1>(p1) - std::get<1>(p0)).count());
+      absl::int128(time_in.count()) * absl::int128(doffset.count());
   numerator += numerator > 0 ? absl::int128(dt.count() / 2)
                              : -absl::int128(dt.count() / 2);
-  return std::get<1>(p0) + chrono::nanoseconds(static_cast<int64_t>(
-                               numerator / absl::int128(dt.count())));
+
+  const chrono::nanoseconds integer =
+      std::get<1>(p0) + chrono::nanoseconds(static_cast<int64_t>(
+                            numerator / absl::int128(dt.count())));
+  // Compute the remainder of the division in InterpolateOffset above, and
+  // then use double math to compute it accurately.  Since integer math rounds
+  // down, we need to undo the rounding to get the double remainder.  Add or
+  // subtract dt/2/dt (0.5) to undo the addition.
+  //
+  // We have good tests which confirm for small offsets this matches nicely. For
+  // large offsets, the 128 bit math will take care of us.
+  const double remainder =
+      static_cast<double>(numerator % absl::int128(dt.count())) / dt.count() +
+      (numerator > 0 ? -0.5 : 0.5) +
+      ta * static_cast<double>(doffset.count()) /
+          static_cast<double>(dt.count());
+  return std::make_pair(integer, remainder);
 }
 
-chrono::nanoseconds NoncausalTimestampFilter::InterpolateOffset(
+chrono::nanoseconds NoncausalTimestampFilter::BoundOffset(
     std::tuple<monotonic_clock::time_point, chrono::nanoseconds> p0,
-    std::tuple<monotonic_clock::time_point, chrono::nanoseconds> /*p1*/,
-    monotonic_clock::time_point /*ta_base*/, double /*ta*/) {
-  // For the double variant, we want to split the result up into a large integer
-  // portion, and the rest.  We want to do this without introducing numerical
-  // precision problems.
-  //
-  // One way would be to carefully compute the integer portion, and then compute
-  // the double portion in such a way that the two are guaranteed to add up
-  // correctly.
-  //
-  // The simpler way is to simply just use the offset from p0 as the integer
-  // portion, and make the rest be the double portion.  It will get us most of
-  // the way there for a lot less work, and we can revisit if this breaks down.
-  //
-  // oa = p0.o + (ta - p0.t) * (p1.o - p0.o) / (p1.t - p0.t)
-  //      ^^^^
-  // TODO(austin): Use 128 bit math and the remainder to be more accurate here.
-  return std::get<1>(p0);
+    std::tuple<monotonic_clock::time_point, chrono::nanoseconds> p1,
+    monotonic_clock::time_point ta) {
+  // We are trying to solve for worst case offset given the two known points.
+  // This is on the two worst case lines from the two points, and we switch
+  // lines at the intersection.  This is equivalent to the lowest of the two
+  // lines.
+  return std::max(NoncausalTimestampFilter::ExtrapolateOffset(p0, ta),
+                  NoncausalTimestampFilter::ExtrapolateOffset(p1, ta));
 }
 
-double NoncausalTimestampFilter::InterpolateOffsetRemainder(
+std::pair<chrono::nanoseconds, double> NoncausalTimestampFilter::BoundOffset(
     std::tuple<monotonic_clock::time_point, chrono::nanoseconds> p0,
     std::tuple<monotonic_clock::time_point, chrono::nanoseconds> p1,
     monotonic_clock::time_point ta_base, double ta) {
-  const chrono::nanoseconds time_in = ta_base - std::get<0>(p0);
-  const chrono::nanoseconds dt = std::get<0>(p1) - std::get<0>(p0);
+  DCHECK_GE(ta, 0.0);
+  DCHECK_LT(ta, 1.0);
 
-  // The remainder then is the rest of the equation.
-  //
-  // oa = p0.o + (ta - p0.t) * (p1.o - p0.o) / (p1.t - p0.t)
-  //             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  // TODO(austin): Use 128 bit math and the remainder to be more accurate here.
-  return static_cast<double>(ta + time_in.count()) /
-         static_cast<double>(dt.count()) *
-         (std::get<1>(p1) - std::get<1>(p0)).count();
+  const std::pair<chrono::nanoseconds, double> o0 =
+      NoncausalTimestampFilter::ExtrapolateOffset(p0, ta_base, ta);
+  const std::pair<chrono::nanoseconds, double> o1 =
+      NoncausalTimestampFilter::ExtrapolateOffset(p1, ta_base, ta);
+
+  // Want to calculate max(o0 + o0r, o1 + o1r) without precision problems.
+  if (static_cast<double>((o0.first - o1.first).count()) >
+      o1.second - o0.second) {
+    // Ok, o0 is now > o1.  We want the max, so return o0.
+    return o0;
+  } else {
+    return o1;
+  }
 }
 
-chrono::nanoseconds NoncausalTimestampFilter::ExtrapolateOffset(
-    std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds> p0,
-    monotonic_clock::time_point /*ta_base*/, double /*ta*/) {
-  // TODO(austin): 128 bit math again? ...
-  // For this version, use the base offset from p0 as the base for the offset
-  return std::get<1>(p0);
-}
-
-double NoncausalTimestampFilter::ExtrapolateOffsetRemainder(
+std::pair<chrono::nanoseconds, double>
+NoncausalTimestampFilter::ExtrapolateOffset(
     std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds> p0,
     monotonic_clock::time_point ta_base, double ta) {
-  // Compute the remainder portion of this offset
-  // oa = p0.o +/- ((ta + ta_base) - p0.t)) * kMaxVelocity()
-  //               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  // But compute (ta + ta_base - p0.t) as (ta + (ta_base - p0.t))
-  // to handle numerical precision
-  const chrono::nanoseconds time_in = ta_base - std::get<0>(p0);
-  const double dt = static_cast<double>(ta + time_in.count());
-  if (dt < 0.0) {
-    // Extrapolate backwards with max (positive) slope (which means
-    // the returned offset should be negative)
-    return dt * kMaxVelocity();
+  DCHECK_GE(ta, 0.0);
+  DCHECK_LT(ta, 1.0);
+  // Since the point (p0) is an integer, we can now guarantee that ta won't put
+  // us on a different side of p0.  This is because ta is between 0 and 1, and
+  // always positive.  Compute the integer and double portions and return them.
+  const chrono::nanoseconds dt = ta_base - std::get<0>(p0);
+
+  if (dt < std::chrono::nanoseconds(0)) {
+    // Extrapolate backwards, using the (positive) MaxVelocity slope
+    // We've been asked to extrapolate the offset to a time before our first
+    // sample point.  To be conservative, we'll return an extrapolated
+    // offset that is less than (less tight an estimate of the network delay)
+    // than our sample offset, bound by the max slew velocity we allow
+    //       p0
+    //      /
+    //     /
+    //   ta
+    // Since dt < 0, we shift by dt * slope to get that value
+    //
+    // Take the remainder of the math in ExtrapolateOffset above and compute it
+    // with floating point math.  Our tests are good enough to confirm that this
+    // works as designed.
+    const absl::int128 numerator =
+        (absl::int128(dt.count() - MaxVelocityRatio::den / 2) *
+         absl::int128(MaxVelocityRatio::num));
+    return std::make_pair(
+        std::get<1>(p0) + chrono::nanoseconds(static_cast<int64_t>(
+                              numerator / absl::int128(MaxVelocityRatio::den))),
+        static_cast<double>(numerator % absl::int128(MaxVelocityRatio::den)) /
+                static_cast<double>(MaxVelocityRatio::den) +
+            0.5 + ta * kMaxVelocity());
   } else {
-    // Extrapolate forwards with max (negative) slope
-    return -dt * kMaxVelocity();
+    // Extrapolate forwards, using the (negative) MaxVelocity slope
+    // Same concept, except going forward past our last (most recent) sample:
+    //       pN
+    //         |
+    //          |
+    //           ta
+    // Since dt > 0, we shift by - dt * slope to get that value
+    //
+    // Take the remainder of the math in ExtrapolateOffset above and compute it
+    // with floating point math.  Our tests are good enough to confirm that this
+    // works as designed.
+    const absl::int128 numerator =
+        absl::int128(dt.count() + MaxVelocityRatio::den / 2) *
+        absl::int128(MaxVelocityRatio::num);
+    return std::make_pair(
+        std::get<1>(p0) - chrono::nanoseconds(static_cast<int64_t>(
+                              numerator / absl::int128(MaxVelocityRatio::den))),
+        -static_cast<double>(numerator % absl::int128(MaxVelocityRatio::den)) /
+                static_cast<double>(MaxVelocityRatio::den) +
+            0.5 - ta * kMaxVelocity());
   }
 }
 
@@ -914,7 +940,7 @@
       Pointer,
       std::pair<std::tuple<monotonic_clock::time_point, chrono::nanoseconds>,
                 std::tuple<monotonic_clock::time_point, chrono::nanoseconds>>>
-      points = FindTimestamps(other, pointer, ta);
+      points = FindTimestamps(other, true, pointer, ta);
   return std::make_pair(points.first,
                         NoncausalTimestampFilter::InterpolateOffset(
                             points.second.first, points.second.second, ta));
@@ -931,32 +957,56 @@
     std::pair<Pointer,
               std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>>
         reference_timestamp = GetReferenceTimestamp(ta_base, ta);
-    return std::make_pair(
-        reference_timestamp.first,
-        std::make_pair(NoncausalTimestampFilter::ExtrapolateOffset(
-                           reference_timestamp.second, ta_base, ta),
-                       NoncausalTimestampFilter::ExtrapolateOffsetRemainder(
-                           reference_timestamp.second, ta_base, ta)));
+    return std::make_pair(reference_timestamp.first,
+                          NoncausalTimestampFilter::ExtrapolateOffset(
+                              reference_timestamp.second, ta_base, ta));
   }
 
   std::pair<
       Pointer,
       std::pair<std::tuple<monotonic_clock::time_point, chrono::nanoseconds>,
                 std::tuple<monotonic_clock::time_point, chrono::nanoseconds>>>
-      points = FindTimestamps(other, pointer, ta_base, ta);
+      points = FindTimestamps(other, true, pointer, ta_base, ta);
   CHECK_LT(std::get<0>(points.second.first), std::get<0>(points.second.second));
   // Return both the integer and double portion together to save a timestamp
   // lookup.
   return std::make_pair(
       points.first,
-      std::make_pair(
-          NoncausalTimestampFilter::InterpolateOffset(
-              points.second.first, points.second.second, ta_base, ta),
-          NoncausalTimestampFilter::InterpolateOffsetRemainder(
-              points.second.first, points.second.second, ta_base, ta)));
+      NoncausalTimestampFilter::InterpolateOffset(
+          points.second.first, points.second.second, ta_base, ta));
 }
 
-std::pair<Pointer, double> NoncausalTimestampFilter::SingleFilter::OffsetError(
+std::pair<Pointer, std::pair<chrono::nanoseconds, double>>
+NoncausalTimestampFilter::SingleFilter::BoundsOffset(
+    const SingleFilter *other, Pointer pointer,
+    monotonic_clock::time_point ta_base, double ta) const {
+  CHECK_GT(timestamps_size(), 0u) << node_names_;
+  if (IsOutsideSamples(ta_base, ta)) {
+    // Special case size = 1 or ta_base before first timestamp or
+    // after last timestamp, so we need to extrapolate out
+    std::pair<Pointer,
+              std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>>
+        reference_timestamp = GetReferenceTimestamp(ta_base, ta);
+    return std::make_pair(reference_timestamp.first,
+                          NoncausalTimestampFilter::ExtrapolateOffset(
+                              reference_timestamp.second, ta_base, ta));
+  }
+
+  std::pair<
+      Pointer,
+      std::pair<std::tuple<monotonic_clock::time_point, chrono::nanoseconds>,
+                std::tuple<monotonic_clock::time_point, chrono::nanoseconds>>>
+      points = FindTimestamps(other, false, pointer, ta_base, ta);
+  CHECK_LT(std::get<0>(points.second.first), std::get<0>(points.second.second));
+  // Return both the integer and double portion together to save a timestamp
+  // lookup.
+  return std::make_pair(points.first, NoncausalTimestampFilter::BoundOffset(
+                                          points.second.first,
+                                          points.second.second, ta_base, ta));
+}
+
+std::pair<Pointer, std::pair<chrono::nanoseconds, double>>
+NoncausalTimestampFilter::SingleFilter::OffsetError(
     const SingleFilter *other, Pointer pointer,
     aos::monotonic_clock::time_point ta_base, double ta,
     aos::monotonic_clock::time_point tb_base, double tb) const {
@@ -969,9 +1019,26 @@
   // Compute the integer portion first, and the double portion second.  Subtract
   // the results of each.  This handles large offsets without losing precision.
   return std::make_pair(
-      offset.first,
-      static_cast<double>(((tb_base - ta_base) - offset.second.first).count()) +
-          ((tb - ta) - offset.second.second));
+      offset.first, std::make_pair(((tb_base - ta_base) - offset.second.first),
+                                   (tb - ta) - offset.second.second));
+}
+
+std::pair<Pointer, std::pair<chrono::nanoseconds, double>>
+NoncausalTimestampFilter::SingleFilter::BoundsOffsetError(
+    const SingleFilter *other, Pointer pointer,
+    aos::monotonic_clock::time_point ta_base, double ta,
+    aos::monotonic_clock::time_point tb_base, double tb) const {
+  NormalizeTimestamps(&ta_base, &ta);
+  NormalizeTimestamps(&tb_base, &tb);
+
+  const std::pair<Pointer, std::pair<std::chrono::nanoseconds, double>> offset =
+      BoundsOffset(other, pointer, ta_base, ta);
+
+  // Compute the integer portion first, and the double portion second.  Subtract
+  // the results of each.  This handles large offsets without losing precision.
+  return std::make_pair(
+      offset.first, std::make_pair((tb_base - ta_base) - offset.second.first,
+                                   (tb - ta) - offset.second.second));
 }
 
 std::string NoncausalTimestampFilter::DebugOffsetError(
@@ -1010,7 +1077,7 @@
                        other == nullptr
                            ? nullptr
                            : &other->filter(tb_base.boot, ta_base.boot)->filter,
-                       pointer, ta_base.time, ta)
+                       true, pointer, ta_base.time, ta)
                    .second;
 
   // As a reminder, our cost function is essentially:
@@ -1061,45 +1128,45 @@
     auto reference_timestamp = GetReferenceTimestamp(ta_base, ta);
 
     // Special case size = 1 or ta before first timestamp, so we extrapolate
-    const chrono::nanoseconds offset_base =
+    const std::pair<chrono::nanoseconds, double> offset =
         NoncausalTimestampFilter::ExtrapolateOffset(reference_timestamp.second,
                                                     ta_base, ta);
-    const double offset_remainder =
-        NoncausalTimestampFilter::ExtrapolateOffsetRemainder(
-            reference_timestamp.second, ta_base, ta);
 
     // We want to do offset + ta > tb, but we need to do it with minimal
     // numerical precision problems.
     // See below for why this is a >=
-    if (static_cast<double>((offset_base + ta_base - tb_base).count()) >=
-        tb - ta - offset_remainder) {
+    if (static_cast<double>((offset.first + ta_base - tb_base).count()) >=
+        tb - ta - offset.second) {
       LOG(ERROR) << node_names_ << " "
-                 << TimeString(ta_base, ta, offset_base, offset_remainder)
+                 << TimeString(ta_base, ta, offset.first, offset.second)
                  << " > solution time "
                  << tb_base + chrono::nanoseconds(
                                   static_cast<int64_t>(std::round(tb)))
                  << ", " << tb - std::round(tb) << " foo";
-      LOG(INFO) << "Remainder " << offset_remainder;
+      LOG(INFO) << "Remainder " << offset.second;
       return false;
     }
     return true;
   }
 
+  // Here, all we care about is confirming that the worst case holds.  This
+  // means that each solution is plausible based on the points that we have. The
+  // only thing we actually know is that time will slew by at most the max slew
+  // rate, so the candidate solution must be within the max slew rate from the
+  // samples.
   std::pair<
       Pointer,
       std::pair<std::tuple<monotonic_clock::time_point, chrono::nanoseconds>,
                 std::tuple<monotonic_clock::time_point, chrono::nanoseconds>>>
-      points = FindTimestamps(other, pointer, ta_base, ta);
-  const chrono::nanoseconds offset_base =
-      NoncausalTimestampFilter::InterpolateOffset(
-          points.second.first, points.second.second, ta_base, ta);
-  const double offset = NoncausalTimestampFilter::InterpolateOffsetRemainder(
-      points.second.first, points.second.second, ta_base, ta);
+      points = FindTimestamps(other, false, pointer, ta_base, ta);
+  const std::pair<chrono::nanoseconds, double> offset =
+      NoncausalTimestampFilter::BoundOffset(points.second.first,
+                                            points.second.second, ta_base, ta);
   // See below for why this is a >=
-  if (static_cast<double>((offset_base + ta_base - tb_base).count()) >=
-      tb - offset - ta) {
+  if (static_cast<double>((offset.first + ta_base - tb_base).count()) >=
+      tb - offset.second - ta) {
     LOG(ERROR) << node_names_ << " "
-               << TimeString(ta_base, ta, offset_base, offset)
+               << TimeString(ta_base, ta, offset.first, offset.second)
                << " > solution time " << tb_base << ", " << tb;
     LOG(ERROR) << "Bracketing times are " << TimeString(points.second.first)
                << " and " << TimeString(points.second.second);
@@ -1118,12 +1185,13 @@
                << ") is before the start and we have forgotten the answer.";
     return false;
   }
+
+  // The logic here mirrors the double variant above almost perfectly.  See
+  // above for the comments.
+
   if (IsOutsideSamples(ta, 0.)) {
-    // Special case size = 1 or ta_base before first timestamp or
-    // after last timestamp, so we need to extrapolate out
     auto reference_timestamp = GetReferenceTimestamp(ta, 0.);
 
-    // Special case size = 1 or ta before first timestamp, so we extrapolate
     const chrono::nanoseconds offset =
         NoncausalTimestampFilter::ExtrapolateOffset(reference_timestamp.second,
                                                     ta);
@@ -1142,10 +1210,9 @@
       Pointer,
       std::pair<std::tuple<monotonic_clock::time_point, chrono::nanoseconds>,
                 std::tuple<monotonic_clock::time_point, chrono::nanoseconds>>>
-      points = FindTimestamps(other, pointer, ta);
-  const chrono::nanoseconds offset =
-      NoncausalTimestampFilter::InterpolateOffset(points.second.first,
-                                                  points.second.second, ta);
+      points = FindTimestamps(other, false, pointer, ta);
+  const chrono::nanoseconds offset = NoncausalTimestampFilter::BoundOffset(
+      points.second.first, points.second.second, ta);
 
   // Note: this needs to be >=.  The simulation code doesn't give us a good
   // way to preserve order well enough to have causality preserved when things
@@ -1215,7 +1282,8 @@
     // adheres to our +- velocity constraint. If the point is less than the max
     // negative slope, the point violates our constraint and will never be worth
     // considering.  Ignore it.
-    if (doffset < -dt * kMaxVelocity()) {
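+    // This is doffset < -dt * kMaxVelocity(), cross-multiplied by
+    // MaxVelocityRatio so the comparison is exact integer math rather than
+    // floating point.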
+    if (absl::int128(doffset.count()) * absl::int128(MaxVelocityRatio::den) <
+        -absl::int128(dt.count()) * absl::int128(MaxVelocityRatio::num)) {
       VLOG(1) << std::setprecision(1) << std::fixed << node_names_
               << " Rejected sample of " << TimeString(monotonic_now, sample_ns)
               << " because " << doffset.count() << " < "
@@ -1249,7 +1317,10 @@
     //
     // In this case, point 3 is now violating our constraint and we need to
     // remove it.  This is the non-causal part of the filter.
-    while (dt * kMaxVelocity() < doffset && timestamps_.size() > 1u) {
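+    // As above, this is dt * kMaxVelocity() < doffset in exact integer math.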
+    while (absl::int128(dt.count()) * absl::int128(MaxVelocityRatio::num) <
+               absl::int128(doffset.count()) *
+                   absl::int128(MaxVelocityRatio::den) &&
+           timestamps_.size() > 1u) {
       CHECK(!frozen(std::get<0>(back)))
           << ": " << node_names_ << " Can't pop an already frozen sample "
           << TimeString(back) << " while inserting "
@@ -1318,7 +1389,9 @@
         const chrono::nanoseconds dt = std::get<0>(*second) - monotonic_now;
         const chrono::nanoseconds doffset = std::get<1>(*second) - sample_ns;
 
-        if (doffset < -dt * kMaxVelocity()) {
+        if (absl::int128(doffset.count()) *
+                absl::int128(MaxVelocityRatio::den) <
+            -absl::int128(dt.count()) * absl::int128(MaxVelocityRatio::num)) {
           VLOG(1) << node_names_ << " Removing redundant sample of "
                   << TimeString(*second) << " because "
                   << TimeString(timestamps_.front())
@@ -1345,7 +1418,9 @@
           const chrono::nanoseconds doffset =
               std::get<1>(*third) - std::get<1>(*second);
 
-          if (doffset > dt * kMaxVelocity()) {
+          if (absl::int128(doffset.count()) *
+                  absl::int128(MaxVelocityRatio::den) >
+              absl::int128(dt.count()) * absl::int128(MaxVelocityRatio::num)) {
             VLOG(1) << node_names_ << " Removing invalid sample of "
                     << TimeString(*second) << " because " << TimeString(*third)
                     << " would make the slope too positive.";
@@ -1369,14 +1444,19 @@
       chrono::nanoseconds next_doffset = std::get<1>(*it) - sample_ns;
 
       // If we are worse than either the previous or next point, discard.
-      if (prior_doffset < -prior_dt * kMaxVelocity()) {
+      if (absl::int128(prior_doffset.count()) *
+              absl::int128(MaxVelocityRatio::den) <
+          absl::int128(-prior_dt.count()) *
+              absl::int128(MaxVelocityRatio::num)) {
         VLOG(1) << node_names_ << " Ignoring timestamp "
                 << TimeString(monotonic_now, sample_ns) << " because "
                 << TimeString(*(it - 1))
                 << " is before and the slope would be too negative.";
         return;
       }
-      if (next_doffset > next_dt * kMaxVelocity()) {
+      if (absl::int128(next_doffset.count()) *
+              absl::int128(MaxVelocityRatio::den) >
+          absl::int128(next_dt.count()) * absl::int128(MaxVelocityRatio::num)) {
         VLOG(1) << node_names_ << " Ignoring timestamp "
                 << TimeString(monotonic_now, sample_ns) << " because "
                 << TimeString(*it)
@@ -1415,7 +1495,10 @@
         const chrono::nanoseconds next_doffset =
             std::get<1>(*next_it) - std::get<1>(*middle_it);
 
-        if (next_doffset < -next_dt * kMaxVelocity()) {
+        if (absl::int128(next_doffset.count()) *
+                absl::int128(MaxVelocityRatio::den) <
+            absl::int128(-next_dt.count()) *
+                absl::int128(MaxVelocityRatio::num)) {
           VLOG(1) << node_names_
                   << " Next slope is too negative, removing next point "
                   << TimeString(*next_it);
@@ -1434,7 +1517,10 @@
         const chrono::nanoseconds prior_doffset =
             std::get<1>(*middle_it) - std::get<1>(*prior_it);
 
-        if (prior_doffset > prior_dt * kMaxVelocity()) {
+        if (absl::int128(prior_doffset.count()) *
+                absl::int128(MaxVelocityRatio::den) >
+            absl::int128(prior_dt.count()) *
+                absl::int128(MaxVelocityRatio::num)) {
           CHECK(!frozen(std::get<0>(*prior_it)))
               << ": " << node_names_
               << " Can't pop an already frozen sample.  Increase "
diff --git a/aos/network/timestamp_filter.h b/aos/network/timestamp_filter.h
index fd430ce..2b5ff47 100644
--- a/aos/network/timestamp_filter.h
+++ b/aos/network/timestamp_filter.h
@@ -16,9 +16,6 @@
 namespace aos {
 namespace message_bridge {
 
-// TODO<jim>: Should do something to help with precision, like make it an
-// integer and divide by the value (e.g., / 1000)
-
 // Max velocity to clamp the filter to in seconds/second.
 typedef std::ratio<1, 1000> MaxVelocityRatio;
 inline constexpr double kMaxVelocity() {
@@ -317,22 +314,41 @@
   // Returns the error between the offset in the provided timestamps, and the
   // offset at ta.  Also returns a pointer to the timestamps used for the
   // lookup to be passed back in again for a more efficient second lookup.
-  std::pair<Pointer, double> OffsetError(const NoncausalTimestampFilter *other,
-                                         Pointer pointer,
-                                         logger::BootTimestamp ta_base,
-                                         double ta,
-                                         logger::BootTimestamp tb_base,
-                                         double tb) const {
+  std::pair<Pointer, std::pair<std::chrono::nanoseconds, double>> OffsetError(
+      const NoncausalTimestampFilter *other, Pointer pointer,
+      logger::BootTimestamp ta_base, double ta, logger::BootTimestamp tb_base,
+      double tb) const {
     const BootFilter *boot_filter = filter(pointer, ta_base.boot, tb_base.boot);
     const SingleFilter *other_filter =
         other == nullptr
             ? nullptr
             : other->maybe_single_filter(tb_base.boot, ta_base.boot);
-    std::pair<Pointer, double> result = boot_filter->filter.OffsetError(
-        other_filter, pointer, ta_base.time, ta, tb_base.time, tb);
+    std::pair<Pointer, std::pair<std::chrono::nanoseconds, double>> result =
+        boot_filter->filter.OffsetError(other_filter, pointer, ta_base.time, ta,
+                                        tb_base.time, tb);
     result.first.boot_filter_ = boot_filter;
     return result;
   }
+
+  // Returns the error between the offset in the provided timestamps, and the
+  // bounds offset at ta.  Also returns a pointer to the timestamps used for the
+  // lookup to be passed back in again for a more efficient second lookup.
+  std::pair<Pointer, std::pair<std::chrono::nanoseconds, double>>
+  BoundsOffsetError(const NoncausalTimestampFilter *other, Pointer pointer,
+                    logger::BootTimestamp ta_base, double ta,
+                    logger::BootTimestamp tb_base, double tb) const {
+    const BootFilter *boot_filter = filter(pointer, ta_base.boot, tb_base.boot);
+    const SingleFilter *other_filter =
+        other == nullptr
+            ? nullptr
+            : other->maybe_single_filter(tb_base.boot, ta_base.boot);
+    std::pair<Pointer, std::pair<std::chrono::nanoseconds, double>> result =
+        boot_filter->filter.BoundsOffsetError(
+            other_filter, pointer, ta_base.time, ta, tb_base.time, tb);
+    result.first.boot_filter_ = boot_filter;
+    return result;
+  }
+
   // Returns the string representation of 2 * OffsetError(ta, tb)
   std::string DebugOffsetError(const NoncausalTimestampFilter *other,
                                Pointer pointer, logger::BootTimestamp ta_base,
@@ -503,43 +519,14 @@
   }
 
   // Public for testing.
-  // Returns the offset for the point in time, using the timestamps in the deque
-  // to form a polyline used to interpolate.
-  logger::BootDuration Offset(const NoncausalTimestampFilter *other,
-                              Pointer pointer, logger::BootTimestamp ta,
-                              size_t sample_boot) const {
-    return {sample_boot,
-            filter(ta.boot, sample_boot)
-                ->filter
-                .Offset(other == nullptr
-                            ? nullptr
-                            : &other->filter(sample_boot, ta.boot)->filter,
-                        pointer, ta.time)
-                .second};
-  }
-
-  std::pair<logger::BootDuration, double> Offset(
-      const NoncausalTimestampFilter *other, Pointer pointer,
-      logger::BootTimestamp ta_base, double ta, size_t sample_boot) const {
-    std::pair<Pointer, std::pair<std::chrono::nanoseconds, double>> result =
-        filter(ta_base.boot, sample_boot)
-            ->filter.Offset(
-                other == nullptr
-                    ? nullptr
-                    : &other->filter(sample_boot, ta_base.boot)->filter,
-                pointer, ta_base.time, ta);
-    return std::make_pair(
-        logger::BootDuration{sample_boot, result.second.first},
-        result.second.second);
-  }
-
   // Assuming that there are at least 2 points in timestamps_, finds the 2
   // matching points.
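+  // If use_other is false, the other filter's points are ignored, which gives
+  // the same result as passing other == nullptr.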
   std::pair<Pointer,
             std::pair<std::tuple<logger::BootTimestamp, logger::BootDuration>,
                       std::tuple<logger::BootTimestamp, logger::BootDuration>>>
-  FindTimestamps(const NoncausalTimestampFilter *other, Pointer pointer,
-                 logger::BootTimestamp ta, size_t sample_boot) const {
+  FindTimestamps(const NoncausalTimestampFilter *other, bool use_other,
+                 Pointer pointer, logger::BootTimestamp ta,
+                 size_t sample_boot) const {
     const BootFilter *boot_filter = filter(ta.boot, sample_boot);
     std::pair<
         Pointer,
@@ -549,7 +536,7 @@
         result = boot_filter->filter.FindTimestamps(
             other == nullptr ? nullptr
                              : &other->filter(sample_boot, ta.boot)->filter,
-            pointer, ta.time);
+            use_other, pointer, ta.time);
     result.first.boot_filter_ = boot_filter;
     return std::make_pair(
         result.first,
@@ -568,8 +555,8 @@
   std::pair<Pointer,
             std::pair<std::tuple<logger::BootTimestamp, logger::BootDuration>,
                       std::tuple<logger::BootTimestamp, logger::BootDuration>>>
-  FindTimestamps(const NoncausalTimestampFilter *other, Pointer pointer,
-                 logger::BootTimestamp ta_base, double ta,
+  FindTimestamps(const NoncausalTimestampFilter *other, bool use_other,
+                 Pointer pointer, logger::BootTimestamp ta_base, double ta,
                  size_t sample_boot) const;
 
   static std::chrono::nanoseconds InterpolateOffset(
@@ -577,25 +564,26 @@
       std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds> p1,
       monotonic_clock::time_point ta);
 
+  static std::chrono::nanoseconds BoundOffset(
+      std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds> p0,
+      std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds> p1,
+      monotonic_clock::time_point ta);
+
   static std::chrono::nanoseconds ExtrapolateOffset(
       std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds> p0,
       monotonic_clock::time_point ta);
 
-  static std::chrono::nanoseconds InterpolateOffset(
+  static std::pair<std::chrono::nanoseconds, double> InterpolateOffset(
       std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds> p0,
       std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds> p1,
       monotonic_clock::time_point ta_base, double ta);
 
-  static double InterpolateOffsetRemainder(
-      std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds> /*p0*/,
-      std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds> /*p1*/,
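+  // Returns a conservative bound on the offset at (ta_base + ta): the larger
+  // of extrapolating from p0 and from p1 at the max slew rate.  This is
+  // always >= the interpolated offset between p0 and p1.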
+  static std::pair<std::chrono::nanoseconds, double> BoundOffset(
+      std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds> p0,
+      std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds> p1,
       monotonic_clock::time_point ta_base, double ta);
 
-  static std::chrono::nanoseconds ExtrapolateOffset(
-      std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds> p0,
-      monotonic_clock::time_point /*ta_base*/, double /*ta*/);
-
-  static double ExtrapolateOffsetRemainder(
+  static std::pair<std::chrono::nanoseconds, double> ExtrapolateOffset(
       std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds> p0,
       monotonic_clock::time_point ta_base, double ta);
 
@@ -625,14 +613,14 @@
         std::pair<
             std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>,
             std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>>>
-    FindTimestamps(const SingleFilter *other, Pointer pointer,
+    FindTimestamps(const SingleFilter *other, bool use_other, Pointer pointer,
                    monotonic_clock::time_point ta) const;
     std::pair<
         Pointer,
         std::pair<
             std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>,
             std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>>>
-    FindTimestamps(const SingleFilter *other, Pointer pointer,
+    FindTimestamps(const SingleFilter *other, bool use_other, Pointer pointer,
                    monotonic_clock::time_point ta_base, double ta) const;
 
     // Check whether the given timestamp falls within our current samples
@@ -649,11 +637,22 @@
     std::pair<Pointer, std::pair<std::chrono::nanoseconds, double>> Offset(
         const SingleFilter *other, Pointer pointer,
         monotonic_clock::time_point ta_base, double ta) const;
-    std::pair<Pointer, double> OffsetError(
+
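+    // Like Offset(), but returns the conservative BoundOffset of the
+    // bracketing points (ignoring the other filter's points) instead of the
+    // interpolated offset.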
+    std::pair<Pointer, std::pair<std::chrono::nanoseconds, double>>
+    BoundsOffset(const SingleFilter *other, Pointer pointer,
+                 monotonic_clock::time_point ta_base, double ta) const;
+
+    std::pair<Pointer, std::pair<std::chrono::nanoseconds, double>> OffsetError(
         const SingleFilter *other, Pointer pointer,
         aos::monotonic_clock::time_point ta_base, double ta,
         aos::monotonic_clock::time_point tb_base, double tb) const;
 
+    std::pair<Pointer, std::pair<std::chrono::nanoseconds, double>>
+    BoundsOffsetError(const SingleFilter *other, Pointer pointer,
+                      aos::monotonic_clock::time_point ta_base, double ta,
+                      aos::monotonic_clock::time_point tb_base,
+                      double tb) const;
+
     bool has_unobserved_line() const;
     monotonic_clock::time_point unobserved_line_end() const;
     monotonic_clock::time_point unobserved_line_remote_end() const;
@@ -705,7 +704,7 @@
               std::get<1>(timestamps_[1]) -
               aos::monotonic_clock::duration(
                   static_cast<aos::monotonic_clock::duration::rep>(
-                      absl::int128(dt.count() + MaxVelocityRatio::den / 2) *
+                      absl::int128(dt.count()) *
                       absl::int128(MaxVelocityRatio::num) /
                       absl::int128(MaxVelocityRatio::den)));
 
@@ -870,7 +869,7 @@
           std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>,
           std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>>>
   InterpolateWithOtherFilter(
-      Pointer pointer, monotonic_clock::time_point ta,
+      Pointer pointer, bool use_other, monotonic_clock::time_point ta,
       std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds> t0,
       std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds> t1);
 
diff --git a/aos/network/timestamp_filter_test.cc b/aos/network/timestamp_filter_test.cc
index 5932e3d..5d8eeeb 100644
--- a/aos/network/timestamp_filter_test.cc
+++ b/aos/network/timestamp_filter_test.cc
@@ -34,8 +34,73 @@
     return std::make_tuple(BootTimestamp{0, std::get<0>(result)},
                            BootDuration{0, std::get<1>(result)});
   }
+
+  logger::BootDuration Offset(const TestingNoncausalTimestampFilter *other,
+                              Pointer pointer, logger::BootTimestamp ta,
+                              size_t sample_boot) const {
+    return {sample_boot,
+            filter(ta.boot, sample_boot)
+                ->filter
+                .Offset(other == nullptr
+                            ? nullptr
+                            : &other->filter(sample_boot, ta.boot)->filter,
+                        pointer, ta.time)
+                .second};
+  }
+
+  std::pair<logger::BootDuration, double> Offset(
+      const TestingNoncausalTimestampFilter *other, Pointer pointer,
+      logger::BootTimestamp ta_base, double ta, size_t sample_boot) const {
+    std::pair<Pointer, std::pair<std::chrono::nanoseconds, double>> result =
+        filter(ta_base.boot, sample_boot)
+            ->filter.Offset(
+                other == nullptr
+                    ? nullptr
+                    : &other->filter(sample_boot, ta_base.boot)->filter,
+                pointer, ta_base.time, ta);
+    return std::make_pair(
+        logger::BootDuration{sample_boot, result.second.first},
+        result.second.second);
+  }
+
+  std::pair<logger::BootDuration, double> BoundsOffset(
+      const TestingNoncausalTimestampFilter *other, Pointer pointer,
+      logger::BootTimestamp ta_base, double ta, size_t sample_boot) const {
+    std::pair<Pointer, std::pair<std::chrono::nanoseconds, double>> result =
+        filter(ta_base.boot, sample_boot)
+            ->filter.BoundsOffset(
+                other == nullptr
+                    ? nullptr
+                    : &other->filter(sample_boot, ta_base.boot)->filter,
+                pointer, ta_base.time, ta);
+    return std::make_pair(
+        logger::BootDuration{sample_boot, result.second.first},
+        result.second.second);
+  }
 };
 
+void NormalizeTimestamps(monotonic_clock::time_point *ta_base, double *ta) {
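+  // Split ta into an integer base and a double in [0.0, 1.0), matching the
+  // normalization the filter does before interpolating.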
+  double ta_orig = *ta;
+  chrono::nanoseconds ta_digits(static_cast<int64_t>(std::floor(*ta)));
+  *ta_base += ta_digits;
+  *ta -= static_cast<double>(ta_digits.count());
+
+  // Sigh, numerical precision wins again.
+  //   *ta_base=1000.300249970sec, *ta=-1.35525e-20
+  // We then promptly round this to
+  //   *ta_base=1000.300249969sec, *ta=1
+  // The 1.0 then breaks the LT assumption below, so we kersplat.
+  //
+  // Detect this case directly and move the 1.0 back into ta_base.
+  if (*ta == 1.0) {
+    *ta = 0.0;
+    *ta_base += chrono::nanoseconds(1);
+  }
+
+  CHECK_GE(*ta, 0.0) << ta_digits.count() << "ns " << ta_orig;
+  CHECK_LT(*ta, 1.0);
+}
+
 // Tests that adding samples tracks more negative offsets down quickly, and
 // slowly comes back up.
 TEST(TimestampFilterTest, Sample) {
@@ -246,6 +311,28 @@
   }
 
   {
+    const BootTimestamp ta_close{
+        0, monotonic_clock::time_point(chrono::nanoseconds(0))};
+    const BootTimestamp tb_close{
+        0, monotonic_clock::time_point(chrono::nanoseconds(1999))};
+
+    // Confirm that we round the first point correctly.  We should always round
+    // the slope down to avoid invalid slopes.
+    TestingNoncausalTimestampFilter filter(node_a, node_b);
+
+    filter.Sample(ta_close, {0, chrono::microseconds(-10)});
+    filter.Debug();
+    filter.Sample(tb_close, {0, chrono::microseconds(0)});
+    filter.Debug();
+    ASSERT_EQ(filter.timestamps_size(), 2u);
+
+    EXPECT_EQ(std::get<1>(filter.timestamp(0)),
+              (BootDuration{0, -chrono::nanoseconds(1)}));
+    EXPECT_EQ(std::get<1>(filter.timestamp(1)),
+              (BootDuration{0, chrono::nanoseconds(0)}));
+  }
+
+  {
     // Too much positive slope removes points.
     TestingNoncausalTimestampFilter filter(node_a, node_b);
 
@@ -775,31 +862,23 @@
 
   const monotonic_clock::time_point t1 = e + chrono::nanoseconds(10000);
   const chrono::nanoseconds o1 = chrono::nanoseconds(100);
-  const double o1d = static_cast<double>(o1.count());
 
   const monotonic_clock::time_point t2 = t1 + chrono::nanoseconds(1000);
   const chrono::nanoseconds o2 = chrono::nanoseconds(150);
-  const double o2d = static_cast<double>(o2.count());
 
   EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffset(
                 std::make_tuple(t1, o1), std::make_tuple(t2, o2), t1),
             o1);
   EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffset(
                 std::make_tuple(t1, o1), std::make_tuple(t2, o2), t1, 0.0),
-            o1);
-  EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffsetRemainder(
-                std::make_tuple(t1, o1), std::make_tuple(t2, o2), t1, 0.0),
-            0.0);
+            std::make_pair(o1, 0.0));
 
   EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffset(
                 std::make_tuple(t1, o1), std::make_tuple(t2, o2), t2),
             o2);
   EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffset(
                 std::make_tuple(t1, o1), std::make_tuple(t2, o2), t2, 0.0),
-            o1);
-  EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffsetRemainder(
-                std::make_tuple(t1, o1), std::make_tuple(t2, o2), t2, 0.0),
-            o2d - o1d);
+            std::make_pair(o2, 0.0));
 
   EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffset(
                 std::make_tuple(t1, o1), std::make_tuple(t2, o2),
@@ -808,56 +887,87 @@
   EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffset(
                 std::make_tuple(t1, o1), std::make_tuple(t2, o2),
                 t1 + chrono::nanoseconds(500), 0.0),
-            o1);
-  EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffsetRemainder(
-                std::make_tuple(t1, o1), std::make_tuple(t2, o2),
-                t1 + chrono::nanoseconds(500), 0.0),
-            25.);
+            std::make_pair(o1 + chrono::nanoseconds(25), 0.0));
 
   EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffset(
                 std::make_tuple(t1, o1), std::make_tuple(t2, o2),
                 t1 + chrono::nanoseconds(-200)),
             chrono::nanoseconds(90));
   EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffset(
-                std::make_tuple(t1, o1), std::make_tuple(t2, o2), t1, -200.),
-            o1);
-  EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffsetRemainder(
-                std::make_tuple(t1, o1), std::make_tuple(t2, o2), t1, -200.),
-            -10.);
+                std::make_tuple(t1, o1), std::make_tuple(t2, o2),
+                t1 - chrono::nanoseconds(200), 0.0),
+            std::make_pair(o1 - chrono::nanoseconds(10), 0.0));
 
   EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffset(
                 std::make_tuple(t1, o1), std::make_tuple(t2, o2),
                 t1 + chrono::nanoseconds(200)),
             chrono::nanoseconds(110));
   EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffset(
-                std::make_tuple(t1, o1), std::make_tuple(t2, o2), t1, 200.),
-            o1);
-  EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffsetRemainder(
-                std::make_tuple(t1, o1), std::make_tuple(t2, o2), t1, 200.),
-            10.);
+                std::make_tuple(t1, o1), std::make_tuple(t2, o2),
+                t1 + chrono::nanoseconds(200), 0.0),
+            std::make_pair(o1 + chrono::nanoseconds(10), 0.0));
 
   EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffset(
                 std::make_tuple(t1, o1), std::make_tuple(t2, o2),
                 t1 + chrono::nanoseconds(800)),
             chrono::nanoseconds(140));
   EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffset(
-                std::make_tuple(t1, o1), std::make_tuple(t2, o2), t1, 800.),
-            o1);
-  EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffsetRemainder(
-                std::make_tuple(t1, o1), std::make_tuple(t2, o2), t1, 800.),
-            40.);
+                std::make_tuple(t1, o1), std::make_tuple(t2, o2),
+                t1 + chrono::nanoseconds(800), 0.0),
+            std::make_pair(o1 + chrono::nanoseconds(40), 0.0));
 
   EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffset(
                 std::make_tuple(t1, o1), std::make_tuple(t2, o2),
                 t1 + chrono::nanoseconds(1200)),
             chrono::nanoseconds(160));
   EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffset(
-                std::make_tuple(t1, o1), std::make_tuple(t2, o2), t1, 1200.),
-            o1);
-  EXPECT_EQ(NoncausalTimestampFilter::InterpolateOffsetRemainder(
-                std::make_tuple(t1, o1), std::make_tuple(t2, o2), t1, 1200.),
-            60.);
+                std::make_tuple(t1, o1), std::make_tuple(t2, o2),
+                t1 + chrono::nanoseconds(1200), 0.0),
+            std::make_pair(o1 + chrono::nanoseconds(60), 0.0));
 
+  for (int i = -MaxVelocityRatio::den * MaxVelocityRatio::num * 6;
+       i <
+       MaxVelocityRatio::den * MaxVelocityRatio::num * 6 + (t2 - t1).count();
+       ++i) {
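+    // Sweep ta from well before t1 to well past t2 in 1/3 ns steps so we hit
+    // the various rounding cases.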
+    monotonic_clock::time_point ta_base = t1;
+    const double ta_orig = static_cast<double>(i) / 3.0;
+    double ta = ta_orig;
+
+    NormalizeTimestamps(&ta_base, &ta);
+    CHECK_GE(ta, 0.0);
+    CHECK_LT(ta, 1.0);
+
+    const chrono::nanoseconds expected_offset =
+        NoncausalTimestampFilter::InterpolateOffset(
+            std::make_tuple(t1, o1), std::make_tuple(t2, o2), ta_base);
+
+    const std::pair<chrono::nanoseconds, double> offset =
+        NoncausalTimestampFilter::InterpolateOffset(
+            std::make_tuple(t1, o1), std::make_tuple(t2, o2), ta_base, ta);
+    EXPECT_EQ(expected_offset, offset.first);
+
+    const double expected_double_offset =
+        static_cast<double>(o1.count()) +
+        static_cast<double>(ta_orig) / static_cast<double>((t2 - t1).count()) *
+            (o2 - o1).count();
+
+    EXPECT_NEAR(static_cast<double>(offset.first.count()) + offset.second,
+                expected_double_offset, 1e-9)
+        << ": i " << i << " t " << ta_base << " " << ta << " t1 " << t1
+        << " o1 " << o1.count() << "ns t2 " << t2 << " o2 " << o2.count()
+        << "ns Non-rounded: " << expected_offset.count() << "ns";
+  }
+}
+
+// Tests that all variants of ExtrapolateOffset do reasonable things.
+TEST_F(NoncausalTimestampFilterTest, ExtrapolateOffset) {
+  const monotonic_clock::time_point e = monotonic_clock::epoch();
+
+  const monotonic_clock::time_point t1 = e + chrono::nanoseconds(10000);
+  const chrono::nanoseconds o1 = chrono::nanoseconds(100);
+
+  const monotonic_clock::time_point t2 = t1 + chrono::nanoseconds(1000);
+  const chrono::nanoseconds o2 = chrono::nanoseconds(150);
   // Test extrapolation functions before t1 and after t2
   EXPECT_EQ(
       NoncausalTimestampFilter::ExtrapolateOffset(std::make_tuple(t1, o1), t1),
@@ -889,24 +999,14 @@
   // Test base + double version
   EXPECT_EQ(NoncausalTimestampFilter::ExtrapolateOffset(std::make_tuple(t1, o1),
                                                         e, 0.),
-            o1);
-  EXPECT_EQ(NoncausalTimestampFilter::ExtrapolateOffsetRemainder(
-                std::make_tuple(t1, o1), e, 0.),
-            -(t1 - e).count() * kMaxVelocity());
-
+            std::make_pair(chrono::nanoseconds(90), 0.0));
   EXPECT_EQ(NoncausalTimestampFilter::ExtrapolateOffset(std::make_tuple(t1, o1),
                                                         t1, 0.),
-            o1);
-  EXPECT_EQ(NoncausalTimestampFilter::ExtrapolateOffsetRemainder(
-                std::make_tuple(t1, o1), t1, 0.),
-            0.);
+            std::make_pair(o1, 0.0));
 
   EXPECT_EQ(NoncausalTimestampFilter::ExtrapolateOffset(std::make_tuple(t1, o1),
-                                                        t1, -1000.),
-            o1);
-  EXPECT_EQ(NoncausalTimestampFilter::ExtrapolateOffsetRemainder(
-                std::make_tuple(t1, o1), t1, -1000.),
-            -1000. * kMaxVelocity());
+                                                        t1, 0.5),
+            std::make_pair(o1, -0.5 * kMaxVelocity()));
 
   EXPECT_EQ(
       NoncausalTimestampFilter::ExtrapolateOffset(std::make_tuple(t2, o2), t2),
@@ -914,10 +1014,7 @@
 
   EXPECT_EQ(NoncausalTimestampFilter::ExtrapolateOffset(std::make_tuple(t2, o2),
                                                         t2, 0.0),
-            o2);
-  EXPECT_EQ(NoncausalTimestampFilter::ExtrapolateOffsetRemainder(
-                std::make_tuple(t2, o2), t2, 0.0),
-            0.0);
+            std::make_pair(o2, 0.0));
 
   // Test points past our last sample
   EXPECT_EQ(NoncausalTimestampFilter::ExtrapolateOffset(
@@ -925,12 +1022,116 @@
             chrono::nanoseconds(
                 static_cast<int64_t>(o2.count() - 10000. * kMaxVelocity())));
 
-  EXPECT_EQ(NoncausalTimestampFilter::ExtrapolateOffset(std::make_tuple(t2, o2),
-                                                        t2, 100.0),
+  EXPECT_EQ(
+      NoncausalTimestampFilter::ExtrapolateOffset(
+          std::make_tuple(t2, o2), t2 + chrono::nanoseconds(10000), 0.5),
+      std::make_pair(o2 - chrono::nanoseconds(10), -0.5 * kMaxVelocity()));
+
+  // Now, test that offset + remainder functions add up to the right answer for
+  // a lot of cases.  This is enough to catch all the various rounding cases.
+  for (int i = -MaxVelocityRatio::den * MaxVelocityRatio::num * 6;
+       i < MaxVelocityRatio::den * MaxVelocityRatio::num * 4; ++i) {
+    monotonic_clock::time_point ta_base = t1;
+    const double ta_orig = static_cast<double>(i) / 3.0;
+    double ta = ta_orig;
+
+    NormalizeTimestamps(&ta_base, &ta);
+    CHECK_GE(ta, 0.0);
+    CHECK_LT(ta, 1.0);
+
+    const chrono::nanoseconds expected_offset =
+        NoncausalTimestampFilter::ExtrapolateOffset(std::make_tuple(t1, o1),
+                                                    ta_base);
+
+    std::pair<chrono::nanoseconds, double> offset =
+        NoncausalTimestampFilter::ExtrapolateOffset(std::make_tuple(t1, o1),
+                                                    ta_base, ta);
+
+    EXPECT_EQ(expected_offset, offset.first);
+    EXPECT_NEAR(
+        static_cast<double>(offset.first.count()) + offset.second,
+        static_cast<double>(o1.count()) - std::abs(ta_orig) * kMaxVelocity(),
+        1e-9)
+        << ": i " << i << " t " << ta_base << " " << ta
+        << " Non-rounded: " << expected_offset.count() << "ns";
+  }
+}
+
+// Tests that all variants of BoundOffset do reasonable things.
+TEST_F(NoncausalTimestampFilterTest, BoundOffset) {
+  const monotonic_clock::time_point e = monotonic_clock::epoch();
+
+  const monotonic_clock::time_point t1 = e + chrono::nanoseconds(10000);
+  const chrono::nanoseconds o1 = chrono::nanoseconds(100);
+
+  const monotonic_clock::time_point t2 = t1 + chrono::nanoseconds(100000);
+  const chrono::nanoseconds o2 = chrono::nanoseconds(150);
+
+  EXPECT_EQ(NoncausalTimestampFilter::BoundOffset(std::make_tuple(t1, o1),
+                                                  std::make_tuple(t2, o2), t1),
+            o1);
+  EXPECT_EQ(NoncausalTimestampFilter::BoundOffset(
+                std::make_tuple(t1, o1), std::make_tuple(t2, o2), t1, 0.0),
+            std::pair(o1, 0.0));
+
+  EXPECT_EQ(NoncausalTimestampFilter::BoundOffset(std::make_tuple(t1, o1),
+                                                  std::make_tuple(t2, o2), t2),
             o2);
-  EXPECT_EQ(NoncausalTimestampFilter::ExtrapolateOffsetRemainder(
-                std::make_tuple(t2, o2), t2, 100.0),
-            -100.0 * kMaxVelocity());
+  EXPECT_EQ(NoncausalTimestampFilter::BoundOffset(
+                std::make_tuple(t1, o1), std::make_tuple(t2, o2), t2, 0.0),
+            std::pair(o2, 0.0));
+
+  // Iterate from before t1 to after t2 and confirm that the solution is right.
+  // The result must always be >= the interpolated offset, and must equal the
+  // max of extrapolating from both points.  Since the numbers are small (by
+  // construction!), the double calculation will be close enough that we can
+  // trust it.
+
+  for (int i = -MaxVelocityRatio::den * MaxVelocityRatio::num * 6;
+       i <
+       MaxVelocityRatio::den * MaxVelocityRatio::num * 6 + (t2 - t1).count();
+       ++i) {
+    monotonic_clock::time_point ta_base = t1;
+    const double ta_orig = static_cast<double>(i) / 3.0;
+    double ta = ta_orig;
+
+    NormalizeTimestamps(&ta_base, &ta);
+    CHECK_GE(ta, 0.0);
+    CHECK_LT(ta, 1.0);
+
+    const chrono::nanoseconds expected_offset_1 =
+        NoncausalTimestampFilter::ExtrapolateOffset(std::make_tuple(t1, o1),
+                                                    ta_base);
+    const chrono::nanoseconds expected_offset_2 =
+        NoncausalTimestampFilter::ExtrapolateOffset(std::make_tuple(t2, o2),
+                                                    ta_base);
+
+    // Each of the extrapolation functions has its max at its point.  They
+    // slope up before and down after.  So, we want the max.
+    //
+    //
+    //   p0  p1                                                               |
+    //  /  \/  \                                                              |
+    // /        \                                                             |
+
+    const std::pair<chrono::nanoseconds, double> offset =
+        NoncausalTimestampFilter::BoundOffset(
+            std::make_tuple(t1, o1), std::make_tuple(t2, o2), ta_base, ta);
+
+    EXPECT_EQ(std::max(expected_offset_1, expected_offset_2), offset.first);
+
+    const double expected_double_offset = std::max(
+        static_cast<double>(o1.count()) - std::abs(ta_orig) * kMaxVelocity(),
+        static_cast<double>(o2.count()) -
+            std::abs(ta_orig - (t2 - t1).count()) * kMaxVelocity());
+
+    EXPECT_NEAR(static_cast<double>(offset.first.count()) + offset.second,
+                expected_double_offset, 1e-9)
+        << ": i " << i << " t " << ta_base << " " << ta << " t1 " << t1
+        << " o1 " << o1.count() << "ns t2 " << t2 << " o2 " << o2.count()
+        << "ns Non-rounded: "
+        << std::max(expected_offset_1, expected_offset_2).count() << "ns";
+  }
 }
 
 // Tests that FindTimestamps finds timestamps in a sequence.
@@ -956,122 +1157,122 @@
   filter.Sample(t2, o2);
   filter.Sample(t3, o3);
 
-  result = filter.FindTimestamps(nullptr, Pointer(),
+  result = filter.FindTimestamps(nullptr, true, Pointer(),
                                  e - chrono::microseconds(10), 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(t1, o1)),
                               ::testing::Eq(std::make_tuple(t2, o2))));
-  EXPECT_EQ(result, filter.FindTimestamps(nullptr, result.first,
+  EXPECT_EQ(result, filter.FindTimestamps(nullptr, true, result.first,
                                           e - chrono::microseconds(10), 0));
 
-  result = filter.FindTimestamps(nullptr, Pointer(),
+  result = filter.FindTimestamps(nullptr, true, Pointer(),
                                  e - chrono::microseconds(10), 0.9, 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(t1, o1)),
                               ::testing::Eq(std::make_tuple(t2, o2))));
   EXPECT_EQ(result,
-            filter.FindTimestamps(nullptr, result.first,
+            filter.FindTimestamps(nullptr, true, result.first,
                                   e - chrono::microseconds(10), 0.9, 0));
 
-  result =
-      filter.FindTimestamps(nullptr, Pointer(), e + chrono::microseconds(0), 0);
+  result = filter.FindTimestamps(nullptr, true, Pointer(),
+                                 e + chrono::microseconds(0), 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(t1, o1)),
                               ::testing::Eq(std::make_tuple(t2, o2))));
-  EXPECT_EQ(result, filter.FindTimestamps(nullptr, result.first,
+  EXPECT_EQ(result, filter.FindTimestamps(nullptr, true, result.first,
                                           e + chrono::microseconds(0), 0));
 
-  result = filter.FindTimestamps(nullptr, Pointer(),
+  result = filter.FindTimestamps(nullptr, true, Pointer(),
                                  e + chrono::microseconds(0), 0.8, 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(t1, o1)),
                               ::testing::Eq(std::make_tuple(t2, o2))));
-  EXPECT_EQ(result, filter.FindTimestamps(nullptr, result.first,
+  EXPECT_EQ(result, filter.FindTimestamps(nullptr, true, result.first,
                                           e + chrono::microseconds(0), 0.8, 0));
 
-  result = filter.FindTimestamps(nullptr, Pointer(),
+  result = filter.FindTimestamps(nullptr, true, Pointer(),
                                  e + chrono::microseconds(100), 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(t1, o1)),
                               ::testing::Eq(std::make_tuple(t2, o2))));
-  EXPECT_EQ(result, filter.FindTimestamps(nullptr, result.first,
+  EXPECT_EQ(result, filter.FindTimestamps(nullptr, true, result.first,
                                           e + chrono::microseconds(100), 0));
 
-  result = filter.FindTimestamps(nullptr, Pointer(),
+  result = filter.FindTimestamps(nullptr, true, Pointer(),
                                  e + chrono::microseconds(100), 0.7, 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(t1, o1)),
                               ::testing::Eq(std::make_tuple(t2, o2))));
   EXPECT_EQ(result,
-            filter.FindTimestamps(nullptr, result.first,
+            filter.FindTimestamps(nullptr, true, result.first,
                                   e + chrono::microseconds(100), 0.7, 0));
 
-  result = filter.FindTimestamps(nullptr, Pointer(),
+  result = filter.FindTimestamps(nullptr, true, Pointer(),
                                  e + chrono::microseconds(1000), 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(t2, o2)),
                               ::testing::Eq(std::make_tuple(t3, o3))));
-  EXPECT_EQ(result, filter.FindTimestamps(nullptr, result.first,
+  EXPECT_EQ(result, filter.FindTimestamps(nullptr, true, result.first,
                                           e + chrono::microseconds(1000), 0));
 
-  result = filter.FindTimestamps(nullptr, Pointer(),
+  result = filter.FindTimestamps(nullptr, true, Pointer(),
                                  e + chrono::microseconds(1000), 0.0, 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(t2, o2)),
                               ::testing::Eq(std::make_tuple(t3, o3))));
   EXPECT_EQ(result,
-            filter.FindTimestamps(nullptr, result.first,
+            filter.FindTimestamps(nullptr, true, result.first,
                                   e + chrono::microseconds(1000), 0.0, 0));
 
-  result = filter.FindTimestamps(nullptr, Pointer(),
+  result = filter.FindTimestamps(nullptr, true, Pointer(),
                                  e + chrono::microseconds(1500), 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(t2, o2)),
                               ::testing::Eq(std::make_tuple(t3, o3))));
-  EXPECT_EQ(result, filter.FindTimestamps(nullptr, result.first,
+  EXPECT_EQ(result, filter.FindTimestamps(nullptr, true, result.first,
                                           e + chrono::microseconds(1500), 0));
 
-  result = filter.FindTimestamps(nullptr, Pointer(),
+  result = filter.FindTimestamps(nullptr, true, Pointer(),
                                  e + chrono::microseconds(1500), 0.0, 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(t2, o2)),
                               ::testing::Eq(std::make_tuple(t3, o3))));
   EXPECT_EQ(result,
-            filter.FindTimestamps(nullptr, result.first,
+            filter.FindTimestamps(nullptr, true, result.first,
                                   e + chrono::microseconds(1500), 0.0, 0));
 
-  result = filter.FindTimestamps(nullptr, Pointer(),
+  result = filter.FindTimestamps(nullptr, true, Pointer(),
                                  e + chrono::microseconds(2000), 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(t2, o2)),
                               ::testing::Eq(std::make_tuple(t3, o3))));
-  EXPECT_EQ(result, filter.FindTimestamps(nullptr, result.first,
+  EXPECT_EQ(result, filter.FindTimestamps(nullptr, true, result.first,
                                           e + chrono::microseconds(2000), 0));
-  result = filter.FindTimestamps(nullptr, Pointer(),
+  result = filter.FindTimestamps(nullptr, true, Pointer(),
                                  e + chrono::microseconds(2000), 0.1, 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(t2, o2)),
                               ::testing::Eq(std::make_tuple(t3, o3))));
   EXPECT_EQ(result,
-            filter.FindTimestamps(nullptr, result.first,
+            filter.FindTimestamps(nullptr, true, result.first,
                                   e + chrono::microseconds(2000), 0.1, 0));
 
-  result = filter.FindTimestamps(nullptr, Pointer(),
+  result = filter.FindTimestamps(nullptr, true, Pointer(),
                                  e + chrono::microseconds(2500), 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(t2, o2)),
                               ::testing::Eq(std::make_tuple(t3, o3))));
-  EXPECT_EQ(result, filter.FindTimestamps(nullptr, result.first,
+  EXPECT_EQ(result, filter.FindTimestamps(nullptr, true, result.first,
                                           e + chrono::microseconds(2500), 0));
 
-  result = filter.FindTimestamps(nullptr, Pointer(),
+  result = filter.FindTimestamps(nullptr, true, Pointer(),
                                  e + chrono::microseconds(2500), 0.0, 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(t2, o2)),
                               ::testing::Eq(std::make_tuple(t3, o3))));
   EXPECT_EQ(result,
-            filter.FindTimestamps(nullptr, result.first,
+            filter.FindTimestamps(nullptr, true, result.first,
                                   e + chrono::microseconds(2500), 0.0, 0));
 }
 
@@ -1114,68 +1315,145 @@
 
   // Confirm the problem statement is reasonable...  We've had enough trouble
   // here in the past.
-  EXPECT_TRUE(
-      filter_a.ValidateSolution(&filter_b, Pointer(), t1_a, t1_a + o1_a + chrono::nanoseconds(1)));
-  EXPECT_TRUE(
-      filter_a.ValidateSolution(&filter_b, Pointer(), t2_a, t2_a + o2_a + chrono::nanoseconds(1)));
+  EXPECT_TRUE(filter_a.ValidateSolution(&filter_b, Pointer(), t1_a,
+                                        t1_a + o1_a + chrono::nanoseconds(1)));
+  EXPECT_TRUE(filter_a.ValidateSolution(&filter_b, Pointer(), t2_a,
+                                        t2_a + o2_a + chrono::nanoseconds(1)));
 
-  EXPECT_TRUE(
-      filter_b.ValidateSolution(&filter_a, Pointer(), t1_b, t1_b + o1_b + chrono::nanoseconds(1)));
-  EXPECT_TRUE(
-      filter_b.ValidateSolution(&filter_a, Pointer(), t2_b, t2_b + o2_b + chrono::nanoseconds(1)));
-  EXPECT_TRUE(
-      filter_b.ValidateSolution(&filter_a, Pointer(), t3_b, t3_b + o3_b + chrono::nanoseconds(1)));
+  EXPECT_TRUE(filter_b.ValidateSolution(&filter_a, Pointer(), t1_b,
+                                        t1_b + o1_b + chrono::nanoseconds(1)));
+  EXPECT_TRUE(filter_b.ValidateSolution(&filter_a, Pointer(), t2_b,
+                                        t2_b + o2_b + chrono::nanoseconds(1)));
+  EXPECT_TRUE(filter_b.ValidateSolution(&filter_a, Pointer(), t3_b,
+                                        t3_b + o3_b + chrono::nanoseconds(1)));
 
   // Before the start
-  result = filter_a.FindTimestamps(&filter_b, Pointer(),
+  result = filter_a.FindTimestamps(&filter_b, true, Pointer(),
                                    e - chrono::microseconds(10), 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(t1_a, o1_a)),
                               ::testing::Eq(std::make_tuple(
                                   t2_b + o2_b, -o2_b - kMinNetworkDelay()))));
-  EXPECT_EQ(result, filter_a.FindTimestamps(&filter_b, result.first,
+  EXPECT_EQ(result, filter_a.FindTimestamps(&filter_b, true, result.first,
                                             e - chrono::microseconds(10), 0));
 
   // Before the first opposite point.
-  result = filter_a.FindTimestamps(&filter_b, Pointer(),
+  result = filter_a.FindTimestamps(&filter_b, true, Pointer(),
                                    e + chrono::microseconds(10), 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(t1_a, o1_a)),
                               ::testing::Eq(std::make_tuple(
                                   t2_b + o2_b, -o2_b - kMinNetworkDelay()))));
-  EXPECT_EQ(result, filter_a.FindTimestamps(&filter_b, result.first,
+  EXPECT_EQ(result, filter_a.FindTimestamps(&filter_b, true, result.first,
                                             e + chrono::microseconds(10), 0));
 
   // Between the two opposite points.
-  result = filter_a.FindTimestamps(&filter_b, Pointer(),
+  result = filter_a.FindTimestamps(&filter_b, true, Pointer(),
                                    e + chrono::microseconds(250), 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(
                                   t2_b + o2_b, -o2_b - kMinNetworkDelay())),
                               ::testing::Eq(std::make_tuple(
                                   t3_b + o3_b, -o3_b - kMinNetworkDelay()))));
-  EXPECT_EQ(result, filter_a.FindTimestamps(&filter_b, result.first,
+  EXPECT_EQ(result, filter_a.FindTimestamps(&filter_b, true, result.first,
                                             e + chrono::microseconds(250), 0));
 
   // After the last opposite point.
-  result = filter_a.FindTimestamps(&filter_b, Pointer(),
+  result = filter_a.FindTimestamps(&filter_b, true, Pointer(),
                                    e + chrono::microseconds(450), 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(
                                   t3_b + o3_b, -o3_b - kMinNetworkDelay())),
                               ::testing::Eq(std::make_tuple(t2_a, o2_a))));
-  EXPECT_EQ(result, filter_a.FindTimestamps(&filter_b, result.first,
+  EXPECT_EQ(result, filter_a.FindTimestamps(&filter_b, true, result.first,
                                             e + chrono::microseconds(450), 0));
 
   // And after the end.
-  result = filter_a.FindTimestamps(&filter_b, Pointer(),
+  result = filter_a.FindTimestamps(&filter_b, true, Pointer(),
                                    e + chrono::microseconds(1100), 0);
   EXPECT_THAT(result.second,
               ::testing::Pair(::testing::Eq(std::make_tuple(
                                   t3_b + o3_b, -o3_b - kMinNetworkDelay())),
                               ::testing::Eq(std::make_tuple(t2_a, o2_a))));
-  EXPECT_EQ(result, filter_a.FindTimestamps(&filter_b, result.first,
+  EXPECT_EQ(result, filter_a.FindTimestamps(&filter_b, true, result.first,
                                             e + chrono::microseconds(1100), 0));
+
+  // And make sure that FindTimestamps with the "use_other == false" flag
+  // returns the same answer as passing no other filter at all.
+  for (BootTimestamp t = t1_a - chrono::microseconds(500);
+       t < t2_a + chrono::microseconds(500); t += chrono::microseconds(100)) {
+    EXPECT_EQ(filter_a.FindTimestamps(&filter_b, false, Pointer(), t, 0),
+              filter_a.FindTimestamps(nullptr, true, Pointer(), t, 0));
+  }
+}
+
+// Tests that we can validate a solution reasonably.
+TEST_F(NoncausalTimestampFilterTest, ValidateSolution) {
+  const BootTimestamp e{0, monotonic_clock::epoch()};
+  // Note: t1 and t2 need to be picked such that the slop is small so the
+  // filter doesn't modify the timestamps.
+  const BootTimestamp t1_a = e + chrono::nanoseconds(0);
+  const BootDuration o1_a{0, chrono::nanoseconds(100)};
+  const BootTimestamp t2_a = e + chrono::microseconds(1000);
+  const BootDuration o2_a{0, chrono::nanoseconds(100)};
+
+  const BootTimestamp tmid_a = e + chrono::microseconds(500);
+  const BootDuration omid_a{0, chrono::nanoseconds(-400)};
+
+  const BootTimestamp tbefore_a = e - chrono::microseconds(500);
+  const BootDuration obefore_a{0, chrono::nanoseconds(-400)};
+  const BootTimestamp tafter_a = e + chrono::microseconds(1500);
+  const BootDuration oafter_a{0, chrono::nanoseconds(-400)};
+
+  TestingNoncausalTimestampFilter filter_a(node_a, node_b);
+  TestingNoncausalTimestampFilter filter_b(node_b, node_a);
+
+  std::pair<Pointer,
+            std::pair<std::tuple<logger::BootTimestamp, logger::BootDuration>,
+                      std::tuple<logger::BootTimestamp, logger::BootDuration>>>
+      result;
+
+  filter_a.Sample(t1_a, o1_a);
+  filter_a.Sample(t2_a, o2_a);
+
+  // At the control points, we should see that the boundary is right at the
+  // edge.
+  EXPECT_TRUE(filter_a.ValidateSolution(&filter_b, Pointer(), t1_a,
+                                        t1_a + o1_a + chrono::nanoseconds(1)));
+  EXPECT_TRUE(filter_a.ValidateSolution(&filter_b, Pointer(), t1_a, 0.0,
+                                        t1_a + o1_a, 0.00001));
+  EXPECT_FALSE(filter_a.ValidateSolution(&filter_b, Pointer(), t1_a,
+                                         t1_a + o1_a - chrono::nanoseconds(1)));
+  EXPECT_FALSE(filter_a.ValidateSolution(&filter_b, Pointer(), t1_a, 0.0,
+                                         t1_a + o1_a, -0.0001));
+
+  EXPECT_TRUE(filter_a.ValidateSolution(&filter_b, Pointer(), t2_a,
+                                        t2_a + o2_a + chrono::nanoseconds(1)));
+  EXPECT_TRUE(filter_a.ValidateSolution(&filter_b, Pointer(), t2_a, 0.0,
+                                        t2_a + o2_a, 0.00001));
+  EXPECT_FALSE(filter_a.ValidateSolution(&filter_b, Pointer(), t2_a,
+                                         t2_a + o2_a - chrono::nanoseconds(1)));
+  EXPECT_FALSE(filter_a.ValidateSolution(&filter_b, Pointer(), t2_a, 0.0,
+                                         t2_a + o2_a, -0.00001));
+
+  // Now that we've checked the control points, check in the middle to confirm
+  // it looks like we are using BoundsOffset rather than interpolating.
+  EXPECT_TRUE(filter_a.ValidateSolution(
+      &filter_b, Pointer(), tmid_a, tmid_a + omid_a + chrono::nanoseconds(1)));
+  EXPECT_TRUE(filter_a.ValidateSolution(&filter_b, Pointer(), tmid_a, 0.0,
+                                        tmid_a + omid_a, 0.00001));
+
+  EXPECT_FALSE(filter_a.ValidateSolution(
+      &filter_b, Pointer(), tbefore_a,
+      tbefore_a + obefore_a - chrono::nanoseconds(1)));
+  EXPECT_FALSE(filter_a.ValidateSolution(&filter_b, Pointer(), tbefore_a, 0.0,
+                                         tbefore_a + obefore_a, -0.00001));
+
+  EXPECT_FALSE(
+      filter_a.ValidateSolution(&filter_b, Pointer(), tafter_a,
+                                tafter_a + oafter_a - chrono::nanoseconds(1)));
+  EXPECT_FALSE(filter_a.ValidateSolution(&filter_b, Pointer(), tafter_a, 0.0,
+                                         tafter_a + oafter_a, -0.00001));
 }
 
 // Tests that Offset returns results indicative of it calling InterpolateOffset
@@ -1186,15 +1464,12 @@
   // doesn't modify the timestamps.
   const BootTimestamp t1 = e + chrono::nanoseconds(1000);
   const BootDuration o1{0, chrono::nanoseconds(100)};
-  const double o1d = static_cast<double>(o1.duration.count());
 
   const BootTimestamp t2 = e + chrono::microseconds(2000);
   const BootDuration o2{0, chrono::nanoseconds(150)};
-  const double o2d = static_cast<double>(o2.duration.count());
 
   const BootTimestamp t3 = e + chrono::microseconds(3000);
   const BootDuration o3{0, chrono::nanoseconds(50)};
-  const double o3d = static_cast<double>(o3.duration.count());
 
   const BootTimestamp t4 = e + chrono::microseconds(4000);
 
@@ -1206,26 +1481,46 @@
   EXPECT_EQ(filter.Offset(nullptr, Pointer(), t1, 0), o1);
   EXPECT_EQ(filter.Offset(nullptr, Pointer(), t1, 0.0, 0),
             std::make_pair(o1, 0.0));
+  EXPECT_EQ(filter.BoundsOffset(nullptr, Pointer(), t1, 0.0, 0),
+            std::make_pair(o1, 0.0));
   // Check if we ask for something away from point that we get an offset
   // based on the MaxVelocity allowed
   const double offset_pre = -(t1.time - e.time).count() * kMaxVelocity();
   EXPECT_EQ(filter.Offset(nullptr, Pointer(), e, 0),
             o1 + chrono::nanoseconds(static_cast<int64_t>(offset_pre)));
-  EXPECT_EQ(filter.Offset(nullptr, Pointer(), e, 0.0, 0),
-            std::make_pair(o1, offset_pre));
+  EXPECT_EQ(
+      filter.Offset(nullptr, Pointer(), e, 0.0, 0),
+      std::make_pair(o1 + chrono::nanoseconds(static_cast<int64_t>(offset_pre)),
+                     0.0));
+  EXPECT_EQ(
+      filter.BoundsOffset(nullptr, Pointer(), e, 0.0, 0),
+      std::make_pair(o1 + chrono::nanoseconds(static_cast<int64_t>(offset_pre)),
+                     0.0));
 
   double offset_post = -(t2.time - t1.time).count() * kMaxVelocity();
   EXPECT_EQ(filter.Offset(nullptr, Pointer(), t2, 0),
             o1 + chrono::nanoseconds(static_cast<int64_t>(offset_post)));
-  EXPECT_EQ(filter.Offset(nullptr, Pointer(), t2, 0.0, 0),
-            std::make_pair(o1, offset_post));
+  EXPECT_EQ(
+      filter.Offset(nullptr, Pointer(), t2, 0.0, 0),
+      std::make_pair(
+          o1 + chrono::nanoseconds(static_cast<int64_t>(offset_post)), 0.0));
+  EXPECT_EQ(
+      filter.BoundsOffset(nullptr, Pointer(), t2, 0.0, 0),
+      std::make_pair(
+          o1 + chrono::nanoseconds(static_cast<int64_t>(offset_post)), 0.0));
 
   filter.Sample(t2, o2);
   filter.Sample(t3, o3);
 
   EXPECT_EQ(filter.Offset(nullptr, Pointer(), t1, 0), o1);
+  EXPECT_EQ(filter.BoundsOffset(nullptr, Pointer(), t1, 0.0, 0),
+            std::make_pair(o1, 0.0));
   EXPECT_EQ(filter.Offset(nullptr, Pointer(), t2, 0), o2);
+  EXPECT_EQ(filter.BoundsOffset(nullptr, Pointer(), t2, 0.0, 0),
+            std::make_pair(o2, 0.0));
   EXPECT_EQ(filter.Offset(nullptr, Pointer(), t3, 0), o3);
+  EXPECT_EQ(filter.BoundsOffset(nullptr, Pointer(), t3, 0.0, 0),
+            std::make_pair(o3, 0.0));
 
   EXPECT_EQ(filter.Offset(nullptr, Pointer(), t1, 0.0, 0),
             std::make_pair(o1, 0.0));
@@ -1234,7 +1529,7 @@
       filter.Offset(nullptr, Pointer(),
                     e + (t2.time_since_epoch() + t1.time_since_epoch()) / 2,
                     0.0, 0),
-      std::make_pair(o1, (o2d - o1d) / 2.));
+      std::make_pair(o1 + (o2 - o1) / 2, 0.0));
 
   EXPECT_EQ(filter.Offset(nullptr, Pointer(), t2, 0.0, 0),
             std::make_pair(o2, 0.0));
@@ -1243,7 +1538,7 @@
       filter.Offset(nullptr, Pointer(),
                     e + (t2.time_since_epoch() + t3.time_since_epoch()) / 2,
                     0.0, 0),
-      std::make_pair(o2, (o2d + o3d) / 2. - o2d));
+      std::make_pair((o2 + o3) / 2, 0.0));
 
   EXPECT_EQ(filter.Offset(nullptr, Pointer(), t3, 0.0, 0),
             std::make_pair(o3, 0.0));
@@ -1251,14 +1546,21 @@
   // Check that we still get same answer for times before our sample data...
   EXPECT_EQ(filter.Offset(nullptr, Pointer(), e, 0),
             o1 + chrono::nanoseconds(static_cast<int64_t>(offset_pre)));
-  EXPECT_EQ(filter.Offset(nullptr, Pointer(), e, 0.0, 0),
-            std::make_pair(o1, offset_pre));
+  EXPECT_EQ(
+      filter.Offset(nullptr, Pointer(), e, 0.0, 0),
+      std::make_pair(o1 + chrono::nanoseconds(static_cast<int64_t>(offset_pre)),
+                     0.0));
   // ... and after
   offset_post = -(t4.time - t3.time).count() * kMaxVelocity();
   EXPECT_EQ(filter.Offset(nullptr, Pointer(), t4, 0),
             (o3 + chrono::nanoseconds(static_cast<int64_t>(offset_post))));
-  EXPECT_EQ(filter.Offset(nullptr, Pointer(), t4, 0.0, 0),
-            std::make_pair(o3, offset_post));
+  EXPECT_EQ(
+      filter.Offset(nullptr, Pointer(), t4, 0.0, 0),
+      std::make_pair(
+          o3 + chrono::nanoseconds(static_cast<int64_t>(offset_post)), 0.0));
+
+  EXPECT_EQ(filter.BoundsOffset(nullptr, Pointer(), t2 + (t3 - t2) / 2, 0.0, 0),
+            std::make_pair(o2 - chrono::nanoseconds(500), 0.0));
 }
 
 // Tests that adding duplicates gets correctly deduplicated.
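
Side note on the BoundsOffset expectation above: a minimal arithmetic sketch, assuming
kMaxVelocity() is 0.001 ns of offset per ns of time (which is what the expected 500 ns drop over
500 us implies), shows why the midpoint query returns o2 - 500 ns rather than the interpolated
value. The standalone program below only illustrates that arithmetic; it is not the filter
implementation.

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    int main() {
      const double kMaxVelocity = 0.001;             // assumed: ns of offset per ns of time
      const int64_t t2 = 2'000'000, t3 = 3'000'000;  // sample times from the test, in ns
      const double o2 = 150, o3 = 50;                // sampled offsets, in ns
      const int64_t t = t2 + (t3 - t2) / 2;          // query at the midpoint

      // Plain interpolation between the two samples would give 100 ns...
      const double interpolated =
          o2 + (o3 - o2) * static_cast<double>(t - t2) / static_cast<double>(t3 - t2);
      assert(interpolated == 100.0);

      // ...but bounding away from each sample at kMaxVelocity gives -350 ns, matching the
      // std::make_pair(o2 - chrono::nanoseconds(500), 0.0) expectation above.
      const double bound =
          std::max(o2 - static_cast<double>(t - t2) * kMaxVelocity,
                   o3 - static_cast<double>(t3 - t) * kMaxVelocity);
      assert(bound == o2 - 500.0);
      return 0;
    }
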
diff --git a/aos/network/www/BUILD b/aos/network/www/BUILD
index 442e8be..03f41a5 100644
--- a/aos/network/www/BUILD
+++ b/aos/network/www/BUILD
@@ -2,6 +2,8 @@
 load("//tools/build_rules:js.bzl", "rollup_bundle")
 load("//aos:config.bzl", "aos_config")
 
+exports_files(["styles.css"])
+
 filegroup(
     name = "files",
     srcs = glob([
diff --git a/aos/network/www/plotter.ts b/aos/network/www/plotter.ts
index 8d462f3..89b360c 100644
--- a/aos/network/www/plotter.ts
+++ b/aos/network/www/plotter.ts
@@ -105,6 +105,7 @@
   private colorLocation: WebGLUniformLocation | null;
   private pointSizeLocation: WebGLUniformLocation | null;
   private _label: string|null = null;
+  private _hidden: boolean = false;
   constructor(
       private readonly ctx: WebGLRenderingContext,
       private readonly program: WebGLProgram,
@@ -190,6 +191,15 @@
     }
   }
 
+  hidden(): boolean {
+    return this._hidden;
+  }
+
+  setHidden(hidden: boolean) {
+    this._hasUpdate = true;
+    this._hidden = hidden;
+  }
+
   getPoints(): Point[] {
     return this.points;
   }
@@ -220,6 +230,10 @@
       return;
     }
 
+    if (this._hidden) {
+      return;
+    }
+
     this.ctx.bindBuffer(this.ctx.ARRAY_BUFFER, this.buffer);
     // Note: if this is generating errors associated with the buffer size,
     // confirm that this.points really is a Float32Array.
@@ -297,92 +311,120 @@
 export class Legend {
   // Location, in pixels, of the legend in the text canvas.
   private location: number[] = [0, 0];
-  constructor(private ctx: CanvasRenderingContext2D, private lines: Line[]) {
-    this.location = [80, 30];
+  constructor(
+      private plot: Plot, private lines: Line[],
+      private legend: HTMLDivElement) {
+    this.setPosition([80, 30]);
   }
 
   setPosition(location: number[]): void {
     this.location = location;
+    this.legend.style.left = location[0] + 'px';
+    this.legend.style.top = location[1] + 'px';
   }
 
   draw(): void {
-    this.ctx.save();
+    // First, figure out if anything has changed.  The legend is created and
+    // then titles are changed afterwards, so we have to do this lazily.
+    let needsUpdate = false;
+    {
+      let child = 0;
+      for (let line of this.lines) {
+        if (line.label() === null) {
+          continue;
+        }
 
-    this.ctx.translate(this.location[0], this.location[1]);
+        if (child >= this.legend.children.length) {
+          needsUpdate = true;
+          break;
+        }
 
-    // Space between rows of the legend.
-    const step = 20;
-
-    let maxWidth = 0;
-
-    // In the legend, we render both a small line of the appropriate color as
-    // well as the text label--start/endPoint are the relative locations of the
-    // endpoints of the miniature line within the row, and textStart is where
-    // we begin rendering the text within the row.
-    const startPoint = [0, 0];
-    const endPoint = [10, -10];
-    const textStart = endPoint[0] + 5;
-
-    // Calculate how wide the legend needs to be to fit all the text.
-    this.ctx.textAlign = 'left';
-    let numLabels = 0;
-    for (let line of this.lines) {
-      if (line.label() === null) {
-        continue;
+        // Make sure both have text in the right spot.  Don't be too picky since
+        // nothing should really be changing here, and it's handy to let the
+        // user edit the HTML for testing.
+        if (this.legend.children[child].lastChild.textContent.length == 0 &&
+            line.label().length != 0) {
+          needsUpdate = true;
+          break;
+        }
+        child += 1;
       }
-      ++numLabels;
-      const width =
-          textStart + this.ctx.measureText(line.label()).actualBoundingBoxRight;
-      maxWidth = Math.max(width, maxWidth);
-    }
 
-    if (numLabels === 0) {
-      this.ctx.restore();
+      // If we got through everything, we should be pointed past the last child.
+      // If not, more children exist than lines.
+      if (child != this.legend.children.length) {
+        needsUpdate = true;
+      }
+    }
+    if (!needsUpdate) {
       return;
     }
 
-    // Total height of the body of the legend.
-    const height = step * numLabels;
+    // Nuke the old legend.
+    while (this.legend.firstChild) {
+      this.legend.removeChild(this.legend.firstChild);
+    }
 
-    // Set the legend background to be white and opaque.
-    this.ctx.fillStyle = 'rgba(255, 255, 255, 1.0)';
-    const backgroundBuffer = 5;
-    this.ctx.fillRect(
-        -backgroundBuffer, 0, maxWidth + 2.0 * backgroundBuffer,
-        height + backgroundBuffer);
-
-    // Go through each line and render the little lines and text for each Line.
+    // Now, build up a new legend.
     for (let line of this.lines) {
       if (line.label() === null) {
         continue;
       }
-      this.ctx.translate(0, step);
+
+      // The legend is a div containing both a canvas for the style/color, and a
+      // div for the text.  Make those, color in the canvas, and add it to the
+      // page.
+      let l = document.createElement('div');
+      l.classList.add('aos_legend_line');
+      let text = document.createElement('div');
+      text.textContent = line.label();
+
+      l.appendChild(text);
+      this.legend.appendChild(l);
+
+      let c = document.createElement('canvas');
+      c.width = text.offsetHeight;
+      c.height = text.offsetHeight;
+
+      const linestyleContext = c.getContext("2d");
+      linestyleContext.clearRect(0, 0, c.width, c.height);
+
       const color = line.color();
-      this.ctx.strokeStyle = `rgb(${255.0 * color[0]}, ${255.0 * color[1]}, ${255.0 * color[2]})`;
-      this.ctx.fillStyle = this.ctx.strokeStyle;
-      if (line.drawLine()) {
-        this.ctx.beginPath();
-        this.ctx.moveTo(startPoint[0], startPoint[1]);
-        this.ctx.lineTo(endPoint[0], endPoint[1]);
-        this.ctx.closePath();
-        this.ctx.stroke();
-      }
+      linestyleContext.strokeStyle = `rgb(${255.0 * color[0]}, ${
+          255.0 * color[1]}, ${255.0 * color[2]})`;
+      linestyleContext.fillStyle = linestyleContext.strokeStyle;
+
       const pointSize = line.pointSize();
-      if (pointSize > 0) {
-        this.ctx.fillRect(
-            startPoint[0] - pointSize / 2.0, startPoint[1] - pointSize / 2.0,
-            pointSize, pointSize);
-        this.ctx.fillRect(
-            endPoint[0] - pointSize / 2.0, endPoint[1] - pointSize / 2.0,
-            pointSize, pointSize);
+      const kDistanceIn = pointSize / 2.0;
+
+      if (line.drawLine()) {
+        linestyleContext.beginPath();
+        linestyleContext.moveTo(0, 0);
+        linestyleContext.lineTo(c.height, c.width);
+        linestyleContext.closePath();
+        linestyleContext.stroke();
       }
 
-      this.ctx.fillStyle = 'black';
-      this.ctx.textAlign = 'left';
-      this.ctx.fillText(line.label(), textStart, 0);
-    }
+      if (pointSize > 0) {
+        linestyleContext.fillRect(0, 0, pointSize, pointSize);
+        linestyleContext.fillRect(
+            c.height - 1 - pointSize, c.width - 1 - pointSize, pointSize,
+            pointSize);
+      }
 
-    this.ctx.restore();
+      c.addEventListener('click', (e) => {
+        if (!line.hidden()) {
+          l.classList.add('aos_legend_line_hidden');
+        } else {
+          l.classList.remove('aos_legend_line_hidden');
+        }
+
+        line.setHidden(!line.hidden());
+        this.plot.draw();
+      });
+
+      l.prepend(c);
+    }
   }
 }
 
@@ -440,7 +482,7 @@
     return divideVec(subtractVec(canvasPos, this.zoom.offset), this.zoom.scale);
   }
 
-  // Tehse return the max/min rendered points, in plot-space (this is helpful
+  // These return the max/min rendered points, in plot-space (this is helpful
   // for drawing axis labels).
   maxVisiblePoint(): number[] {
     return this.canvasToPlotCoordinates([1.0, 1.0]);
@@ -850,6 +892,7 @@
 export class Plot {
   private canvas = document.createElement('canvas');
   private textCanvas = document.createElement('canvas');
+  private legendDiv = document.createElement('div');
   private lineDrawerContext: WebGLRenderingContext;
   private drawer: LineDrawer;
   private static keysPressed:
@@ -873,24 +916,20 @@
   constructor(wrapperDiv: HTMLDivElement) {
     wrapperDiv.appendChild(this.canvas);
     wrapperDiv.appendChild(this.textCanvas);
+    this.legendDiv.classList.add('aos_legend');
+    wrapperDiv.appendChild(this.legendDiv);
     this.lastTimeMs = (new Date()).getTime();
 
     this.canvas.style.paddingLeft = this.axisLabelBuffer.left.toString() + "px";
     this.canvas.style.paddingRight = this.axisLabelBuffer.right.toString() + "px";
     this.canvas.style.paddingTop = this.axisLabelBuffer.top.toString() + "px";
     this.canvas.style.paddingBottom = this.axisLabelBuffer.bottom.toString() + "px";
-    this.canvas.style.width = "100%";
-    this.canvas.style.height = "100%";
-    this.canvas.style.boxSizing = "border-box";
+    this.canvas.classList.add('aos_plot');
 
-    this.canvas.style.position = 'absolute';
     this.lineDrawerContext = this.canvas.getContext('webgl');
     this.drawer = new LineDrawer(this.lineDrawerContext);
 
-    this.textCanvas.style.position = 'absolute';
-    this.textCanvas.style.width = "100%";
-    this.textCanvas.style.height = "100%";
-    this.textCanvas.style.pointerEvents = 'none';
+    this.textCanvas.classList.add('aos_plot_text');
 
     this.canvas.addEventListener('dblclick', (e) => {
       this.handleDoubleClick(e);
@@ -922,7 +961,7 @@
     const textCtx = this.textCanvas.getContext("2d");
     this.axisLabels =
         new AxisLabels(textCtx, this.drawer, this.axisLabelBuffer);
-    this.legend = new Legend(textCtx, this.drawer.getLines());
+    this.legend = new Legend(this, this.drawer.getLines(), this.legendDiv);
 
     this.zoomRectangle = this.getDrawer().addLine(false);
     this.zoomRectangle.setColor(Colors.WHITE);
diff --git a/aos/network/www/styles.css b/aos/network/www/styles.css
index 23ceb21..547ec94 100644
--- a/aos/network/www/styles.css
+++ b/aos/network/www/styles.css
@@ -3,3 +3,59 @@
   border-bottom: 1px solid;
   font-size: 24px;
 }
+
+.aos_plot {
+  position: absolute;
+  width: 100%;
+  height: 100%;
+  box-sizing: border-box;
+}
+
+.aos_plot_text {
+  position: absolute;
+  width: 100%;
+  height: 100%;
+  pointer-events: none;
+}
+
+.aos_legend {
+  position: absolute;
+  z-index: 1;
+  pointer-events: none;
+}
+
+.aos_legend_line {
+  background: white;
+  padding: 2px;
+  border-radius: 2px;
+  margin-top: 3px;
+  margin-bottom: 3px;
+  font-size: 12px;
+}
+
+.aos_legend_line>div {
+  display: inline-block;
+  vertical-align: middle;
+  margin-left: 5px;
+}
+.aos_legend_line>canvas {
+  vertical-align: middle;
+  pointer-events: all;
+}
+
+.aos_legend_line_hidden {
+  filter: contrast(0.75);
+}
+
+.aos_cpp_plot {
+  width: 100%;
+  display: flex;
+  flex-direction: column;
+  height: 100%;
+  align-items: flex-start;
+}
+
+.aos_cpp_plot>div {
+  flex: 1;
+  width: 100%;
+}
diff --git a/build_tests/BUILD b/build_tests/BUILD
index 24fdad8..e7f236a 100644
--- a/build_tests/BUILD
+++ b/build_tests/BUILD
@@ -5,6 +5,7 @@
 load("@rules_rust//rust:defs.bzl", "rust_binary", "rust_library", "rust_test")
 load("@npm//@bazel/typescript:index.bzl", "ts_library")
 load("@npm//@bazel/concatjs:index.bzl", "karma_web_test_suite")
+load("//tools/build_rules:autocxx.bzl", "autocxx_library")
 
 cc_test(
     name = "gflags_build_test",
@@ -119,7 +120,11 @@
 
 go_library(
     name = "build_tests_lib",
-    srcs = ["hello.go"],
+    srcs = [
+        "hello.go",
+        # Not sure why gazelle wants this here?
+        "hello_autocxx.h",
+    ],
     importpath = "github.com/frc971/971-Robot-Code/build_tests",
     target_compatible_with = ["@platforms//cpu:x86_64"],
     visibility = ["//visibility:private"],
@@ -141,19 +146,19 @@
 rust_library(
     name = "hello_lib",
     srcs = ["hello_lib.rs"],
-    target_compatible_with = ["@platforms//os:linux"],
+    target_compatible_with = ["//tools/platforms/rust:has_support"],
 )
 
 rust_test(
     name = "hello_lib_test",
     crate = ":hello_lib",
-    target_compatible_with = ["@platforms//os:linux"],
+    target_compatible_with = ["//tools/platforms/rust:has_support"],
 )
 
 rust_binary(
     name = "rust_hello",
     srcs = ["rust_hello.rs"],
-    target_compatible_with = ["@platforms//os:linux"],
+    target_compatible_with = ["//tools/platforms/rust:has_support"],
     deps = [":hello_lib"],
 )
 
@@ -182,7 +187,7 @@
 rust_library(
     name = "rust_in_cc_rs",
     srcs = ["rust_in_cc.rs"],
-    target_compatible_with = ["@platforms//os:linux"],
+    target_compatible_with = ["//tools/platforms/rust:has_support"],
 )
 
 cc_test(
@@ -191,3 +196,27 @@
     target_compatible_with = ["@platforms//os:linux"],
     deps = [":rust_in_cc_rs"],
 )
+
+cc_library(
+    name = "hello_autocxx_cc",
+    hdrs = [
+        "hello_autocxx.h",
+    ],
+)
+
+autocxx_library(
+    name = "hello_autocxx",
+    srcs = ["hello_autocxx.rs"],
+    libs = [":hello_autocxx_cc"],
+    override_cc_toolchain = "@llvm_toolchain//:cc-clang-x86_64-linux",
+    target_compatible_with = ["//tools/platforms/rust:has_support"],
+)
+
+rust_test(
+    name = "hello_autocxx_test",
+    crate = ":hello_autocxx",
+    # TODO: Make Rust play nicely with pic vs nopic. Details at:
+    # https://github.com/bazelbuild/rules_rust/issues/118
+    rustc_flags = ["-Crelocation-model=static"],
+    target_compatible_with = ["//tools/platforms/rust:has_support"],
+)
diff --git a/build_tests/hello_autocxx.h b/build_tests/hello_autocxx.h
new file mode 100644
index 0000000..8b6e285
--- /dev/null
+++ b/build_tests/hello_autocxx.h
@@ -0,0 +1,8 @@
+#ifndef BUILD_TESTS_HELLO_AUTOCXX_H_
+#define BUILD_TESTS_HELLO_AUTOCXX_H_
+
+#include <stdlib.h>
+
+int plain_function() { return 971; }
+
+#endif  // BUILD_TESTS_HELLO_AUTOCXX_H_
diff --git a/build_tests/hello_autocxx.rs b/build_tests/hello_autocxx.rs
new file mode 100644
index 0000000..483f8be
--- /dev/null
+++ b/build_tests/hello_autocxx.rs
@@ -0,0 +1,16 @@
+use autocxx::include_cpp;
+
+include_cpp! (
+#include "build_tests/hello_autocxx.h"
+generate!("plain_function")
+);
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_plain_function() {
+        assert_eq!(autocxx::c_int::from(971), unsafe { ffi::plain_function() });
+    }
+}
diff --git a/frc971/analysis/BUILD b/frc971/analysis/BUILD
index 2d71410..abb950b 100644
--- a/frc971/analysis/BUILD
+++ b/frc971/analysis/BUILD
@@ -70,11 +70,21 @@
     ],
 )
 
+genrule(
+    name = "copy_css",
+    srcs = [
+        "//aos/network/www:styles.css",
+    ],
+    outs = ["styles.css"],
+    cmd = "cp $< $@",
+)
+
 filegroup(
     name = "plotter_files",
     srcs = [
         "index.html",
         "plot_index_bundle.min.js",
+        "styles.css",
     ],
 )
 
diff --git a/frc971/analysis/cpp_plot/BUILD b/frc971/analysis/cpp_plot/BUILD
index 156594c..d6e74c3 100644
--- a/frc971/analysis/cpp_plot/BUILD
+++ b/frc971/analysis/cpp_plot/BUILD
@@ -23,10 +23,21 @@
     ],
 )
 
+genrule(
+    name = "copy_css",
+    srcs = [
+        "//aos/network/www:styles.css",
+    ],
+    outs = ["styles.css"],
+    cmd = "cp $< $@",
+)
+
 filegroup(
     name = "cpp_plot_files",
     srcs = [
+        "cpp_plot_bundle.js",
         "cpp_plot_bundle.min.js",
         "index.html",
+        "styles.css",
     ],
 )
diff --git a/frc971/analysis/cpp_plot/cpp_plot.ts b/frc971/analysis/cpp_plot/cpp_plot.ts
index ffbd485..a704e89 100644
--- a/frc971/analysis/cpp_plot/cpp_plot.ts
+++ b/frc971/analysis/cpp_plot/cpp_plot.ts
@@ -4,7 +4,7 @@
 import {plotData} from 'org_frc971/frc971/analysis/plot_data_utils';
 
 const rootDiv = document.createElement('div');
-rootDiv.style.width = '100%';
+rootDiv.classList.add('aos_cpp_plot');
 document.body.appendChild(rootDiv);
 
 const conn = new Connection();
diff --git a/frc971/analysis/cpp_plot/index.html b/frc971/analysis/cpp_plot/index.html
index 776c103..fbb5199 100644
--- a/frc971/analysis/cpp_plot/index.html
+++ b/frc971/analysis/cpp_plot/index.html
@@ -1,6 +1,7 @@
 <html>
   <head>
     <script src="cpp_plot_bundle.min.js" defer></script>
+    <link rel="stylesheet" href="styles.css">
   </head>
   <body>
   </body>
diff --git a/frc971/analysis/in_process_plotter.cc b/frc971/analysis/in_process_plotter.cc
index 585c933..545740d 100644
--- a/frc971/analysis/in_process_plotter.cc
+++ b/frc971/analysis/in_process_plotter.cc
@@ -19,15 +19,26 @@
       builder_(plot_sender_.MakeBuilder()) {
   web_proxy_.SetDataPath(kDataPath);
   event_loop_->SkipTimingReport();
-  color_wheel_.push_back(Color(1, 0, 0));
-  color_wheel_.push_back(Color(0, 1, 0));
-  color_wheel_.push_back(Color(0, 0, 1));
-  color_wheel_.push_back(Color(1, 1, 0));
-  color_wheel_.push_back(Color(0, 1, 1));
-  color_wheel_.push_back(Color(1, 0, 1));
-  color_wheel_.push_back(Color(1, 0.6, 0));
-  color_wheel_.push_back(Color(0.6, 0.3, 0));
-  color_wheel_.push_back(Color(1, 1, 1));
+
+  color_wheel_.emplace_back(ColorWheelColor{.name = "red", .color = {1, 0, 0}});
+  color_wheel_.emplace_back(
+      ColorWheelColor{.name = "green", .color = {0, 1, 0}});
+  color_wheel_.emplace_back(
+      ColorWheelColor{.name = "purple", .color = {0.54, 0.3, 0.75}});
+  color_wheel_.emplace_back(
+      ColorWheelColor{.name = "blue", .color = {0, 0, 1}});
+  color_wheel_.emplace_back(
+      ColorWheelColor{.name = "yellow", .color = {1, 1, 0}});
+  color_wheel_.emplace_back(
+      ColorWheelColor{.name = "teal", .color = {0, 1, 1}});
+  color_wheel_.emplace_back(
+      ColorWheelColor{.name = "pink", .color = {1, 0, 1}});
+  color_wheel_.emplace_back(
+      ColorWheelColor{.name = "orange", .color = {1, 0.6, 0}});
+  color_wheel_.emplace_back(
+      ColorWheelColor{.name = "brown", .color = {0.6, 0.3, 0}});
+  color_wheel_.emplace_back(
+      ColorWheelColor{.name = "white", .color = {1, 1, 1}});
 }
 
 void Plotter::Spin() { event_loop_factory_.Run(); }
@@ -63,15 +74,14 @@
 }
 
 void Plotter::AddLine(const std::vector<double> &x,
-                      const std::vector<double> &y, std::string_view label,
-                      std::string_view line_style) {
+                      const std::vector<double> &y, LineOptions options) {
   CHECK_EQ(x.size(), y.size());
   CHECK(!position_.IsNull())
       << "You must call AddFigure() before calling AddLine().";
 
   flatbuffers::Offset<flatbuffers::String> label_offset;
-  if (!label.empty()) {
-    label_offset = builder_.fbb()->CreateString(label);
+  if (!options.label.empty()) {
+    label_offset = builder_.fbb()->CreateString(options.label);
   }
 
   std::vector<Point> points;
@@ -81,16 +91,28 @@
   const flatbuffers::Offset<flatbuffers::Vector<const Point *>> points_offset =
       builder_.fbb()->CreateVectorOfStructs(points);
 
-  const Color *color = &color_wheel_.at(color_wheel_position_);
-  color_wheel_position_ = (color_wheel_position_ + 1) % color_wheel_.size();
+  const Color *color;
+  if (options.color.empty()) {
+    color = &color_wheel_.at(color_wheel_position_).color;
+    color_wheel_position_ = (color_wheel_position_ + 1) % color_wheel_.size();
+  } else {
+    auto it = std::find_if(
+        color_wheel_.begin(), color_wheel_.end(),
+        [options_color = options.color](const ColorWheelColor &color) {
+          return color.name == options_color;
+        });
+    CHECK(it != color_wheel_.end()) << ": Failed to find " << options.color;
+    color = &(it->color);
+  }
 
   LineStyle::Builder style_builder = builder_.MakeBuilder<LineStyle>();
-  if (line_style.find('*') != line_style.npos) {
-    style_builder.add_point_size(3.0);
+  if (options.line_style.find('*') != options.line_style.npos) {
+    style_builder.add_point_size(options.point_size);
   } else {
     style_builder.add_point_size(0.0);
   }
-  style_builder.add_draw_line(line_style.find('-') != line_style.npos);
+  style_builder.add_draw_line(options.line_style.find('-') !=
+                              options.line_style.npos);
   const flatbuffers::Offset<LineStyle> style_offset = style_builder.Finish();
 
   auto line_builder = builder_.MakeBuilder<Line>();
@@ -132,8 +154,7 @@
   plot_builder.add_title(title_);
   plot_builder.add_figures(figures_offset);
 
-  CHECK_EQ(builder_.Send(plot_builder.Finish()),
-           aos::RawSender::Error::kOk);
+  CHECK_EQ(builder_.Send(plot_builder.Finish()), aos::RawSender::Error::kOk);
 
   builder_ = plot_sender_.MakeBuilder();
 
diff --git a/frc971/analysis/in_process_plotter.h b/frc971/analysis/in_process_plotter.h
index 8d4c84c..4f05c19 100644
--- a/frc971/analysis/in_process_plotter.h
+++ b/frc971/analysis/in_process_plotter.h
@@ -42,14 +42,31 @@
   void Title(std::string_view title);
   void AddFigure(std::string_view title = "", double width = 0,
                  double height = 0);
+  struct LineOptions {
+    std::string_view label = "";
+    std::string_view line_style = "*-";
+    std::string_view color = "";
+    double point_size = 3.0;
+  };
+
   void AddLine(const std::vector<double> &x, const std::vector<double> &y,
-               std::string_view label = "", std::string_view line_style = "*-");
+               std::string_view label) {
+    AddLine(x, y, LineOptions{.label = label});
+  }
+  void AddLine(const std::vector<double> &x, const std::vector<double> &y,
+               std::string_view label, std::string_view line_style) {
+    AddLine(x, y, LineOptions{.label = label, .line_style = line_style});
+  }
+  void AddLine(const std::vector<double> &x, const std::vector<double> &y,
+               LineOptions options);
+
   void ShareXAxis(bool share) { share_x_axis_ = share; }
   void XLabel(std::string_view label);
   void YLabel(std::string_view label);
   void Publish();
 
   void Spin();
+
  private:
   void MaybeFinishFigure();
 
@@ -70,8 +87,13 @@
   std::vector<flatbuffers::Offset<Figure>> figures_;
   std::vector<flatbuffers::Offset<Line>> lines_;
 
+  struct ColorWheelColor {
+    std::string name;
+    Color color;
+  };
+
   size_t color_wheel_position_ = 0;
-  std::vector<Color> color_wheel_;
+  std::vector<ColorWheelColor> color_wheel_;
 };
 
 }  // namespace analysis
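
For reference, a hedged usage sketch of the new AddLine overloads declared above. The Plotter
instance and the surrounding program are assumed (they are not part of this change); the color
name "orange" is one of the names registered in in_process_plotter.cc.

    #include <vector>

    #include "frc971/analysis/in_process_plotter.h"

    void AddExampleLines(frc971::analysis::Plotter *plotter) {
      const std::vector<double> x = {0.0, 1.0, 2.0};
      const std::vector<double> y = {0.0, 1.0, 4.0};

      // The existing two- and three-string-argument call styles keep working via
      // the forwarding overloads in the header.
      plotter->AddLine(x, y, "x squared");
      plotter->AddLine(x, y, "x squared", "*-");

      // New form: designated initializers pick a named color from the wheel and a
      // larger point size.
      plotter->AddLine(x, y, {.label = "x squared (orange)",
                              .line_style = "*-",
                              .color = "orange",
                              .point_size = 5.0});
    }

The forwarding overloads keep every existing call site compiling, while new callers can set only
the options they care about without threading extra defaulted parameters through the signature.
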
diff --git a/frc971/analysis/index.html b/frc971/analysis/index.html
index edd2483..22b42c7 100644
--- a/frc971/analysis/index.html
+++ b/frc971/analysis/index.html
@@ -1,6 +1,7 @@
 <html>
   <head>
     <script src="plot_index_bundle.min.js" defer></script>
+    <link rel="stylesheet" href="styles.css">
   </head>
   <body>
   </body>
diff --git a/frc971/analysis/plot_data_utils.ts b/frc971/analysis/plot_data_utils.ts
index f8debe7..1362a09 100644
--- a/frc971/analysis/plot_data_utils.ts
+++ b/frc971/analysis/plot_data_utils.ts
@@ -48,10 +48,7 @@
             figureDiv.style.width = figure.position().width().toString() + 'px';
           }
           if (figure.position().height() == 0) {
-            // TODO(austin): I don't know the css for 100%, excluding other
-            // stuff in the div...  Just go with a little less for now, it's
-            // good enough and quite helpful.
-            figureDiv.style.height = '97%';
+            figureDiv.style.height = '100%';
           } else {
             figureDiv.style.height =
                 figure.position().height().toString() + 'px';
diff --git a/frc971/analysis/plotter_config.json b/frc971/analysis/plotter_config.json
index 49266ee..fef4a50 100644
--- a/frc971/analysis/plotter_config.json
+++ b/frc971/analysis/plotter_config.json
@@ -3,7 +3,7 @@
     {
       "name": "/analysis",
       "type": "frc971.analysis.Plot",
-      "max_size": 10000000
+      "max_size": 1000000000
     }
   ],
   "imports": [
diff --git a/third_party/autocxx/BUILD b/third_party/autocxx/BUILD
new file mode 100644
index 0000000..8a26c11
--- /dev/null
+++ b/third_party/autocxx/BUILD
@@ -0,0 +1,32 @@
+load("@rules_rust//rust:defs.bzl", "rust_library")
+
+package(default_visibility = ["//visibility:public"])
+
+licenses([
+    "notice",  # MIT from expression "MIT OR Apache-2.0"
+])
+
+rust_library(
+    name = "autocxx",
+    srcs = glob(["**/*.rs"]),
+    compile_data = ["README.md"],
+    crate_root = "src/lib.rs",
+    edition = "2021",
+    proc_macro_deps = [
+        "//third_party/cargo:aquamarine",
+        "//third_party/autocxx/macro:autocxx_macro",
+    ],
+    rustc_flags = [
+        "--cap-lints=allow",
+    ],
+    tags = [
+        "cargo-raze",
+        "crate-name=autocxx",
+        "manual",
+    ],
+    version = "0.16.0",
+    deps = [
+        "//third_party/cargo:cxx",
+        "//third_party/cargo:moveit",
+    ],
+)
diff --git a/third_party/autocxx/engine/BUILD b/third_party/autocxx/engine/BUILD
new file mode 100644
index 0000000..b3335d2
--- /dev/null
+++ b/third_party/autocxx/engine/BUILD
@@ -0,0 +1,51 @@
+load("@rules_rust//rust:defs.bzl", "rust_library")
+
+package(default_visibility = ["//visibility:public"])
+
+licenses([
+    "notice",  # MIT from expression "MIT OR Apache-2.0"
+])
+
+rust_library(
+    name = "autocxx_engine",
+    srcs = glob(["**/*.rs"]),
+    crate_features = [
+        "default",
+        "reproduction_case",
+        "serde_json",
+    ],
+    crate_root = "src/lib.rs",
+    edition = "2021",
+    proc_macro_deps = [
+        "//third_party/cargo:indoc",
+        "//third_party/cargo:aquamarine",
+        "//third_party/cargo:strum_macros",
+    ],
+    rustc_flags = [
+        "--cap-lints=allow",
+    ],
+    tags = [
+        "cargo-raze",
+        "crate-name=autocxx-engine",
+        "manual",
+    ],
+    version = "0.16.0",
+    deps = [
+        "//third_party/autocxx/parser:autocxx_parser",
+        "//third_party/cargo:autocxx_bindgen",
+        "//third_party/cargo:cxx_gen",
+        "//third_party/cargo:indexmap",
+        "//third_party/cargo:itertools",
+        "//third_party/cargo:log",
+        "//third_party/cargo:miette",
+        "//third_party/cargo:once_cell",
+        "//third_party/cargo:proc_macro2",
+        "//third_party/cargo:quote",
+        "//third_party/cargo:regex",
+        "//third_party/cargo:serde_json",
+        "//third_party/cargo:syn",
+        "//third_party/cargo:tempfile",
+        "//third_party/cargo:thiserror",
+        "//third_party/cargo:version_check",
+    ],
+)
diff --git a/third_party/autocxx/gen/cmd/BUILD b/third_party/autocxx/gen/cmd/BUILD
new file mode 100644
index 0000000..2cee72d
--- /dev/null
+++ b/third_party/autocxx/gen/cmd/BUILD
@@ -0,0 +1,32 @@
+load("@rules_rust//rust:defs.bzl", "rust_binary")
+
+package(default_visibility = ["//visibility:public"])
+
+licenses([
+    "notice",  # MIT from expression "MIT OR Apache-2.0"
+])
+
+rust_binary(
+    name = "gen",
+    srcs = glob(["**/*.rs"]),
+    crate_root = "src/main.rs",
+    edition = "2021",
+    rustc_flags = [
+        "--cap-lints=allow",
+    ],
+    tags = [
+        "cargo-raze",
+        "crate-name=autocxx-gen",
+        "manual",
+    ],
+    version = "0.16.0",
+    deps = [
+        "//third_party/autocxx/engine:autocxx_engine",
+        "@//third_party/cargo:clap",
+        "@//third_party/cargo:env_logger",
+        "@//third_party/cargo:indexmap",
+        "@//third_party/cargo:miette",
+        "@//third_party/cargo:pathdiff",
+        "@//third_party/cargo:proc_macro2",
+    ],
+)
diff --git a/third_party/autocxx/macro/BUILD b/third_party/autocxx/macro/BUILD
new file mode 100644
index 0000000..a924a69
--- /dev/null
+++ b/third_party/autocxx/macro/BUILD
@@ -0,0 +1,30 @@
+load("@rules_rust//rust:defs.bzl", "rust_proc_macro")
+
+package(default_visibility = ["//visibility:public"])
+
+licenses([
+    "notice",  # MIT from expression "MIT OR Apache-2.0"
+])
+
+rust_proc_macro(
+    name = "autocxx_macro",
+    srcs = glob(["**/*.rs"]),
+    crate_root = "src/lib.rs",
+    edition = "2021",
+    rustc_flags = [
+        "--cap-lints=allow",
+    ],
+    tags = [
+        "cargo-raze",
+        "crate-name=autocxx-macro",
+        "manual",
+    ],
+    version = "0.16.0",
+    deps = [
+        "//third_party/autocxx/parser:autocxx_parser",
+        "@//third_party/cargo:proc_macro2",
+        "@//third_party/cargo:proc_macro_error",
+        "@//third_party/cargo:quote",
+        "@//third_party/cargo:syn",
+    ],
+)
diff --git a/third_party/autocxx/parser/BUILD b/third_party/autocxx/parser/BUILD
new file mode 100644
index 0000000..c91ea85
--- /dev/null
+++ b/third_party/autocxx/parser/BUILD
@@ -0,0 +1,38 @@
+load("@rules_rust//rust:defs.bzl", "rust_library")
+
+package(default_visibility = ["//visibility:public"])
+
+licenses([
+    "notice",  # MIT from expression "MIT OR Apache-2.0"
+])
+
+rust_library(
+    name = "autocxx_parser",
+    srcs = glob(["**/*.rs"]),
+    crate_features = [
+        "reproduction_case",
+    ],
+    crate_root = "src/lib.rs",
+    edition = "2021",
+    rustc_flags = [
+        "--cap-lints=allow",
+    ],
+    tags = [
+        "cargo-raze",
+        "crate-name=autocxx-parser",
+        "manual",
+    ],
+    version = "0.16.0",
+    deps = [
+        "@//third_party/cargo:indexmap",
+        "@//third_party/cargo:itertools",
+        "@//third_party/cargo:log",
+        "@//third_party/cargo:once_cell",
+        "@//third_party/cargo:proc_macro2",
+        "@//third_party/cargo:quote",
+        "@//third_party/cargo:serde",
+        "@//third_party/cargo:serde_json",
+        "@//third_party/cargo:syn",
+        "@//third_party/cargo:thiserror",
+    ],
+)
diff --git a/third_party/cargo/BUILD.bazel b/third_party/cargo/BUILD.bazel
index 417ffae..4bf6ce8 100644
--- a/third_party/cargo/BUILD.bazel
+++ b/third_party/cargo/BUILD.bazel
@@ -86,7 +86,7 @@
 
 alias(
     name = "clap",
-    actual = "@raze__clap__3_2_11//:clap",
+    actual = "@raze__clap__3_2_12//:clap",
     tags = [
         "cargo-raze",
         "manual",
@@ -157,6 +157,15 @@
 )
 
 alias(
+    name = "futures",
+    actual = "@raze__futures__0_3_21//:futures",
+    tags = [
+        "cargo-raze",
+        "manual",
+    ],
+)
+
+alias(
     name = "indexmap",
     actual = "@raze__indexmap__1_9_1//:indexmap",
     tags = [
diff --git a/third_party/cargo/Cargo.raze.lock b/third_party/cargo/Cargo.raze.lock
index 58310ac..bc0a380 100644
--- a/third_party/cargo/Cargo.raze.lock
+++ b/third_party/cargo/Cargo.raze.lock
@@ -151,7 +151,7 @@
  "autocxx",
  "autocxx-engine",
  "autocxx-integration-tests",
- "clap 3.2.11",
+ "clap 3.2.12",
  "cxx",
  "env_logger 0.9.0",
  "indexmap",
@@ -331,9 +331,9 @@
 
 [[package]]
 name = "clap"
-version = "3.2.11"
+version = "3.2.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d646c7ade5eb07c4aa20e907a922750df0c448892513714fd3e4acbc7130829f"
+checksum = "ab8b79fe3946ceb4a0b1c080b4018992b8d27e9ff363644c1c9b6387c854614d"
 dependencies = [
  "atty",
  "bitflags",
@@ -373,8 +373,11 @@
  "cxx",
  "cxxbridge-cmd",
  "cxxbridge-macro",
+ "futures",
  "libloading 0.6.3",
  "link-cplusplus",
+ "once_cell",
+ "thiserror",
  "toml",
  "uuid",
 ]
@@ -418,7 +421,7 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "384d7699599cc149694e38151d20820e8ab5550037526870bee8a27b069ed922"
 dependencies = [
- "clap 3.2.11",
+ "clap 3.2.12",
  "codespan-reporting",
  "proc-macro2",
  "quote",
@@ -508,6 +511,95 @@
 ]
 
 [[package]]
+name = "futures"
+version = "0.3.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f73fe65f54d1e12b726f517d3e2135ca3125a437b6d998caf1962961f7172d9e"
+dependencies = [
+ "futures-channel",
+ "futures-core",
+ "futures-executor",
+ "futures-io",
+ "futures-sink",
+ "futures-task",
+ "futures-util",
+]
+
+[[package]]
+name = "futures-channel"
+version = "0.3.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010"
+dependencies = [
+ "futures-core",
+ "futures-sink",
+]
+
+[[package]]
+name = "futures-core"
+version = "0.3.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3"
+
+[[package]]
+name = "futures-executor"
+version = "0.3.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9420b90cfa29e327d0429f19be13e7ddb68fa1cccb09d65e5706b8c7a749b8a6"
+dependencies = [
+ "futures-core",
+ "futures-task",
+ "futures-util",
+]
+
+[[package]]
+name = "futures-io"
+version = "0.3.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b"
+
+[[package]]
+name = "futures-macro"
+version = "0.3.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "futures-sink"
+version = "0.3.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868"
+
+[[package]]
+name = "futures-task"
+version = "0.3.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a"
+
+[[package]]
+name = "futures-util"
+version = "0.3.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a"
+dependencies = [
+ "futures-channel",
+ "futures-core",
+ "futures-io",
+ "futures-macro",
+ "futures-sink",
+ "futures-task",
+ "memchr",
+ "pin-project-lite",
+ "pin-utils",
+ "slab",
+]
+
+[[package]]
 name = "gimli"
 version = "0.26.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -756,9 +848,9 @@
 
 [[package]]
 name = "os_str_bytes"
-version = "6.1.0"
+version = "6.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "21326818e99cfe6ce1e524c2a805c189a99b5ae555a35d19f9a284b427d86afa"
+checksum = "648001efe5d5c0102d8cea768e348da85d90af8ba91f0bea908f157951493cd4"
 
 [[package]]
 name = "owo-colors"
@@ -779,6 +871,18 @@
 checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099"
 
 [[package]]
+name = "pin-project-lite"
+version = "0.2.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116"
+
+[[package]]
+name = "pin-utils"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
+
+[[package]]
 name = "predicates"
 version = "2.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -908,9 +1012,9 @@
 
 [[package]]
 name = "rustversion"
-version = "1.0.7"
+version = "1.0.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a0a5f7c728f5d284929a1cccb5bc19884422bfe6ef4d6c409da2c41838983fcf"
+checksum = "24c8ad4f0c00e1eb5bc7614d236a7f1300e3dbd76b68cac8e06fb00b015ad8d8"
 
 [[package]]
 name = "ryu"
@@ -956,6 +1060,12 @@
 checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3"
 
 [[package]]
+name = "slab"
+version = "0.4.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32"
+
+[[package]]
 name = "smallvec"
 version = "1.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1168,9 +1278,9 @@
 
 [[package]]
 name = "unicode-ident"
-version = "1.0.1"
+version = "1.0.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5bd2fe26506023ed7b5e1e315add59d6f584c621d037f9368fea9cfb988f368c"
+checksum = "15c61ba63f9235225a22310255a29b806b907c9b8c964bcbd0a2c70f3f2deea7"
 
 [[package]]
 name = "unicode-linebreak"
diff --git a/third_party/cargo/crates.bzl b/third_party/cargo/crates.bzl
index 515b67a..51d9a05 100644
--- a/third_party/cargo/crates.bzl
+++ b/third_party/cargo/crates.bzl
@@ -223,12 +223,12 @@
 
     maybe(
         http_archive,
-        name = "raze__clap__3_2_11",
-        url = "https://crates.io/api/v1/crates/clap/3.2.11/download",
+        name = "raze__clap__3_2_12",
+        url = "https://crates.io/api/v1/crates/clap/3.2.12/download",
         type = "tar.gz",
-        sha256 = "d646c7ade5eb07c4aa20e907a922750df0c448892513714fd3e4acbc7130829f",
-        strip_prefix = "clap-3.2.11",
-        build_file = Label("//third_party/cargo/remote:BUILD.clap-3.2.11.bazel"),
+        sha256 = "ab8b79fe3946ceb4a0b1c080b4018992b8d27e9ff363644c1c9b6387c854614d",
+        strip_prefix = "clap-3.2.12",
+        build_file = Label("//third_party/cargo/remote:BUILD.clap-3.2.12.bazel"),
     )
 
     maybe(
@@ -363,6 +363,96 @@
 
     maybe(
         http_archive,
+        name = "raze__futures__0_3_21",
+        url = "https://crates.io/api/v1/crates/futures/0.3.21/download",
+        type = "tar.gz",
+        sha256 = "f73fe65f54d1e12b726f517d3e2135ca3125a437b6d998caf1962961f7172d9e",
+        strip_prefix = "futures-0.3.21",
+        build_file = Label("//third_party/cargo/remote:BUILD.futures-0.3.21.bazel"),
+    )
+
+    maybe(
+        http_archive,
+        name = "raze__futures_channel__0_3_21",
+        url = "https://crates.io/api/v1/crates/futures-channel/0.3.21/download",
+        type = "tar.gz",
+        sha256 = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010",
+        strip_prefix = "futures-channel-0.3.21",
+        build_file = Label("//third_party/cargo/remote:BUILD.futures-channel-0.3.21.bazel"),
+    )
+
+    maybe(
+        http_archive,
+        name = "raze__futures_core__0_3_21",
+        url = "https://crates.io/api/v1/crates/futures-core/0.3.21/download",
+        type = "tar.gz",
+        sha256 = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3",
+        strip_prefix = "futures-core-0.3.21",
+        build_file = Label("//third_party/cargo/remote:BUILD.futures-core-0.3.21.bazel"),
+    )
+
+    maybe(
+        http_archive,
+        name = "raze__futures_executor__0_3_21",
+        url = "https://crates.io/api/v1/crates/futures-executor/0.3.21/download",
+        type = "tar.gz",
+        sha256 = "9420b90cfa29e327d0429f19be13e7ddb68fa1cccb09d65e5706b8c7a749b8a6",
+        strip_prefix = "futures-executor-0.3.21",
+        build_file = Label("//third_party/cargo/remote:BUILD.futures-executor-0.3.21.bazel"),
+    )
+
+    maybe(
+        http_archive,
+        name = "raze__futures_io__0_3_21",
+        url = "https://crates.io/api/v1/crates/futures-io/0.3.21/download",
+        type = "tar.gz",
+        sha256 = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b",
+        strip_prefix = "futures-io-0.3.21",
+        build_file = Label("//third_party/cargo/remote:BUILD.futures-io-0.3.21.bazel"),
+    )
+
+    maybe(
+        http_archive,
+        name = "raze__futures_macro__0_3_21",
+        url = "https://crates.io/api/v1/crates/futures-macro/0.3.21/download",
+        type = "tar.gz",
+        sha256 = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512",
+        strip_prefix = "futures-macro-0.3.21",
+        build_file = Label("//third_party/cargo/remote:BUILD.futures-macro-0.3.21.bazel"),
+    )
+
+    maybe(
+        http_archive,
+        name = "raze__futures_sink__0_3_21",
+        url = "https://crates.io/api/v1/crates/futures-sink/0.3.21/download",
+        type = "tar.gz",
+        sha256 = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868",
+        strip_prefix = "futures-sink-0.3.21",
+        build_file = Label("//third_party/cargo/remote:BUILD.futures-sink-0.3.21.bazel"),
+    )
+
+    maybe(
+        http_archive,
+        name = "raze__futures_task__0_3_21",
+        url = "https://crates.io/api/v1/crates/futures-task/0.3.21/download",
+        type = "tar.gz",
+        sha256 = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a",
+        strip_prefix = "futures-task-0.3.21",
+        build_file = Label("//third_party/cargo/remote:BUILD.futures-task-0.3.21.bazel"),
+    )
+
+    maybe(
+        http_archive,
+        name = "raze__futures_util__0_3_21",
+        url = "https://crates.io/api/v1/crates/futures-util/0.3.21/download",
+        type = "tar.gz",
+        sha256 = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a",
+        strip_prefix = "futures-util-0.3.21",
+        build_file = Label("//third_party/cargo/remote:BUILD.futures-util-0.3.21.bazel"),
+    )
+
+    maybe(
+        http_archive,
         name = "raze__gimli__0_26_1",
         url = "https://crates.io/api/v1/crates/gimli/0.26.1/download",
         type = "tar.gz",
@@ -663,12 +753,12 @@
 
     maybe(
         http_archive,
-        name = "raze__os_str_bytes__6_1_0",
-        url = "https://crates.io/api/v1/crates/os_str_bytes/6.1.0/download",
+        name = "raze__os_str_bytes__6_2_0",
+        url = "https://crates.io/api/v1/crates/os_str_bytes/6.2.0/download",
         type = "tar.gz",
-        sha256 = "21326818e99cfe6ce1e524c2a805c189a99b5ae555a35d19f9a284b427d86afa",
-        strip_prefix = "os_str_bytes-6.1.0",
-        build_file = Label("//third_party/cargo/remote:BUILD.os_str_bytes-6.1.0.bazel"),
+        sha256 = "648001efe5d5c0102d8cea768e348da85d90af8ba91f0bea908f157951493cd4",
+        strip_prefix = "os_str_bytes-6.2.0",
+        build_file = Label("//third_party/cargo/remote:BUILD.os_str_bytes-6.2.0.bazel"),
     )
 
     maybe(
@@ -703,6 +793,26 @@
 
     maybe(
         http_archive,
+        name = "raze__pin_project_lite__0_2_9",
+        url = "https://crates.io/api/v1/crates/pin-project-lite/0.2.9/download",
+        type = "tar.gz",
+        sha256 = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116",
+        strip_prefix = "pin-project-lite-0.2.9",
+        build_file = Label("//third_party/cargo/remote:BUILD.pin-project-lite-0.2.9.bazel"),
+    )
+
+    maybe(
+        http_archive,
+        name = "raze__pin_utils__0_1_0",
+        url = "https://crates.io/api/v1/crates/pin-utils/0.1.0/download",
+        type = "tar.gz",
+        sha256 = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184",
+        strip_prefix = "pin-utils-0.1.0",
+        build_file = Label("//third_party/cargo/remote:BUILD.pin-utils-0.1.0.bazel"),
+    )
+
+    maybe(
+        http_archive,
         name = "raze__predicates__2_1_1",
         url = "https://crates.io/api/v1/crates/predicates/2.1.1/download",
         type = "tar.gz",
@@ -853,12 +963,12 @@
 
     maybe(
         http_archive,
-        name = "raze__rustversion__1_0_7",
-        url = "https://crates.io/api/v1/crates/rustversion/1.0.7/download",
+        name = "raze__rustversion__1_0_8",
+        url = "https://crates.io/api/v1/crates/rustversion/1.0.8/download",
         type = "tar.gz",
-        sha256 = "a0a5f7c728f5d284929a1cccb5bc19884422bfe6ef4d6c409da2c41838983fcf",
-        strip_prefix = "rustversion-1.0.7",
-        build_file = Label("//third_party/cargo/remote:BUILD.rustversion-1.0.7.bazel"),
+        sha256 = "24c8ad4f0c00e1eb5bc7614d236a7f1300e3dbd76b68cac8e06fb00b015ad8d8",
+        strip_prefix = "rustversion-1.0.8",
+        build_file = Label("//third_party/cargo/remote:BUILD.rustversion-1.0.8.bazel"),
     )
 
     maybe(
@@ -913,6 +1023,16 @@
 
     maybe(
         http_archive,
+        name = "raze__slab__0_4_6",
+        url = "https://crates.io/api/v1/crates/slab/0.4.6/download",
+        type = "tar.gz",
+        sha256 = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32",
+        strip_prefix = "slab-0.4.6",
+        build_file = Label("//third_party/cargo/remote:BUILD.slab-0.4.6.bazel"),
+    )
+
+    maybe(
+        http_archive,
         name = "raze__smallvec__1_9_0",
         url = "https://crates.io/api/v1/crates/smallvec/1.9.0/download",
         type = "tar.gz",
@@ -1113,12 +1233,12 @@
 
     maybe(
         http_archive,
-        name = "raze__unicode_ident__1_0_1",
-        url = "https://crates.io/api/v1/crates/unicode-ident/1.0.1/download",
+        name = "raze__unicode_ident__1_0_2",
+        url = "https://crates.io/api/v1/crates/unicode-ident/1.0.2/download",
         type = "tar.gz",
-        sha256 = "5bd2fe26506023ed7b5e1e315add59d6f584c621d037f9368fea9cfb988f368c",
-        strip_prefix = "unicode-ident-1.0.1",
-        build_file = Label("//third_party/cargo/remote:BUILD.unicode-ident-1.0.1.bazel"),
+        sha256 = "15c61ba63f9235225a22310255a29b806b907c9b8c964bcbd0a2c70f3f2deea7",
+        strip_prefix = "unicode-ident-1.0.2",
+        build_file = Label("//third_party/cargo/remote:BUILD.unicode-ident-1.0.2.bazel"),
     )
 
     maybe(
diff --git a/third_party/cargo/cxx/include.BUILD.bazel b/third_party/cargo/cxx/include.BUILD.bazel
index f6b9f5a..31afbfb 100644
--- a/third_party/cargo/cxx/include.BUILD.bazel
+++ b/third_party/cargo/cxx/include.BUILD.bazel
@@ -6,4 +6,5 @@
     hdrs = ["include/cxx.h"],
     srcs = ["src/cxx.cc"],
     includes = ["include"],
+    target_compatible_with = ["@//tools/platforms/rust:has_support"],
 )
diff --git a/third_party/cargo/remote/BUILD.clap-3.2.11.bazel b/third_party/cargo/remote/BUILD.clap-3.2.12.bazel
similarity index 98%
rename from third_party/cargo/remote/BUILD.clap-3.2.11.bazel
rename to third_party/cargo/remote/BUILD.clap-3.2.12.bazel
index 0ae48b7..1fa3fef 100644
--- a/third_party/cargo/remote/BUILD.clap-3.2.11.bazel
+++ b/third_party/cargo/remote/BUILD.clap-3.2.12.bazel
@@ -58,7 +58,7 @@
         "crate-name=stdio-fixture",
         "manual",
     ],
-    version = "3.2.11",
+    version = "3.2.12",
     # buildifier: leave-alone
     deps = [
         ":clap",
@@ -200,7 +200,7 @@
         "crate-name=clap",
         "manual",
     ],
-    version = "3.2.11",
+    version = "3.2.12",
     # buildifier: leave-alone
     deps = [
         "@raze__atty__0_2_14//:atty",
diff --git a/third_party/cargo/remote/BUILD.clap_lex-0.2.4.bazel b/third_party/cargo/remote/BUILD.clap_lex-0.2.4.bazel
index 7ad903b..6566a06 100644
--- a/third_party/cargo/remote/BUILD.clap_lex-0.2.4.bazel
+++ b/third_party/cargo/remote/BUILD.clap_lex-0.2.4.bazel
@@ -50,6 +50,6 @@
     version = "0.2.4",
     # buildifier: leave-alone
     deps = [
-        "@raze__os_str_bytes__6_1_0//:os_str_bytes",
+        "@raze__os_str_bytes__6_2_0//:os_str_bytes",
     ],
 )
diff --git a/third_party/cargo/remote/BUILD.cxx-1.0.71.bazel b/third_party/cargo/remote/BUILD.cxx-1.0.71.bazel
index 4158d4d..1ede3fc 100644
--- a/third_party/cargo/remote/BUILD.cxx-1.0.71.bazel
+++ b/third_party/cargo/remote/BUILD.cxx-1.0.71.bazel
@@ -81,4 +81,5 @@
     hdrs = ["include/cxx.h"],
     srcs = ["src/cxx.cc"],
     includes = ["include"],
+    target_compatible_with = ["@//tools/platforms/rust:has_support"],
 )
diff --git a/third_party/cargo/remote/BUILD.cxxbridge-cmd-1.0.71.bazel b/third_party/cargo/remote/BUILD.cxxbridge-cmd-1.0.71.bazel
index 5a5fae3..ec67c21 100644
--- a/third_party/cargo/remote/BUILD.cxxbridge-cmd-1.0.71.bazel
+++ b/third_party/cargo/remote/BUILD.cxxbridge-cmd-1.0.71.bazel
@@ -54,7 +54,7 @@
     # buildifier: leave-alone
     deps = [
         ":cxxbridge_cmd",
-        "@raze__clap__3_2_11//:clap",
+        "@raze__clap__3_2_12//:clap",
         "@raze__codespan_reporting__0_11_1//:codespan_reporting",
         "@raze__proc_macro2__1_0_40//:proc_macro2",
         "@raze__quote__1_0_20//:quote",
@@ -82,7 +82,7 @@
     version = "1.0.71",
     # buildifier: leave-alone
     deps = [
-        "@raze__clap__3_2_11//:clap",
+        "@raze__clap__3_2_12//:clap",
         "@raze__codespan_reporting__0_11_1//:codespan_reporting",
         "@raze__proc_macro2__1_0_40//:proc_macro2",
         "@raze__quote__1_0_20//:quote",
diff --git a/third_party/cargo/remote/BUILD.futures-0.3.21.bazel b/third_party/cargo/remote/BUILD.futures-0.3.21.bazel
new file mode 100644
index 0000000..118d747
--- /dev/null
+++ b/third_party/cargo/remote/BUILD.futures-0.3.21.bazel
@@ -0,0 +1,177 @@
+"""
+@generated
+cargo-raze crate build file.
+
+DO NOT EDIT! Replaced on runs of cargo-raze
+"""
+
+# buildifier: disable=load
+load("@bazel_skylib//lib:selects.bzl", "selects")
+
+# buildifier: disable=load
+load(
+    "@rules_rust//rust:defs.bzl",
+    "rust_binary",
+    "rust_library",
+    "rust_proc_macro",
+    "rust_test",
+)
+
+package(default_visibility = [
+    # Public for visibility by "@raze__crate__version//" targets.
+    #
+    # Prefer access through "//third_party/cargo", which limits external
+    # visibility to explicit Cargo.toml dependencies.
+    "//visibility:public",
+])
+
+licenses([
+    "notice",  # MIT from expression "MIT OR Apache-2.0"
+])
+
+# Generated Targets
+
+rust_library(
+    name = "futures",
+    srcs = glob(["**/*.rs"]),
+    crate_features = [
+        "alloc",
+        "async-await",
+        "default",
+        "executor",
+        "futures-executor",
+        "std",
+    ],
+    crate_root = "src/lib.rs",
+    data = [],
+    edition = "2018",
+    rustc_flags = [
+        "--cap-lints=allow",
+    ],
+    tags = [
+        "cargo-raze",
+        "crate-name=futures",
+        "manual",
+    ],
+    version = "0.3.21",
+    # buildifier: leave-alone
+    deps = [
+        "@raze__futures_channel__0_3_21//:futures_channel",
+        "@raze__futures_core__0_3_21//:futures_core",
+        "@raze__futures_executor__0_3_21//:futures_executor",
+        "@raze__futures_io__0_3_21//:futures_io",
+        "@raze__futures_sink__0_3_21//:futures_sink",
+        "@raze__futures_task__0_3_21//:futures_task",
+        "@raze__futures_util__0_3_21//:futures_util",
+    ],
+)
+
+# Unsupported target "_require_features" with type "test" omitted
+
+# Unsupported target "async_await_macros" with type "test" omitted
+
+# Unsupported target "auto_traits" with type "test" omitted
+
+# Unsupported target "compat" with type "test" omitted
+
+# Unsupported target "eager_drop" with type "test" omitted
+
+# Unsupported target "eventual" with type "test" omitted
+
+# Unsupported target "future_abortable" with type "test" omitted
+
+# Unsupported target "future_basic_combinators" with type "test" omitted
+
+# Unsupported target "future_fuse" with type "test" omitted
+
+# Unsupported target "future_inspect" with type "test" omitted
+
+# Unsupported target "future_join_all" with type "test" omitted
+
+# Unsupported target "future_obj" with type "test" omitted
+
+# Unsupported target "future_select_all" with type "test" omitted
+
+# Unsupported target "future_select_ok" with type "test" omitted
+
+# Unsupported target "future_shared" with type "test" omitted
+
+# Unsupported target "future_try_flatten_stream" with type "test" omitted
+
+# Unsupported target "future_try_join_all" with type "test" omitted
+
+# Unsupported target "io_buf_reader" with type "test" omitted
+
+# Unsupported target "io_buf_writer" with type "test" omitted
+
+# Unsupported target "io_cursor" with type "test" omitted
+
+# Unsupported target "io_line_writer" with type "test" omitted
+
+# Unsupported target "io_lines" with type "test" omitted
+
+# Unsupported target "io_read" with type "test" omitted
+
+# Unsupported target "io_read_exact" with type "test" omitted
+
+# Unsupported target "io_read_line" with type "test" omitted
+
+# Unsupported target "io_read_to_end" with type "test" omitted
+
+# Unsupported target "io_read_to_string" with type "test" omitted
+
+# Unsupported target "io_read_until" with type "test" omitted
+
+# Unsupported target "io_window" with type "test" omitted
+
+# Unsupported target "io_write" with type "test" omitted
+
+# Unsupported target "lock_mutex" with type "test" omitted
+
+# Unsupported target "macro_comma_support" with type "test" omitted
+
+# Unsupported target "object_safety" with type "test" omitted
+
+# Unsupported target "oneshot" with type "test" omitted
+
+# Unsupported target "ready_queue" with type "test" omitted
+
+# Unsupported target "recurse" with type "test" omitted
+
+# Unsupported target "sink" with type "test" omitted
+
+# Unsupported target "sink_fanout" with type "test" omitted
+
+# Unsupported target "stream" with type "test" omitted
+
+# Unsupported target "stream_abortable" with type "test" omitted
+
+# Unsupported target "stream_buffer_unordered" with type "test" omitted
+
+# Unsupported target "stream_catch_unwind" with type "test" omitted
+
+# Unsupported target "stream_futures_ordered" with type "test" omitted
+
+# Unsupported target "stream_futures_unordered" with type "test" omitted
+
+# Unsupported target "stream_into_async_read" with type "test" omitted
+
+# Unsupported target "stream_peekable" with type "test" omitted
+
+# Unsupported target "stream_select_all" with type "test" omitted
+
+# Unsupported target "stream_select_next_some" with type "test" omitted
+
+# Unsupported target "stream_split" with type "test" omitted
+
+# Unsupported target "stream_try_stream" with type "test" omitted
+
+# Unsupported target "stream_unfold" with type "test" omitted
+
+# Unsupported target "task_arc_wake" with type "test" omitted
+
+# Unsupported target "task_atomic_waker" with type "test" omitted
+
+# Unsupported target "test_macro" with type "test" omitted
+
+# Unsupported target "try_join" with type "test" omitted
diff --git a/third_party/cargo/remote/BUILD.rustversion-1.0.7.bazel b/third_party/cargo/remote/BUILD.futures-channel-0.3.21.bazel
similarity index 65%
copy from third_party/cargo/remote/BUILD.rustversion-1.0.7.bazel
copy to third_party/cargo/remote/BUILD.futures-channel-0.3.21.bazel
index f407ef6..931d9a8 100644
--- a/third_party/cargo/remote/BUILD.rustversion-1.0.7.bazel
+++ b/third_party/cargo/remote/BUILD.futures-channel-0.3.21.bazel
@@ -38,13 +38,17 @@
 )
 
 cargo_build_script(
-    name = "rustversion_build_script",
+    name = "futures_channel_build_script",
     srcs = glob(["**/*.rs"]),
     build_script_env = {
     },
     crate_features = [
+        "alloc",
+        "futures-sink",
+        "sink",
+        "std",
     ],
-    crate_root = "build/build.rs",
+    crate_root = "build.rs",
     data = glob(["**"]),
     edition = "2018",
     rustc_flags = [
@@ -54,16 +58,22 @@
         "cargo-raze",
         "manual",
     ],
-    version = "1.0.7",
+    version = "0.3.21",
     visibility = ["//visibility:private"],
     deps = [
     ],
 )
 
-rust_proc_macro(
-    name = "rustversion",
+# Unsupported target "sync_mpsc" with type "bench" omitted
+
+rust_library(
+    name = "futures_channel",
     srcs = glob(["**/*.rs"]),
     crate_features = [
+        "alloc",
+        "futures-sink",
+        "sink",
+        "std",
     ],
     crate_root = "src/lib.rs",
     data = [],
@@ -73,20 +83,22 @@
     ],
     tags = [
         "cargo-raze",
-        "crate-name=rustversion",
+        "crate-name=futures-channel",
         "manual",
     ],
-    version = "1.0.7",
+    version = "0.3.21",
     # buildifier: leave-alone
     deps = [
-        ":rustversion_build_script",
+        ":futures_channel_build_script",
+        "@raze__futures_core__0_3_21//:futures_core",
+        "@raze__futures_sink__0_3_21//:futures_sink",
     ],
 )
 
-# Unsupported target "compiletest" with type "test" omitted
+# Unsupported target "channel" with type "test" omitted
 
-# Unsupported target "test_const" with type "test" omitted
+# Unsupported target "mpsc" with type "test" omitted
 
-# Unsupported target "test_eval" with type "test" omitted
+# Unsupported target "mpsc-close" with type "test" omitted
 
-# Unsupported target "test_parse" with type "test" omitted
+# Unsupported target "oneshot" with type "test" omitted
diff --git a/third_party/cargo/remote/BUILD.rustversion-1.0.7.bazel b/third_party/cargo/remote/BUILD.futures-core-0.3.21.bazel
similarity index 75%
copy from third_party/cargo/remote/BUILD.rustversion-1.0.7.bazel
copy to third_party/cargo/remote/BUILD.futures-core-0.3.21.bazel
index f407ef6..ee392e2 100644
--- a/third_party/cargo/remote/BUILD.rustversion-1.0.7.bazel
+++ b/third_party/cargo/remote/BUILD.futures-core-0.3.21.bazel
@@ -38,13 +38,15 @@
 )
 
 cargo_build_script(
-    name = "rustversion_build_script",
+    name = "futures_core_build_script",
     srcs = glob(["**/*.rs"]),
     build_script_env = {
     },
     crate_features = [
+        "alloc",
+        "std",
     ],
-    crate_root = "build/build.rs",
+    crate_root = "build.rs",
     data = glob(["**"]),
     edition = "2018",
     rustc_flags = [
@@ -54,16 +56,18 @@
         "cargo-raze",
         "manual",
     ],
-    version = "1.0.7",
+    version = "0.3.21",
     visibility = ["//visibility:private"],
     deps = [
     ],
 )
 
-rust_proc_macro(
-    name = "rustversion",
+rust_library(
+    name = "futures_core",
     srcs = glob(["**/*.rs"]),
     crate_features = [
+        "alloc",
+        "std",
     ],
     crate_root = "src/lib.rs",
     data = [],
@@ -73,20 +77,12 @@
     ],
     tags = [
         "cargo-raze",
-        "crate-name=rustversion",
+        "crate-name=futures-core",
         "manual",
     ],
-    version = "1.0.7",
+    version = "0.3.21",
     # buildifier: leave-alone
     deps = [
-        ":rustversion_build_script",
+        ":futures_core_build_script",
     ],
 )
-
-# Unsupported target "compiletest" with type "test" omitted
-
-# Unsupported target "test_const" with type "test" omitted
-
-# Unsupported target "test_eval" with type "test" omitted
-
-# Unsupported target "test_parse" with type "test" omitted
diff --git a/third_party/cargo/remote/BUILD.os_str_bytes-6.1.0.bazel b/third_party/cargo/remote/BUILD.futures-executor-0.3.21.bazel
similarity index 71%
copy from third_party/cargo/remote/BUILD.os_str_bytes-6.1.0.bazel
copy to third_party/cargo/remote/BUILD.futures-executor-0.3.21.bazel
index cbbf9c5..236d50b 100644
--- a/third_party/cargo/remote/BUILD.os_str_bytes-6.1.0.bazel
+++ b/third_party/cargo/remote/BUILD.futures-executor-0.3.21.bazel
@@ -31,11 +31,13 @@
 
 # Generated Targets
 
+# Unsupported target "thread_notify" with type "bench" omitted
+
 rust_library(
-    name = "os_str_bytes",
+    name = "futures_executor",
     srcs = glob(["**/*.rs"]),
     crate_features = [
-        "raw_os_str",
+        "std",
     ],
     crate_root = "src/lib.rs",
     data = [],
@@ -45,11 +47,16 @@
     ],
     tags = [
         "cargo-raze",
-        "crate-name=os_str_bytes",
+        "crate-name=futures-executor",
         "manual",
     ],
-    version = "6.1.0",
+    version = "0.3.21",
     # buildifier: leave-alone
     deps = [
+        "@raze__futures_core__0_3_21//:futures_core",
+        "@raze__futures_task__0_3_21//:futures_task",
+        "@raze__futures_util__0_3_21//:futures_util",
     ],
 )
+
+# Unsupported target "local_pool" with type "test" omitted
diff --git a/third_party/cargo/remote/BUILD.os_str_bytes-6.1.0.bazel b/third_party/cargo/remote/BUILD.futures-io-0.3.21.bazel
similarity index 89%
copy from third_party/cargo/remote/BUILD.os_str_bytes-6.1.0.bazel
copy to third_party/cargo/remote/BUILD.futures-io-0.3.21.bazel
index cbbf9c5..595d1fc 100644
--- a/third_party/cargo/remote/BUILD.os_str_bytes-6.1.0.bazel
+++ b/third_party/cargo/remote/BUILD.futures-io-0.3.21.bazel
@@ -32,10 +32,10 @@
 # Generated Targets
 
 rust_library(
-    name = "os_str_bytes",
+    name = "futures_io",
     srcs = glob(["**/*.rs"]),
     crate_features = [
-        "raw_os_str",
+        "std",
     ],
     crate_root = "src/lib.rs",
     data = [],
@@ -45,10 +45,10 @@
     ],
     tags = [
         "cargo-raze",
-        "crate-name=os_str_bytes",
+        "crate-name=futures-io",
         "manual",
     ],
-    version = "6.1.0",
+    version = "0.3.21",
     # buildifier: leave-alone
     deps = [
     ],
diff --git a/third_party/cargo/remote/BUILD.os_str_bytes-6.1.0.bazel b/third_party/cargo/remote/BUILD.futures-macro-0.3.21.bazel
similarity index 80%
copy from third_party/cargo/remote/BUILD.os_str_bytes-6.1.0.bazel
copy to third_party/cargo/remote/BUILD.futures-macro-0.3.21.bazel
index cbbf9c5..62ab29e 100644
--- a/third_party/cargo/remote/BUILD.os_str_bytes-6.1.0.bazel
+++ b/third_party/cargo/remote/BUILD.futures-macro-0.3.21.bazel
@@ -31,11 +31,10 @@
 
 # Generated Targets
 
-rust_library(
-    name = "os_str_bytes",
+rust_proc_macro(
+    name = "futures_macro",
     srcs = glob(["**/*.rs"]),
     crate_features = [
-        "raw_os_str",
     ],
     crate_root = "src/lib.rs",
     data = [],
@@ -45,11 +44,14 @@
     ],
     tags = [
         "cargo-raze",
-        "crate-name=os_str_bytes",
+        "crate-name=futures-macro",
         "manual",
     ],
-    version = "6.1.0",
+    version = "0.3.21",
     # buildifier: leave-alone
     deps = [
+        "@raze__proc_macro2__1_0_40//:proc_macro2",
+        "@raze__quote__1_0_20//:quote",
+        "@raze__syn__1_0_98//:syn",
     ],
 )
diff --git a/third_party/cargo/remote/BUILD.os_str_bytes-6.1.0.bazel b/third_party/cargo/remote/BUILD.futures-sink-0.3.21.bazel
similarity index 88%
copy from third_party/cargo/remote/BUILD.os_str_bytes-6.1.0.bazel
copy to third_party/cargo/remote/BUILD.futures-sink-0.3.21.bazel
index cbbf9c5..0353eba 100644
--- a/third_party/cargo/remote/BUILD.os_str_bytes-6.1.0.bazel
+++ b/third_party/cargo/remote/BUILD.futures-sink-0.3.21.bazel
@@ -32,10 +32,11 @@
 # Generated Targets
 
 rust_library(
-    name = "os_str_bytes",
+    name = "futures_sink",
     srcs = glob(["**/*.rs"]),
     crate_features = [
-        "raw_os_str",
+        "alloc",
+        "std",
     ],
     crate_root = "src/lib.rs",
     data = [],
@@ -45,10 +46,10 @@
     ],
     tags = [
         "cargo-raze",
-        "crate-name=os_str_bytes",
+        "crate-name=futures-sink",
         "manual",
     ],
-    version = "6.1.0",
+    version = "0.3.21",
     # buildifier: leave-alone
     deps = [
     ],
diff --git a/third_party/cargo/remote/BUILD.rustversion-1.0.7.bazel b/third_party/cargo/remote/BUILD.futures-task-0.3.21.bazel
similarity index 75%
copy from third_party/cargo/remote/BUILD.rustversion-1.0.7.bazel
copy to third_party/cargo/remote/BUILD.futures-task-0.3.21.bazel
index f407ef6..ccb5218 100644
--- a/third_party/cargo/remote/BUILD.rustversion-1.0.7.bazel
+++ b/third_party/cargo/remote/BUILD.futures-task-0.3.21.bazel
@@ -38,13 +38,15 @@
 )
 
 cargo_build_script(
-    name = "rustversion_build_script",
+    name = "futures_task_build_script",
     srcs = glob(["**/*.rs"]),
     build_script_env = {
     },
     crate_features = [
+        "alloc",
+        "std",
     ],
-    crate_root = "build/build.rs",
+    crate_root = "build.rs",
     data = glob(["**"]),
     edition = "2018",
     rustc_flags = [
@@ -54,16 +56,18 @@
         "cargo-raze",
         "manual",
     ],
-    version = "1.0.7",
+    version = "0.3.21",
     visibility = ["//visibility:private"],
     deps = [
     ],
 )
 
-rust_proc_macro(
-    name = "rustversion",
+rust_library(
+    name = "futures_task",
     srcs = glob(["**/*.rs"]),
     crate_features = [
+        "alloc",
+        "std",
     ],
     crate_root = "src/lib.rs",
     data = [],
@@ -73,20 +77,12 @@
     ],
     tags = [
         "cargo-raze",
-        "crate-name=rustversion",
+        "crate-name=futures-task",
         "manual",
     ],
-    version = "1.0.7",
+    version = "0.3.21",
     # buildifier: leave-alone
     deps = [
-        ":rustversion_build_script",
+        ":futures_task_build_script",
     ],
 )
-
-# Unsupported target "compiletest" with type "test" omitted
-
-# Unsupported target "test_const" with type "test" omitted
-
-# Unsupported target "test_eval" with type "test" omitted
-
-# Unsupported target "test_parse" with type "test" omitted
diff --git a/third_party/cargo/remote/BUILD.futures-util-0.3.21.bazel b/third_party/cargo/remote/BUILD.futures-util-0.3.21.bazel
new file mode 100644
index 0000000..5ea950a
--- /dev/null
+++ b/third_party/cargo/remote/BUILD.futures-util-0.3.21.bazel
@@ -0,0 +1,126 @@
+"""
+@generated
+cargo-raze crate build file.
+
+DO NOT EDIT! Replaced on runs of cargo-raze
+"""
+
+# buildifier: disable=load
+load("@bazel_skylib//lib:selects.bzl", "selects")
+
+# buildifier: disable=load
+load(
+    "@rules_rust//rust:defs.bzl",
+    "rust_binary",
+    "rust_library",
+    "rust_proc_macro",
+    "rust_test",
+)
+
+package(default_visibility = [
+    # Public for visibility by "@raze__crate__version//" targets.
+    #
+    # Prefer access through "//third_party/cargo", which limits external
+    # visibility to explicit Cargo.toml dependencies.
+    "//visibility:public",
+])
+
+licenses([
+    "notice",  # MIT from expression "MIT OR Apache-2.0"
+])
+
+# Generated Targets
+# buildifier: disable=out-of-order-load
+# buildifier: disable=load-on-top
+load(
+    "@rules_rust//cargo:cargo_build_script.bzl",
+    "cargo_build_script",
+)
+
+cargo_build_script(
+    name = "futures_util_build_script",
+    srcs = glob(["**/*.rs"]),
+    build_script_env = {
+    },
+    crate_features = [
+        "alloc",
+        "async-await",
+        "async-await-macro",
+        "channel",
+        "futures-channel",
+        "futures-io",
+        "futures-macro",
+        "futures-sink",
+        "io",
+        "memchr",
+        "sink",
+        "slab",
+        "std",
+    ],
+    crate_root = "build.rs",
+    data = glob(["**"]),
+    edition = "2018",
+    rustc_flags = [
+        "--cap-lints=allow",
+    ],
+    tags = [
+        "cargo-raze",
+        "manual",
+    ],
+    version = "0.3.21",
+    visibility = ["//visibility:private"],
+    deps = [
+    ],
+)
+
+# Unsupported target "flatten_unordered" with type "bench" omitted
+
+# Unsupported target "futures_unordered" with type "bench" omitted
+
+rust_library(
+    name = "futures_util",
+    srcs = glob(["**/*.rs"]),
+    crate_features = [
+        "alloc",
+        "async-await",
+        "async-await-macro",
+        "channel",
+        "futures-channel",
+        "futures-io",
+        "futures-macro",
+        "futures-sink",
+        "io",
+        "memchr",
+        "sink",
+        "slab",
+        "std",
+    ],
+    crate_root = "src/lib.rs",
+    data = [],
+    edition = "2018",
+    proc_macro_deps = [
+        "@raze__futures_macro__0_3_21//:futures_macro",
+    ],
+    rustc_flags = [
+        "--cap-lints=allow",
+    ],
+    tags = [
+        "cargo-raze",
+        "crate-name=futures-util",
+        "manual",
+    ],
+    version = "0.3.21",
+    # buildifier: leave-alone
+    deps = [
+        ":futures_util_build_script",
+        "@raze__futures_channel__0_3_21//:futures_channel",
+        "@raze__futures_core__0_3_21//:futures_core",
+        "@raze__futures_io__0_3_21//:futures_io",
+        "@raze__futures_sink__0_3_21//:futures_sink",
+        "@raze__futures_task__0_3_21//:futures_task",
+        "@raze__memchr__2_5_0//:memchr",
+        "@raze__pin_project_lite__0_2_9//:pin_project_lite",
+        "@raze__pin_utils__0_1_0//:pin_utils",
+        "@raze__slab__0_4_6//:slab",
+    ],
+)
diff --git a/third_party/cargo/remote/BUILD.os_str_bytes-6.1.0.bazel b/third_party/cargo/remote/BUILD.os_str_bytes-6.2.0.bazel
similarity index 95%
rename from third_party/cargo/remote/BUILD.os_str_bytes-6.1.0.bazel
rename to third_party/cargo/remote/BUILD.os_str_bytes-6.2.0.bazel
index cbbf9c5..7238c9f 100644
--- a/third_party/cargo/remote/BUILD.os_str_bytes-6.1.0.bazel
+++ b/third_party/cargo/remote/BUILD.os_str_bytes-6.2.0.bazel
@@ -39,7 +39,7 @@
     ],
     crate_root = "src/lib.rs",
     data = [],
-    edition = "2018",
+    edition = "2021",
     rustc_flags = [
         "--cap-lints=allow",
     ],
@@ -48,7 +48,7 @@
         "crate-name=os_str_bytes",
         "manual",
     ],
-    version = "6.1.0",
+    version = "6.2.0",
     # buildifier: leave-alone
     deps = [
     ],
diff --git a/third_party/cargo/remote/BUILD.os_str_bytes-6.1.0.bazel b/third_party/cargo/remote/BUILD.pin-project-lite-0.2.9.bazel
similarity index 64%
copy from third_party/cargo/remote/BUILD.os_str_bytes-6.1.0.bazel
copy to third_party/cargo/remote/BUILD.pin-project-lite-0.2.9.bazel
index cbbf9c5..4a952bc 100644
--- a/third_party/cargo/remote/BUILD.os_str_bytes-6.1.0.bazel
+++ b/third_party/cargo/remote/BUILD.pin-project-lite-0.2.9.bazel
@@ -26,16 +26,15 @@
 ])
 
 licenses([
-    "notice",  # MIT from expression "MIT OR Apache-2.0"
+    "notice",  # Apache-2.0 from expression "Apache-2.0 OR MIT"
 ])
 
 # Generated Targets
 
 rust_library(
-    name = "os_str_bytes",
+    name = "pin_project_lite",
     srcs = glob(["**/*.rs"]),
     crate_features = [
-        "raw_os_str",
     ],
     crate_root = "src/lib.rs",
     data = [],
@@ -45,11 +44,23 @@
     ],
     tags = [
         "cargo-raze",
-        "crate-name=os_str_bytes",
+        "crate-name=pin-project-lite",
         "manual",
     ],
-    version = "6.1.0",
+    version = "0.2.9",
     # buildifier: leave-alone
     deps = [
     ],
 )
+
+# Unsupported target "compiletest" with type "test" omitted
+
+# Unsupported target "drop_order" with type "test" omitted
+
+# Unsupported target "expandtest" with type "test" omitted
+
+# Unsupported target "lint" with type "test" omitted
+
+# Unsupported target "proper_unpin" with type "test" omitted
+
+# Unsupported target "test" with type "test" omitted
diff --git a/third_party/cargo/remote/BUILD.os_str_bytes-6.1.0.bazel b/third_party/cargo/remote/BUILD.pin-utils-0.1.0.bazel
similarity index 83%
copy from third_party/cargo/remote/BUILD.os_str_bytes-6.1.0.bazel
copy to third_party/cargo/remote/BUILD.pin-utils-0.1.0.bazel
index cbbf9c5..f3852e0 100644
--- a/third_party/cargo/remote/BUILD.os_str_bytes-6.1.0.bazel
+++ b/third_party/cargo/remote/BUILD.pin-utils-0.1.0.bazel
@@ -32,10 +32,9 @@
 # Generated Targets
 
 rust_library(
-    name = "os_str_bytes",
+    name = "pin_utils",
     srcs = glob(["**/*.rs"]),
     crate_features = [
-        "raw_os_str",
     ],
     crate_root = "src/lib.rs",
     data = [],
@@ -45,11 +44,15 @@
     ],
     tags = [
         "cargo-raze",
-        "crate-name=os_str_bytes",
+        "crate-name=pin-utils",
         "manual",
     ],
-    version = "6.1.0",
+    version = "0.1.0",
     # buildifier: leave-alone
     deps = [
     ],
 )
+
+# Unsupported target "projection" with type "test" omitted
+
+# Unsupported target "stack_pin" with type "test" omitted
diff --git a/third_party/cargo/remote/BUILD.proc-macro2-1.0.40.bazel b/third_party/cargo/remote/BUILD.proc-macro2-1.0.40.bazel
index 72d4c9a..56b71b4 100644
--- a/third_party/cargo/remote/BUILD.proc-macro2-1.0.40.bazel
+++ b/third_party/cargo/remote/BUILD.proc-macro2-1.0.40.bazel
@@ -86,7 +86,7 @@
     # buildifier: leave-alone
     deps = [
         ":proc_macro2_build_script",
-        "@raze__unicode_ident__1_0_1//:unicode_ident",
+        "@raze__unicode_ident__1_0_2//:unicode_ident",
     ],
 )
 
diff --git a/third_party/cargo/remote/BUILD.rustversion-1.0.7.bazel b/third_party/cargo/remote/BUILD.rustversion-1.0.8.bazel
similarity index 97%
rename from third_party/cargo/remote/BUILD.rustversion-1.0.7.bazel
rename to third_party/cargo/remote/BUILD.rustversion-1.0.8.bazel
index f407ef6..2609fab 100644
--- a/third_party/cargo/remote/BUILD.rustversion-1.0.7.bazel
+++ b/third_party/cargo/remote/BUILD.rustversion-1.0.8.bazel
@@ -54,7 +54,7 @@
         "cargo-raze",
         "manual",
     ],
-    version = "1.0.7",
+    version = "1.0.8",
     visibility = ["//visibility:private"],
     deps = [
     ],
@@ -76,7 +76,7 @@
         "crate-name=rustversion",
         "manual",
     ],
-    version = "1.0.7",
+    version = "1.0.8",
     # buildifier: leave-alone
     deps = [
         ":rustversion_build_script",
diff --git a/third_party/cargo/remote/BUILD.os_str_bytes-6.1.0.bazel b/third_party/cargo/remote/BUILD.slab-0.4.6.bazel
similarity index 78%
copy from third_party/cargo/remote/BUILD.os_str_bytes-6.1.0.bazel
copy to third_party/cargo/remote/BUILD.slab-0.4.6.bazel
index cbbf9c5..fa2b3e8 100644
--- a/third_party/cargo/remote/BUILD.os_str_bytes-6.1.0.bazel
+++ b/third_party/cargo/remote/BUILD.slab-0.4.6.bazel
@@ -26,16 +26,17 @@
 ])
 
 licenses([
-    "notice",  # MIT from expression "MIT OR Apache-2.0"
+    "notice",  # MIT from expression "MIT"
 ])
 
 # Generated Targets
 
 rust_library(
-    name = "os_str_bytes",
+    name = "slab",
     srcs = glob(["**/*.rs"]),
     crate_features = [
-        "raw_os_str",
+        "default",
+        "std",
     ],
     crate_root = "src/lib.rs",
     data = [],
@@ -45,11 +46,15 @@
     ],
     tags = [
         "cargo-raze",
-        "crate-name=os_str_bytes",
+        "crate-name=slab",
         "manual",
     ],
-    version = "6.1.0",
+    version = "0.4.6",
     # buildifier: leave-alone
     deps = [
     ],
 )
+
+# Unsupported target "serde" with type "test" omitted
+
+# Unsupported target "slab" with type "test" omitted
diff --git a/third_party/cargo/remote/BUILD.strum_macros-0.24.2.bazel b/third_party/cargo/remote/BUILD.strum_macros-0.24.2.bazel
index 13aceb8..3378bff 100644
--- a/third_party/cargo/remote/BUILD.strum_macros-0.24.2.bazel
+++ b/third_party/cargo/remote/BUILD.strum_macros-0.24.2.bazel
@@ -40,7 +40,7 @@
     data = [],
     edition = "2018",
     proc_macro_deps = [
-        "@raze__rustversion__1_0_7//:rustversion",
+        "@raze__rustversion__1_0_8//:rustversion",
     ],
     rustc_flags = [
         "--cap-lints=allow",
diff --git a/third_party/cargo/remote/BUILD.syn-1.0.98.bazel b/third_party/cargo/remote/BUILD.syn-1.0.98.bazel
index 4f04f19..dde8690 100644
--- a/third_party/cargo/remote/BUILD.syn-1.0.98.bazel
+++ b/third_party/cargo/remote/BUILD.syn-1.0.98.bazel
@@ -104,7 +104,7 @@
         ":syn_build_script",
         "@raze__proc_macro2__1_0_40//:proc_macro2",
         "@raze__quote__1_0_20//:quote",
-        "@raze__unicode_ident__1_0_1//:unicode_ident",
+        "@raze__unicode_ident__1_0_2//:unicode_ident",
     ],
 )
 
diff --git a/third_party/cargo/remote/BUILD.unicode-ident-1.0.1.bazel b/third_party/cargo/remote/BUILD.unicode-ident-1.0.2.bazel
similarity index 91%
rename from third_party/cargo/remote/BUILD.unicode-ident-1.0.1.bazel
rename to third_party/cargo/remote/BUILD.unicode-ident-1.0.2.bazel
index c3676d5..001aae9 100644
--- a/third_party/cargo/remote/BUILD.unicode-ident-1.0.1.bazel
+++ b/third_party/cargo/remote/BUILD.unicode-ident-1.0.2.bazel
@@ -26,7 +26,7 @@
 ])
 
 licenses([
-    "notice",  # MIT from expression "MIT OR Apache-2.0"
+    "notice",  # MIT from expression "(MIT OR Apache-2.0) AND Unicode-DFS-2016"
 ])
 
 # Generated Targets
@@ -49,7 +49,7 @@
         "crate-name=unicode-ident",
         "manual",
     ],
-    version = "1.0.1",
+    version = "1.0.2",
     # buildifier: leave-alone
     deps = [
     ],
diff --git a/third_party/cargo_raze/cargo_raze.patch b/third_party/cargo_raze/cargo_raze.patch
index 306fcf3..f4cd02a 100644
--- a/third_party/cargo_raze/cargo_raze.patch
+++ b/third_party/cargo_raze/cargo_raze.patch
@@ -95,12 +95,13 @@
 
 --- third_party/libssh2/BUILD.libssh2.bazel	2022-02-04 00:03:43.831120614 -0800
 +++ third_party/libssh2/BUILD.libssh2.bazel	2022-02-04 00:04:19.100745883 -0800
-@@ -29,6 +29,14 @@ cmake(
+@@ -29,6 +29,15 @@ cmake(
          "@rules_rust//rust/platform:windows": ["ssh2.lib"],
          "//conditions:default": ["libssh2.a"],
      }),
 +    copts = [
 +        "-Wno-cast-qual",
++        # See https://github.com/openbsd/src/commit/04a2240bd8f465bcae6b595d912af3e2965856de; it's a false positive.
 +        "-Wno-sizeof-array-div",
 +        "-Wno-unused-parameter",
 +        "-DHAVE_SNPRINTF=1",
@@ -162,6 +163,15 @@
 
 --- third_party/cargo/remote/BUILD.libssh2-sys-0.2.21.bazel	2022-02-04 00:54:43.031966734 -0800
 +++ third_party/cargo/remote/BUILD.libssh2-sys-0.2.21.bazel	2022-02-04 00:54:44.272023742 -0800
+@@ -41,6 +41,8 @@ cargo_build_script(
+     name = "libssh2_sys_build_script",
+     srcs = glob(["**/*.rs"]),
+     build_script_env = {
++        # See https://github.com/openbsd/src/commit/04a2240bd8f465bcae6b595d912af3e2965856de; it's a false positive.
++        "CFLAGS": "-Wno-sizeof-array-div",
+     },
+     crate_features = [
+     ],
 @@ -48,6 +48,7 @@ cargo_build_script(
      data = glob(["**"]) + [
          "@cargo_raze__libssh2//:libssh2",
diff --git a/third_party/flatbuffers/build_defs.bzl b/third_party/flatbuffers/build_defs.bzl
index 7eaa4d3..c3e2bb0 100644
--- a/third_party/flatbuffers/build_defs.bzl
+++ b/third_party/flatbuffers/build_defs.bzl
@@ -6,6 +6,8 @@
 """
 
 load("@io_bazel_rules_go//go:def.bzl", "go_library")
+load("@rules_rust//rust:defs.bzl", "rust_library")
+load("@rules_rust//rust:rust_common.bzl", "CrateInfo")
 load("@build_bazel_rules_nodejs//:index.bzl", "js_library")
 load("@npm//@bazel/typescript:index.bzl", "ts_project")
 load("@rules_cc//cc:defs.bzl", "cc_library")
@@ -14,9 +16,6 @@
 
 DEFAULT_INCLUDE_PATHS = [
     "./",
-    "$(GENDIR)",
-    "$(BINDIR)",
-    "$(execpath @com_github_google_flatbuffers//:flatc).runfiles/com_github_google_flatbuffers",
 ]
 
 DEFAULT_FLATC_ARGS = [
@@ -28,7 +27,8 @@
     "--require-explicit-ids",
     "--gen-mutable",
     "--reflect-names",
-    "--cpp-ptr-type flatbuffers::unique_ptr",
+    "--cpp-ptr-type",
+    "flatbuffers::unique_ptr",
     "--force-empty",
     "--scoped-enums",
     "--gen-name-strings",
@@ -40,6 +40,11 @@
     "--require-explicit-ids",
 ]
 
+DEFAULT_FLATC_RUST_ARGS = [
+    "--gen-object-api",
+    "--require-explicit-ids",
+]
+
 DEFAULT_FLATC_TS_ARGS = [
     "--gen-object-api",
     "--gen-mutable",
@@ -49,12 +54,80 @@
     "--keep-prefix",
 ]
 
+"""Contains information about a set of flatbuffers which have their code for
+reading/writing generated in a single library-style rule.
+
+Fields:
+    srcs: [File], the .fbs source files
+"""
+FlatbufferLibraryInfo = provider()
+
+def _flatbuffer_library_compile_impl(ctx):
+    outs = []
+    commands = []
+    for src in ctx.files.srcs:
+        if ctx.attr.tables_for_filenames:
+            out_dir = None
+            for table in ctx.attr.tables_for_filenames:
+                out = ctx.actions.declare_file(ctx.attr.out_prefix + table + ctx.attr.output_suffix)
+                this_out_dir = "/".join(out.dirname.split("/")[:-(len(ctx.attr.out_prefix.split("/")) - 1)])
+                if out_dir:
+                    if this_out_dir != out_dir:
+                        fail("Trying to write to multiple directories")
+                else:
+                    out_dir = this_out_dir
+                outs.append(out)
+        else:
+            out = ctx.actions.declare_file(ctx.attr.out_prefix + src.basename.replace(".fbs", "") + ctx.attr.output_suffix)
+            outs.append(out)
+            out_dir = out.dirname
+        arguments = [ctx.executable._flatc.path]
+        for path in ctx.attr.include_paths:
+            for subpath in ["", ctx.bin_dir.path + "/"]:
+                arguments.append("-I")
+                arguments.append(subpath + path)
+        arguments.append("-I")
+        arguments.append("%s.runfiles/com_github_google_flatbuffers" % ctx.executable._flatc.path)
+        arguments.extend(ctx.attr.flatc_args)
+        arguments.extend(ctx.attr.language_flags)
+        arguments.extend([
+            "-o",
+            out_dir,
+        ])
+        arguments.append(src.path)
+        commands.append(arguments)
+    ctx.actions.run_shell(
+        outputs = outs,
+        inputs = ctx.files.srcs + ctx.files.includes,
+        tools = [ctx.executable._flatc],
+        command = " && ".join([" ".join(arguments) for arguments in commands]),
+        mnemonic = "Flatc",
+        progress_message = "Generating flatbuffer files for %{input}:",
+    )
+    return [DefaultInfo(files = depset(outs), runfiles = ctx.runfiles(files = outs)), FlatbufferLibraryInfo(srcs = ctx.files.srcs)]
+
+_flatbuffer_library_compile = rule(
+    implementation = _flatbuffer_library_compile_impl,
+    attrs = {
+        "srcs": attr.label_list(mandatory = True, allow_files = True),
+        "output_suffix": attr.string(mandatory = True),
+        "tables_for_filenames": attr.string_list(mandatory = False),
+        "language_flags": attr.string_list(mandatory = True),
+        "includes": attr.label_list(default = [], allow_files = True),
+        "include_paths": attr.string_list(default = []),
+        "flatc_args": attr.string_list(default = []),
+        "out_prefix": attr.string(default = ""),
+        "_flatc": attr.label(executable = True, cfg = "exec", default = Label(flatc_path)),
+    },
+)
+
 def flatbuffer_library_public(
         name,
         srcs,
-        outs,
+        output_suffix,
         language_flag,
         out_prefix = "",
+        tables_for_filenames = None,
         includes = [],
         include_paths = DEFAULT_INCLUDE_PATHS,
         flatc_args = DEFAULT_FLATC_ARGS,
@@ -63,14 +136,15 @@
         compatible_with = None,
         restricted_to = None,
         target_compatible_with = None,
-        output_to_bindir = False):
+        output_to_bindir = False,
+        visibility = None):
     """Generates code files for reading/writing the given flatbuffers in the
     requested language using the public compiler.
 
     Args:
       name: Rule name.
       srcs: Source .fbs files. Sent in order to the compiler.
-      outs: Output files from flatc.
+      output_suffix: Suffix for output files from flatc.
       language_flag: Target language flag. One of [-c, -j, -js].
       out_prefix: Prepend this path to the front of all generated files except on
           single source targets. Usually is a directory name.
@@ -94,73 +168,36 @@
     optionally a Fileset([reflection_name]) with all generated reflection
     binaries.
     """
-    include_paths_cmd = ["-I %s" % (s) for s in include_paths]
-
-    # '$(@D)' when given a single source target will give the appropriate
-    # directory. Appending 'out_prefix' is only necessary when given a build
-    # target with multiple sources.
-    output_directory = (
-        ("-o $(@D)/%s" % (out_prefix)) if len(srcs) > 1 else ("-o $(@D)")
-    )
-    genrule_cmd = " ".join([
-        "SRCS=($(SRCS));",
-        "for f in $${SRCS[@]:0:%s}; do" % len(srcs),
-        "$(location %s)" % (flatc_path),
-        " ".join(include_paths_cmd),
-        " ".join(flatc_args),
-        language_flag,
-        output_directory,
-        "$$f;",
-        "done",
-    ])
-    native.genrule(
+    _flatbuffer_library_compile(
         name = name,
-        srcs = srcs + includes,
-        outs = outs,
-        output_to_bindir = output_to_bindir,
-        tools = [flatc_path],
-        cmd = genrule_cmd,
+        srcs = srcs,
+        output_suffix = output_suffix,
+        language_flags = [language_flag],
+        includes = includes,
+        include_paths = include_paths,
+        flatc_args = flatc_args,
+        out_prefix = out_prefix,
+        tables_for_filenames = tables_for_filenames,
         compatible_with = compatible_with,
         target_compatible_with = target_compatible_with,
         restricted_to = restricted_to,
-        message = "Generating flatbuffer files for %s:" % (name),
+        visibility = visibility,
     )
+
     if reflection_name:
-        reflection_genrule_cmd = " ".join([
-            "SRCS=($(SRCS));",
-            "for f in $${SRCS[@]:0:%s}; do" % len(srcs),
-            "$(location %s)" % (flatc_path),
-            "-b --schema",
-            " ".join(flatc_args),
-            " ".join(include_paths_cmd),
-            language_flag,
-            output_directory,
-            "$$f;",
-            "done",
-        ])
-        reflection_outs = [
-            (out_prefix + "%s.bfbs") % (s.replace(".fbs", "").split("/")[-1])
-            for s in srcs
-        ]
-        native.genrule(
-            name = "%s_srcs" % reflection_name,
-            srcs = srcs + includes,
-            outs = reflection_outs,
-            output_to_bindir = output_to_bindir,
-            tools = [flatc_path],
-            compatible_with = compatible_with,
-            restricted_to = restricted_to,
-            target_compatible_with = target_compatible_with,
-            cmd = reflection_genrule_cmd,
-            message = "Generating flatbuffer reflection binary for %s:" % (name),
-            visibility = reflection_visibility,
-        )
-        native.filegroup(
+        _flatbuffer_library_compile(
             name = "%s_out" % reflection_name,
-            srcs = reflection_outs,
-            visibility = reflection_visibility,
+            srcs = srcs,
+            output_suffix = ".bfbs",
+            language_flags = ["-b", "--schema"],
+            includes = includes,
+            include_paths = include_paths,
+            flatc_args = flatc_args,
+            out_prefix = out_prefix,
             compatible_with = compatible_with,
+            target_compatible_with = target_compatible_with,
             restricted_to = restricted_to,
+            visibility = reflection_visibility,
         )
 
 def flatbuffer_cc_library(
@@ -220,10 +257,6 @@
       Fileset([name]_reflection): (Optional) all generated reflection binaries.
       cc_library([name]): library with sources and flatbuffers deps.
     """
-    output_headers = [
-        (out_prefix + "%s_generated.h") % (s.replace(".fbs", "").split("/")[-1].split(":")[-1])
-        for s in srcs
-    ]
     if deps and includes:
         # There is no inherent reason we couldn't support both, but this discourages
         # use of includes without good reason.
@@ -236,7 +269,7 @@
     flatbuffer_library_public(
         name = srcs_lib,
         srcs = srcs,
-        outs = output_headers,
+        output_suffix = "_generated.h",
         language_flag = "-c",
         out_prefix = out_prefix,
         includes = includes,
@@ -309,23 +342,27 @@
         been parsed. As such, we just force the user to manually specify
         things.
     """
-    python_files = ["%s/%s.py" % (namespace.replace(".", "/"), table) for table in tables]
 
     srcs_lib = "%s_srcs" % (name)
+    if not tables:
+        fail("Must specify the list of tables")
     flatbuffer_library_public(
         name = srcs_lib,
         srcs = srcs,
-        outs = python_files,
+        output_suffix = ".py",
+        out_prefix = namespace.replace(".", "/") + "/",
+        tables_for_filenames = tables,
         language_flag = "--python",
         includes = includes,
         include_paths = include_paths,
         flatc_args = flatc_args,
         compatible_with = compatible_with,
         target_compatible_with = target_compatible_with,
+        visibility = ["//visibility:private"],
     )
     native.py_library(
         name = name,
-        srcs = python_files,
+        srcs = [srcs_lib],
         visibility = visibility,
         compatible_with = compatible_with,
         target_compatible_with = target_compatible_with,
@@ -345,23 +382,23 @@
         visibility = None,
         srcs_filegroup_visibility = None):
     srcs_lib = "%s_srcs" % (name)
-    outs = ["%s_generated.go" % (s.replace(".fbs", "").split("/")[-1]) for s in srcs]
     flatc_args = flatc_args + ["--go-namespace", importpath.split("/")[-1]]
 
     flatbuffer_library_public(
         name = srcs_lib,
         srcs = srcs,
-        outs = outs,
+        output_suffix = "_generated.go",
         language_flag = "--go",
         includes = includes,
         include_paths = include_paths,
         flatc_args = flatc_args,
         compatible_with = compatible_with,
         target_compatible_with = target_compatible_with,
+        visibility = ["//visibility:private"],
     )
     go_library(
         name = name,
-        srcs = outs,
+        srcs = [srcs_lib],
         deps = ["@com_github_google_flatbuffers//go"],
         importpath = importpath,
         visibility = visibility,
@@ -369,6 +406,93 @@
         target_compatible_with = target_compatible_with,
     )
 
+def _flatbuffer_rust_lib_gen_impl(ctx):
+    # TODO(Brian): I think this needs changes to properly handle multiple .fbs files in a rule.
+    uses = []
+    for (dep, dep_srcs) in zip(ctx.attr.deps, ctx.attr.dep_srcs):
+        for dep_src in dep_srcs[FlatbufferLibraryInfo].srcs:
+            uses.append((dep[CrateInfo].name, dep_src.basename.replace(".fbs", "_generated")))
+    lib_rs_content = "\n".join(
+        [
+            "// Automatically generated by the Flatbuffers Bazel rules. Do not modify",
+            "#![allow(unused_imports)]",
+        ] + ["use %s as %s;" % (crate, use_as) for (crate, use_as) in uses] +
+        ["include!(\"%s\");" % src.basename for src in ctx.files.srcs_lib],
+    )
+    output = ctx.actions.declare_file(ctx.attr.name + "_lib.rs")
+    ctx.actions.write(
+        output = output,
+        content = lib_rs_content,
+    )
+    return [DefaultInfo(files = depset([output]))]
+
+"""Generates a lib.rs for a flatbuffer_rust_library.
+
+flatc generates individual .rs files for us. It can also generate a top-level mod.rs to be included
+in a crate, but that is laid out to include all flatbuffers files in a project. That's not a good
+fit for Bazel rules and monorepos, so we generate an alternative that imports all dependencies under
+their expected names."""
+_flatbuffer_rust_lib_gen = rule(
+    implementation = _flatbuffer_rust_lib_gen_impl,
+    attrs = {
+        "srcs_lib": attr.label(mandatory = True, doc = "The generated srcs for this rule"),
+        "dep_srcs": attr.label_list(mandatory = True, providers = [FlatbufferLibraryInfo], doc = "The _srcs rules for all our direct dependencies"),
+        "deps": attr.label_list(mandatory = True, providers = [CrateInfo]),
+    },
+)
+
+def flatbuffer_rust_library(
+        name,
+        srcs,
+        compatible_with = None,
+        target_compatible_with = None,
+        deps = [],
+        include_paths = DEFAULT_INCLUDE_PATHS,
+        flatc_args = DEFAULT_FLATC_RUST_ARGS,
+        include_reflection = True,
+        crate_name = None,
+        visibility = None,
+        srcs_filegroup_visibility = None):
+    includes = [d + "_includes" for d in deps]
+    srcs_lib = "%s_srcs" % (name)
+    lib_gen = "%s_lib_gen" % (name)
+    deps = list(deps)
+    if include_reflection:
+        deps.append("@com_github_google_flatbuffers//reflection:reflection_rust_fbs")
+
+    flatbuffer_library_public(
+        name = srcs_lib,
+        srcs = srcs,
+        language_flag = "--rust",
+        output_suffix = "_generated.rs",
+        includes = includes,
+        include_paths = include_paths,
+        flatc_args = flatc_args,
+        compatible_with = compatible_with,
+        target_compatible_with = target_compatible_with,
+        visibility = visibility,
+    )
+    _flatbuffer_rust_lib_gen(
+        name = lib_gen,
+        deps = deps,
+        dep_srcs = [dep + "_srcs" for dep in deps],
+        srcs_lib = srcs_lib,
+        visibility = ["//visibility:private"],
+        compatible_with = compatible_with,
+        target_compatible_with = target_compatible_with,
+    )
+    rust_library(
+        name = name,
+        srcs = [srcs_lib, lib_gen],
+        crate_root = lib_gen,
+        crate_name = crate_name,
+        deps = ["@com_github_google_flatbuffers//rust"] + deps,
+        edition = "2018",
+        visibility = visibility,
+        compatible_with = compatible_with,
+        target_compatible_with = target_compatible_with,
+    )
+
 def flatbuffer_ts_library(
         name,
         srcs,
@@ -411,13 +535,12 @@
     # third_party/.
     # TODO(james): There absolutely are better ways to do this, but this was the quick and dirty
     # one....
-    pre_outs = ["%s_pregenerated.ts" % (s.replace(".fbs", "").split("/")[-1]) for s in srcs]
     outs = ["%s_generated.ts" % (s.replace(".fbs", "").split("/")[-1]) for s in srcs]
     includes = [d + "_includes" for d in deps]
     flatbuffer_library_public(
         name = srcs_lib,
         srcs = srcs,
-        outs = pre_outs,
+        output_suffix = "_pregenerated.ts",
         language_flag = "--ts",
         includes = includes,
         include_paths = include_paths,
@@ -435,8 +558,8 @@
         "done",
     ])
     native.genrule(
-        name = name + "_reimporter",
-        srcs = pre_outs,
+        name = name + "_reimporter.ts",
+        srcs = [srcs_lib],
         outs = outs,
         cmd = genrule_cmd,
     )
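
For illustration, here is a minimal sketch of the crate root that _flatbuffer_rust_lib_gen would write for a hypothetical flatbuffer_rust_library(name = "foo_fbs", srcs = ["foo.fbs"], deps = ["//some:bar_fbs"]) target. The target and schema names are made up; only the structure follows _flatbuffer_rust_lib_gen_impl above: the generated-file warning, one "use <dep crate> as <basename>_generated" line per dependency (plus the reflection crate, since include_reflection defaults to True), and an include! of each flatc-generated file. The reflection target added in the next file is a real instance of such a library.

    // Automatically generated by the Flatbuffers Bazel rules. Do not modify
    #![allow(unused_imports)]
    // Hypothetical direct dependency //some:bar_fbs, whose schema is bar.fbs.
    use bar_fbs as bar_generated;
    // Added automatically because include_reflection defaults to True.
    use flatbuffers_reflection as reflection_generated;
    include!("foo_generated.rs");

The renames expose each dependency crate under the <basename>_generated name that the flatc-generated code expects, which is what lets independently built Bazel crates stand in for flatc's project-wide mod.rs layout.
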
diff --git a/third_party/flatbuffers/reflection/BUILD.bazel b/third_party/flatbuffers/reflection/BUILD.bazel
index aa421db..9b08734 100644
--- a/third_party/flatbuffers/reflection/BUILD.bazel
+++ b/third_party/flatbuffers/reflection/BUILD.bazel
@@ -1,4 +1,4 @@
-load("//:build_defs.bzl", "flatbuffer_ts_library")
+load("//:build_defs.bzl", "flatbuffer_rust_library", "flatbuffer_ts_library")
 
 filegroup(
     name = "reflection_fbs_schema",
@@ -13,3 +13,11 @@
     include_reflection = False,
     visibility = ["//visibility:public"],
 )
+
+flatbuffer_rust_library(
+    name = "reflection_rust_fbs",
+    srcs = ["reflection.fbs"],
+    crate_name = "flatbuffers_reflection",
+    include_reflection = False,
+    visibility = ["//visibility:public"],
+)
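
As a rough usage sketch (not part of this change; the generated function and module names are assumptions based on flatc's usual Rust output for reflection.fbs, so verify them against the generated reflection_generated.rs), a Rust target that depends on :reflection_rust_fbs could read a serialized .bfbs schema along these lines:

    // Assumes reflection.fbs's `namespace reflection` becomes a `reflection` module
    // inside the flatbuffers_reflection crate declared above.
    use flatbuffers_reflection::reflection;

    /// Prints the name of the root table recorded in a reflection.Schema buffer.
    fn print_root_table(bfbs: &[u8]) {
        // root_as_schema is the verifying root accessor flatc is expected to
        // generate for `root_type Schema`; treat the exact name as an assumption.
        let schema = reflection::root_as_schema(bfbs).expect("not a valid reflection schema");
        println!("root table: {:?}", schema.root_table().map(|t| t.name()));
    }
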
diff --git a/third_party/flatbuffers/rust/BUILD.bazel b/third_party/flatbuffers/rust/BUILD.bazel
new file mode 100644
index 0000000..246f5a0
--- /dev/null
+++ b/third_party/flatbuffers/rust/BUILD.bazel
@@ -0,0 +1,16 @@
+load("@rules_rust//rust:defs.bzl", "rust_library")
+
+rust_library(
+    name = "rust",
+    srcs = glob(["flatbuffers/**/*.rs"]),
+    crate_name = "flatbuffers",
+    crate_root = "flatbuffers/src/lib.rs",
+    edition = "2018",
+    version = "2.1.1",
+    visibility = ["//visibility:public"],
+    deps = [
+        "@//third_party/cargo:bitflags",
+        "@//third_party/cargo:smallvec",
+        "@//third_party/cargo:thiserror",
+    ],
+)
diff --git a/third_party/flatbuffers/src/idl_gen_rust.cpp b/third_party/flatbuffers/src/idl_gen_rust.cpp
index 17853a0..b15cdf5 100644
--- a/third_party/flatbuffers/src/idl_gen_rust.cpp
+++ b/third_party/flatbuffers/src/idl_gen_rust.cpp
@@ -366,6 +366,7 @@
       if (symbol.generated) continue;
       code_.Clear();
       code_ += "// " + std::string(FlatBuffersGeneratedWarning());
+      code_ += "#![allow(unused_imports)]";
       code_ += "extern crate flatbuffers;";
       code_ += "use std::mem;";
       code_ += "use std::cmp::Ordering;";
diff --git a/third_party/rules_rust/docs/flatten.md b/third_party/rules_rust/docs/flatten.md
index 0f82ad7..18fee16 100644
--- a/third_party/rules_rust/docs/flatten.md
+++ b/third_party/rules_rust/docs/flatten.md
@@ -1306,7 +1306,7 @@
 
 <pre>
 CrateInfo(<a href="#CrateInfo-aliases">aliases</a>, <a href="#CrateInfo-compile_data">compile_data</a>, <a href="#CrateInfo-deps">deps</a>, <a href="#CrateInfo-edition">edition</a>, <a href="#CrateInfo-is_test">is_test</a>, <a href="#CrateInfo-name">name</a>, <a href="#CrateInfo-output">output</a>, <a href="#CrateInfo-owner">owner</a>, <a href="#CrateInfo-proc_macro_deps">proc_macro_deps</a>, <a href="#CrateInfo-root">root</a>,
-          <a href="#CrateInfo-rustc_env">rustc_env</a>, <a href="#CrateInfo-srcs">srcs</a>, <a href="#CrateInfo-type">type</a>, <a href="#CrateInfo-wrapped_crate_type">wrapped_crate_type</a>)
+          <a href="#CrateInfo-rustc_env">rustc_env</a>, <a href="#CrateInfo-rustc_env_files">rustc_env_files</a>, <a href="#CrateInfo-srcs">srcs</a>, <a href="#CrateInfo-type">type</a>, <a href="#CrateInfo-wrapped_crate_type">wrapped_crate_type</a>)
 </pre>
 
 A provider containing general Crate information.
@@ -1327,6 +1327,7 @@
 | <a id="CrateInfo-proc_macro_deps"></a>proc_macro_deps |  depset[DepVariantInfo]: This crate's rust proc_macro dependencies' providers.    |
 | <a id="CrateInfo-root"></a>root |  File: The source File entrypoint to this crate, eg. lib.rs    |
 | <a id="CrateInfo-rustc_env"></a>rustc_env |  Dict[String, String]: Additional <code>"key": "value"</code> environment variables to set for rustc.    |
+| <a id="CrateInfo-rustc_env_files"></a>rustc_env_files |  [File]: Files containing additional environment variables to set for rustc.    |
 | <a id="CrateInfo-srcs"></a>srcs |  depset[File]: All source Files that are part of the crate.    |
 | <a id="CrateInfo-type"></a>type |  str: The type of this crate (see [rustc --crate-type](https://doc.rust-lang.org/rustc/command-line-arguments.html#--crate-type-a-list-of-types-of-crates-for-the-compiler-to-emit)).    |
 | <a id="CrateInfo-wrapped_crate_type"></a>wrapped_crate_type |  str, optional: The original crate type for targets generated using a previously defined crate (typically tests using the <code>rust_test::crate</code> attribute)    |
diff --git a/third_party/rules_rust/docs/providers.md b/third_party/rules_rust/docs/providers.md
index f4aa819..713d618 100644
--- a/third_party/rules_rust/docs/providers.md
+++ b/third_party/rules_rust/docs/providers.md
@@ -11,7 +11,7 @@
 
 <pre>
 CrateInfo(<a href="#CrateInfo-aliases">aliases</a>, <a href="#CrateInfo-compile_data">compile_data</a>, <a href="#CrateInfo-deps">deps</a>, <a href="#CrateInfo-edition">edition</a>, <a href="#CrateInfo-is_test">is_test</a>, <a href="#CrateInfo-name">name</a>, <a href="#CrateInfo-output">output</a>, <a href="#CrateInfo-owner">owner</a>, <a href="#CrateInfo-proc_macro_deps">proc_macro_deps</a>, <a href="#CrateInfo-root">root</a>,
-          <a href="#CrateInfo-rustc_env">rustc_env</a>, <a href="#CrateInfo-srcs">srcs</a>, <a href="#CrateInfo-type">type</a>, <a href="#CrateInfo-wrapped_crate_type">wrapped_crate_type</a>)
+          <a href="#CrateInfo-rustc_env">rustc_env</a>, <a href="#CrateInfo-rustc_env_files">rustc_env_files</a>, <a href="#CrateInfo-srcs">srcs</a>, <a href="#CrateInfo-type">type</a>, <a href="#CrateInfo-wrapped_crate_type">wrapped_crate_type</a>)
 </pre>
 
 A provider containing general Crate information.
@@ -32,6 +32,7 @@
 | <a id="CrateInfo-proc_macro_deps"></a>proc_macro_deps |  depset[DepVariantInfo]: This crate's rust proc_macro dependencies' providers.    |
 | <a id="CrateInfo-root"></a>root |  File: The source File entrypoint to this crate, eg. lib.rs    |
 | <a id="CrateInfo-rustc_env"></a>rustc_env |  Dict[String, String]: Additional <code>"key": "value"</code> environment variables to set for rustc.    |
+| <a id="CrateInfo-rustc_env_files"></a>rustc_env_files |  [File]: Files containing additional environment variables to set for rustc.    |
 | <a id="CrateInfo-srcs"></a>srcs |  depset[File]: All source Files that are part of the crate.    |
 | <a id="CrateInfo-type"></a>type |  str: The type of this crate (see [rustc --crate-type](https://doc.rust-lang.org/rustc/command-line-arguments.html#--crate-type-a-list-of-types-of-crates-for-the-compiler-to-emit)).    |
 | <a id="CrateInfo-wrapped_crate_type"></a>wrapped_crate_type |  str, optional: The original crate type for targets generated using a previously defined crate (typically tests using the <code>rust_test::crate</code> attribute)    |
diff --git a/third_party/rules_rust/rust/private/common.bzl b/third_party/rules_rust/rust/private/common.bzl
index 78a3011..66425d2 100644
--- a/third_party/rules_rust/rust/private/common.bzl
+++ b/third_party/rules_rust/rust/private/common.bzl
@@ -48,6 +48,8 @@
     """
     if not "wrapped_crate_type" in kwargs:
         kwargs.update({"wrapped_crate_type": None})
+    if not "rustc_env_files" in kwargs:
+        kwargs.update({"rustc_env_files": []})
     return CrateInfo(**kwargs)
 
 rust_common = struct(
diff --git a/third_party/rules_rust/rust/private/providers.bzl b/third_party/rules_rust/rust/private/providers.bzl
index de6d314..0846498 100644
--- a/third_party/rules_rust/rust/private/providers.bzl
+++ b/third_party/rules_rust/rust/private/providers.bzl
@@ -28,6 +28,7 @@
         "proc_macro_deps": "depset[DepVariantInfo]: This crate's rust proc_macro dependencies' providers.",
         "root": "File: The source File entrypoint to this crate, eg. lib.rs",
         "rustc_env": "Dict[String, String]: Additional `\"key\": \"value\"` environment variables to set for rustc.",
+        "rustc_env_files": "[File]: Files containing additional environment variables to set for rustc.",
         "srcs": "depset[File]: All source Files that are part of the crate.",
         "type": (
             "str: The type of this crate " +
diff --git a/third_party/rules_rust/rust/private/rust.bzl b/third_party/rules_rust/rust/private/rust.bzl
index ad2fba8..688d9bf 100644
--- a/third_party/rules_rust/rust/private/rust.bzl
+++ b/third_party/rules_rust/rust/private/rust.bzl
@@ -282,6 +282,7 @@
             output = rust_lib,
             edition = get_edition(ctx.attr, toolchain),
             rustc_env = ctx.attr.rustc_env,
+            rustc_env_files = ctx.files.rustc_env_files,
             is_test = False,
             compile_data = depset(ctx.files.compile_data),
             owner = ctx.label,
@@ -322,6 +323,7 @@
             output = output,
             edition = get_edition(ctx.attr, toolchain),
             rustc_env = ctx.attr.rustc_env,
+            rustc_env_files = ctx.files.rustc_env_files,
             is_test = False,
             compile_data = depset(ctx.files.compile_data),
             owner = ctx.label,
@@ -357,6 +359,9 @@
             compile_data = depset(ctx.files.compile_data, transitive = [crate.compile_data])
         else:
             compile_data = depset(ctx.files.compile_data)
+        rustc_env_files = ctx.files.rustc_env_files + crate.rustc_env_files
+        rustc_env = dict(crate.rustc_env)
+        rustc_env.update(**ctx.attr.rustc_env)
 
         # Build the test binary using the dependency's srcs.
         crate_info = rust_common.create_crate_info(
@@ -369,7 +374,8 @@
             aliases = ctx.attr.aliases,
             output = output,
             edition = crate.edition,
-            rustc_env = ctx.attr.rustc_env,
+            rustc_env = rustc_env,
+            rustc_env_files = rustc_env_files,
             is_test = True,
             compile_data = compile_data,
             wrapped_crate_type = crate.type,
@@ -388,6 +394,7 @@
             output = output,
             edition = get_edition(ctx.attr, toolchain),
             rustc_env = ctx.attr.rustc_env,
+            rustc_env_files = ctx.files.rustc_env_files,
             is_test = True,
             compile_data = depset(ctx.files.compile_data),
             owner = ctx.label,
diff --git a/third_party/rules_rust/rust/private/rustc.bzl b/third_party/rules_rust/rust/private/rustc.bzl
index 0c3df5e..fc7c5f6 100644
--- a/third_party/rules_rust/rust/private/rustc.bzl
+++ b/third_party/rules_rust/rust/private/rustc.bzl
@@ -581,7 +581,7 @@
         ],
     )
 
-    build_env_files = getattr(files, "rustc_env_files", [])
+    build_env_files = getattr(files, "rustc_env_files", []) + crate_info.rustc_env_files
     compile_inputs, out_dir, build_env_file, build_flags_files = _process_build_scripts(build_info, dep_info, compile_inputs)
     if build_env_file:
         build_env_files = [f for f in build_env_files] + [build_env_file]
diff --git a/third_party/rules_rust/rust/private/rustdoc.bzl b/third_party/rules_rust/rust/private/rustdoc.bzl
index 86c2acd..ca91ff0 100644
--- a/third_party/rules_rust/rust/private/rustdoc.bzl
+++ b/third_party/rules_rust/rust/private/rustdoc.bzl
@@ -39,6 +39,7 @@
         output = None,
         edition = crate_info.edition,
         rustc_env = crate_info.rustc_env,
+        rustc_env_files = crate_info.rustc_env_files,
         is_test = crate_info.is_test,
         compile_data = crate_info.compile_data,
     )
diff --git a/third_party/rules_rust/test/build_env/BUILD.bazel b/third_party/rules_rust/test/build_env/BUILD.bazel
index 4fd7df3..511d23a 100644
--- a/third_party/rules_rust/test/build_env/BUILD.bazel
+++ b/third_party/rules_rust/test/build_env/BUILD.bazel
@@ -2,7 +2,7 @@
     "//cargo:cargo_build_script.bzl",
     "cargo_build_script",
 )
-load("//rust:defs.bzl", "rust_test")
+load("//rust:defs.bzl", "rust_library", "rust_test")
 
 package(default_visibility = ["//visibility:public"])
 
@@ -12,6 +12,21 @@
     data = ["src/manifest_dir_file.txt"],
 )
 
+rust_library(
+    name = "arbitrary_env_lib",
+    srcs = ["tests/arbitrary_env_lib.rs"],
+    edition = "2018",
+    rustc_env = {
+        "USER_DEFINED_KEY": "USER_DEFINED_VALUE",
+    },
+)
+
+rust_test(
+    name = "arbitrary_env_lib_test",
+    crate = ":arbitrary_env_lib",
+    edition = "2018",
+)
+
 rust_test(
     name = "arbitrary_env_test",
     srcs = ["tests/arbitrary_env.rs"],
diff --git a/third_party/rules_rust/test/build_env/tests/arbitrary_env_lib.rs b/third_party/rules_rust/test/build_env/tests/arbitrary_env_lib.rs
new file mode 100644
index 0000000..e89fc2e
--- /dev/null
+++ b/third_party/rules_rust/test/build_env/tests/arbitrary_env_lib.rs
@@ -0,0 +1,16 @@
+pub fn from_lib() -> &'static str {
+    env!("USER_DEFINED_KEY")
+}
+
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn verify_from_lib() {
+        assert_eq!(super::from_lib(), "USER_DEFINED_VALUE");
+    }
+
+    #[test]
+    fn verify_from_test() {
+        assert_eq!(env!("USER_DEFINED_KEY"), "USER_DEFINED_VALUE");
+    }
+}
diff --git a/third_party/rules_rust/test/rustc_env_files/BUILD.bazel b/third_party/rules_rust/test/rustc_env_files/BUILD.bazel
index 431cd07..efb21b7 100644
--- a/third_party/rules_rust/test/rustc_env_files/BUILD.bazel
+++ b/third_party/rules_rust/test/rustc_env_files/BUILD.bazel
@@ -1,4 +1,4 @@
-load("//rust:defs.bzl", "rust_binary")
+load("//rust:defs.bzl", "rust_binary", "rust_library", "rust_test")
 
 package(default_visibility = ["//visibility:public"])
 
@@ -20,3 +20,15 @@
     args = ["$(location :hello_env)"],
     data = [":hello_env"],
 )
+
+rust_library(
+    name = "hello_env_crate",
+    srcs = ["src/lib.rs"],
+    edition = "2018",
+    rustc_env_files = [":generate_rustc_env_file"],
+)
+
+rust_test(
+    name = "hello_env_crate_test",
+    crate = ":hello_env_crate",
+)
diff --git a/third_party/rules_rust/test/rustc_env_files/src/lib.rs b/third_party/rules_rust/test/rustc_env_files/src/lib.rs
new file mode 100644
index 0000000..8cd7cab
--- /dev/null
+++ b/third_party/rules_rust/test/rustc_env_files/src/lib.rs
@@ -0,0 +1,16 @@
+pub fn from_lib() -> &'static str {
+    env!("GREETING")
+}
+
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn verify_from_lib() {
+        assert_eq!(super::from_lib(), "Howdy");
+    }
+
+    #[test]
+    fn verify_from_test() {
+        assert_eq!(env!("GREETING"), "Howdy");
+    }
+}
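
For reference, the pre-existing `:generate_rustc_env_file` target consumed by `hello_env_crate` above is not part of this diff; judging by the assertions in the test, it emits a file of `KEY=value` lines roughly like this sketch (the exact rule definition is an assumption):

```
genrule(
    name = "generate_rustc_env_file",
    outs = ["rustc_env_file"],
    cmd = "echo GREETING=Howdy > $@",
)
```

`rustc_env_files` then makes those variables visible to rustc, so `env!("GREETING")` in the library and in its wrapping test both expand to "Howdy" at compile time.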
diff --git a/tools/build_rules/autocxx.bzl b/tools/build_rules/autocxx.bzl
new file mode 100644
index 0000000..ee8b4d4
--- /dev/null
+++ b/tools/build_rules/autocxx.bzl
@@ -0,0 +1,345 @@
+load("@rules_rust//rust:defs.bzl", "rust_library")
+load("@rules_cc//cc:find_cc_toolchain.bzl", "find_cc_toolchain")
+load("@bazel_tools//tools/build_defs/cc:action_names.bzl", "ACTION_NAMES")
+
+def _cc_toolchain_flags(ctx, cc_toolchain):
+    feature_configuration = cc_common.configure_features(
+        ctx = ctx,
+        cc_toolchain = cc_toolchain,
+        requested_features = ctx.features,
+        unsupported_features = ctx.disabled_features,
+    )
+    compiler_path = cc_common.get_tool_for_action(
+        feature_configuration = feature_configuration,
+        action_name = ACTION_NAMES.cpp_compile,
+    )
+    compile_variables = cc_common.create_compile_variables(
+        feature_configuration = feature_configuration,
+        cc_toolchain = cc_toolchain,
+        user_compile_flags = ctx.fragments.cpp.copts + ctx.fragments.cpp.cxxopts,
+    )
+    command_line = cc_common.get_memory_inefficient_command_line(
+        feature_configuration = feature_configuration,
+        action_name = ACTION_NAMES.cpp_compile,
+        variables = compile_variables,
+    )
+    env = cc_common.get_environment_variables(
+        feature_configuration = feature_configuration,
+        action_name = ACTION_NAMES.cpp_compile,
+        variables = compile_variables,
+    )
+    return command_line, env
+
+# All the stuff about AUTOCXX_RS_FILE and --fix-rs-include-name only applies when
+# using --gen-rs-include. We use --gen-rs-archive so none of that matters.
+#
+# The generated .rs file uses `include!` on the top-level C++ headers imported
+# via `#include` in `include_cpp!`. This always operates relative to the source
+# file (I don't see any way to change that), and autocxx has no option to rewrite
+# the path either. Some of the headers involved use `#pragma once`, so just
+# copying them is a bad idea. Instead, we generate forwarding headers.
+def _autocxx_library_gen_impl(ctx):
+    rust_toolchain = ctx.toolchains[Label("@rules_rust//rust:toolchain")]
+
+    # TODO(Brian): Provide some way to override this globally in WORKSPACE? Need
+    # a real strategy for coordinating toolchains and flags, see the TODO below
+    # where cc_command_line is used for details.
+    if ctx.attr.override_cc_toolchain:
+        cc_toolchain = ctx.attr.override_cc_toolchain[cc_common.CcToolchainInfo]
+    else:
+        cc_toolchain = find_cc_toolchain(ctx)
+
+    # The directory where we generate files. Needs to be unique and in our package.
+    gendir = ctx.label.name + "__dir"
+
+    cc_command_line, cc_env = _cc_toolchain_flags(ctx, cc_toolchain)
+
+    includes = []
+    in_headers = []
+    forwarding_headers = []
+    for lib in ctx.attr.libs:
+        compilation = lib[CcInfo].compilation_context
+
+        # TODO(Brian): How should we be juggling includes, quote_includes, and system_includes?
+        includes.append(compilation.includes)
+        includes.append(compilation.quote_includes)
+        includes.append(compilation.system_includes)
+        in_headers.append(compilation.headers)
+        for header in compilation.direct_public_headers:
+            # TODO(Brian): This doesn't work if it's being `#include`ed (via
+            # `include_cpp!`) using one of the includes paths. Maybe we should
+            # add each `includes` prefixed with `gendir` to solve that?
+            forwarding = ctx.actions.declare_file("%s/%s" % (gendir, header.short_path))
+            forwarding_headers.append(forwarding)
+            ctx.actions.write(forwarding, '#include "%s"' % header.short_path)
+    includes = depset(transitive = includes)
+    action_inputs = depset(
+        direct = ctx.files.srcs + ctx.files.cxxbridge_srcs,
+        transitive = in_headers + [cc_toolchain.all_files],
+    )
+
+    # This is always the name with --gen-rs-archive, regardless of other flags.
+    out_rs_json = ctx.actions.declare_file("%s/gen.rs.json" % gendir)
+    out_env_file = ctx.actions.declare_file("%s/rustc_env" % gendir)
+    ctx.actions.write(
+        output = out_env_file,
+        content = "AUTOCXX_RS_JSON_ARCHIVE=%s" % out_rs_json.path,
+    )
+
+    out_h = ctx.actions.declare_file("%s_cxxgen.h" % ctx.label.name[:-len("__gen")])
+    out_h_guard = out_h.short_path.replace("/", "_").replace(".", "_")
+    out_h_contents = [
+        "#ifndef %s" % out_h_guard,
+        "#define %s" % out_h_guard,
+        "// GENERATED FILE, DO NOT EDIT",
+        "//",
+        "// #includes all of the declarations exported to C++ from %s" % ctx.label,
+    ]
+    out_cc = []
+
+    # See `gen --help` for details on the naming of these outputs.
+    for cc_index in range(ctx.attr.sections_to_generate):
+        out_cc.append(ctx.actions.declare_file("%s/gen%d.cc" % (gendir, cc_index)))
+        gen_h = ctx.actions.declare_file("%s/gen%d.h" % (gendir, cc_index))
+        out_cc.append(gen_h)
+        out_h_contents.append("#include \"%s\"" % gen_h.short_path)
+        autocxxgen_h = ctx.actions.declare_file("%s/autocxxgen%d.h" % (gendir, cc_index))
+        out_cc.append(autocxxgen_h)
+        out_h_contents.append("#include \"%s\"" % autocxxgen_h.short_path)
+
+    cxxbridge_cc_srcs = []
+    for src in ctx.files.cxxbridge_srcs:
+        cxxbridge_cc = ctx.actions.declare_file("%s/cxxbridge.cc" % gendir)
+        cxxbridge_cc_srcs.append(cxxbridge_cc)
+        cxxbridge_h = ctx.actions.declare_file("%s/cxxbridge.h" % gendir)
+        cxxbridge_cc_srcs.append(cxxbridge_h)
+        out_h_contents.append("#include \"%s\"" % cxxbridge_h.short_path)
+        ctx.actions.run(
+            mnemonic = "CxxCodegen",
+            executable = ctx.executable._cxx_codegen,
+            inputs = [src],
+            outputs = [cxxbridge_cc, cxxbridge_h],
+            arguments = [src.path, "--output", cxxbridge_h.path, "--output", cxxbridge_cc.path],
+        )
+
+    out_h_contents.append("#endif  // %s" % out_h_guard)
+    ctx.actions.write(
+        output = out_h,
+        content = "\n".join(out_h_contents),
+    )
+
+    gen_rs = ctx.actions.args()
+    gen_rs.add_all(["--outdir", out_rs_json.dirname])
+    gen_rs.add("--gen-rs-archive")
+    gen_rs.add("--gen-cpp")
+
+    gen_rs.add_all(["--generate-exact", ctx.attr.sections_to_generate])
+
+    gen_rs.add_all(ctx.files.srcs)
+    gen_rs.add_all(ctx.files.cxxbridge_srcs)
+
+    # TODO: Do these go before or after the --? They're partially redundant with
+    # cc_command_line too.
+    gen_rs.add_all(includes, before_each = "-I")
+    gen_rs.add("--")
+
+    # TODO: These are flags for the cc_toolchain, not the libclang they're being passed to.
+    # Figure out how to handle that nicely. Maybe just require they're compatible, and direct
+    # people to overriding the toolchain in use instead?
+    gen_rs.add_all(cc_command_line)
+
+    gen_rs.add("-Wno-unused-private-field")
+    env = dict(cc_env)
+    env.update(
+        LIBCLANG_PATH = ctx.file._libclang.path,
+    )
+    if ctx.attr.gen_debug:
+        env.update(
+            RUST_BACKTRACE = "full",
+            RUST_LOG = "autocxx_engine=info",
+        )
+    ctx.actions.run(
+        arguments = [gen_rs],
+        outputs = [out_rs_json] + out_cc,
+        tools = [ctx.file._libclang],
+        inputs = action_inputs,
+        env = env,
+        executable = ctx.executable._autocxx_gen,
+        mnemonic = "AutocxxGen",
+    )
+
+    return [
+        OutputGroupInfo(
+            cc_srcs = out_cc + cxxbridge_cc_srcs,
+            hdr_srcs = [out_h],
+            compile_data = forwarding_headers + [out_rs_json],
+            env_files = [out_env_file],
+        ),
+    ]
+
+_autocxx_library_gen = rule(
+    implementation = _autocxx_library_gen_impl,
+    attrs = {
+        "libs": attr.label_list(
+            mandatory = True,
+            providers = [CcInfo],
+            doc = "C++ libraries to let Rust use headers from",
+        ),
+        "srcs": attr.label_list(
+            allow_files = [".rs"],
+            mandatory = False,
+            doc = "Rust sources with `include_cpp!` macros",
+            default = [],
+        ),
+        # TODO(Brian): Do we need to support this? Or just throw them in srcs?
+        "cxxbridge_srcs": attr.label_list(
+            allow_files = [".rs"],
+            mandatory = False,
+            doc = "Rust sources with only [cxx::bridge] annotations",
+            default = [],
+        ),
+        "sections_to_generate": attr.int(
+            default = 20,
+            doc = (
+                "The number of `cxx::bridge` sections to support," +
+                " including ones created by `autocxx::include_cpp!`." +
+                " The default is sufficient for most use cases." +
+                " Setting this too large has a small performance impact, setting it" +
+                " too low will result in a build failure."
+            ),
+        ),
+        "gen_debug": attr.bool(
+            default = False,
+            doc = "Print (lots of) debug info about autocxx's codegen at build time.",
+        ),
+        "_autocxx_gen": attr.label(
+            executable = True,
+            default = Label("@//third_party/autocxx/gen/cmd:gen"),
+            cfg = "exec",
+        ),
+        "_cxx_codegen": attr.label(
+            executable = True,
+            default = Label("@//third_party/cargo:cargo_bin_cxxbridge"),
+            cfg = "exec",
+        ),
+        "_libclang": attr.label(
+            cfg = "exec",
+            default = Label("@llvm_k8//:libclang"),
+            allow_single_file = True,
+        ),
+        "override_cc_toolchain": attr.label(mandatory = False, providers = [cc_common.CcToolchainInfo]),
+        "_cc_toolchain": attr.label(default = Label("@bazel_tools//tools/cpp:current_cc_toolchain")),
+    },
+    toolchains = [
+        "@rules_rust//rust:toolchain",
+        "@bazel_tools//tools/cpp:toolchain_type",
+    ],
+    fragments = ["cpp"],
+)
+
+def autocxx_library(
+        name,
+        visibility = None,
+        target_compatible_with = None,
+        libs = [],
+        srcs = [],
+        cxxbridge_srcs = [],
+        override_cc_toolchain = None,
+        deps = [],
+        rs_deps = [],
+        testonly = None,
+        crate_features = None,
+        crate_name = None,
+        gen_debug = None):
+    """A macro to generate Rust <-> C++ interop code with autocxx.
+
+    Creates the following rules:
+      * A rust_library with the given name, which includes the given srcs. Note that it will not
+        include them directly due to how autocxx works; instead, they will be copied into a
+        generated file along with changes from autocxx.
+      * A cc_library with the given name + `_cc`. This is for C++ code that wants to use APIs
+        from the given Rust code. Rust dependencies should _not_ depend on this. The header for
+        C++ to #include will be the given name + `_cxxgen.h`.
+
+      `deps` is for other `autocxx_library` rules. `rs_deps` is for dependencies of the Rust code.
+    """
+    library_gen_name = "%s__gen" % name
+    _autocxx_library_gen(
+        name = library_gen_name,
+        visibility = ["//visibility:private"],
+        target_compatible_with = target_compatible_with,
+        testonly = testonly,
+        libs = libs,
+        srcs = srcs,
+        cxxbridge_srcs = cxxbridge_srcs,
+        override_cc_toolchain = override_cc_toolchain,
+        gen_debug = gen_debug,
+    )
+    gen_cc_srcs_name = "%s__cc_srcs" % name
+    native.filegroup(
+        name = gen_cc_srcs_name,
+        visibility = ["//visibility:private"],
+        target_compatible_with = target_compatible_with,
+        testonly = testonly,
+        srcs = [library_gen_name],
+        output_group = "cc_srcs",
+    )
+    gen_hdr_srcs_name = "%s__hdr_srcs" % name
+    native.filegroup(
+        name = gen_hdr_srcs_name,
+        visibility = ["//visibility:private"],
+        target_compatible_with = target_compatible_with,
+        testonly = testonly,
+        srcs = [library_gen_name],
+        output_group = "hdr_srcs",
+    )
+    gen_compile_data_name = "%s__compile_data" % name
+    native.filegroup(
+        name = gen_compile_data_name,
+        visibility = ["//visibility:private"],
+        target_compatible_with = target_compatible_with,
+        testonly = testonly,
+        srcs = [library_gen_name],
+        output_group = "compile_data",
+    )
+    gen_env_files_name = "%s__env_files" % name
+    native.filegroup(
+        name = gen_env_files_name,
+        visibility = ["//visibility:private"],
+        target_compatible_with = target_compatible_with,
+        testonly = testonly,
+        srcs = [library_gen_name],
+        output_group = "env_files",
+    )
+    cc_library_name = "%s__cc" % name
+    native.cc_library(
+        name = cc_library_name,
+        visibility = visibility,
+        target_compatible_with = target_compatible_with,
+        testonly = testonly,
+        deps = deps + libs + [
+            "//third_party/cargo:cxx_cc",
+        ],
+        srcs = [gen_cc_srcs_name],
+        hdrs = [gen_hdr_srcs_name],
+    )
+
+    rust_library(
+        name = name,
+        visibility = visibility,
+        target_compatible_with = target_compatible_with,
+        testonly = testonly,
+        srcs = srcs + cxxbridge_srcs,
+        proc_macro_deps = [
+            "//third_party/cargo:cxxbridge_macro",
+        ],
+        crate_features = crate_features,
+        crate_name = crate_name,
+        deps = deps + rs_deps + [
+            cc_library_name,
+            "//third_party/cargo:cxx",
+            "//third_party/autocxx",
+        ],
+        compile_data = [gen_compile_data_name],
+        rustc_env_files = [gen_env_files_name],
+    )
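
As a rough sketch of how the `autocxx_library` macro above is intended to be consumed (the target names, the C++ library, and the source file below are hypothetical, not part of this change):

```
load("//tools/build_rules:autocxx.bzl", "autocxx_library")

# example.rs would contain an `autocxx::include_cpp!` invocation naming headers
# exported by the C++ library listed in `libs`.
autocxx_library(
    name = "example",
    srcs = ["example.rs"],
    libs = ["//some/cc:example_lib"],
)
```

This creates the `:example` rust_library for Rust callers plus an `:example__cc` cc_library, whose C++ consumers `#include` the generated `example_cxxgen.h` to reach the exported Rust APIs.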
diff --git a/tools/platforms/BUILD b/tools/platforms/BUILD
index 37fed2e..1d79f06 100644
--- a/tools/platforms/BUILD
+++ b/tools/platforms/BUILD
@@ -42,7 +42,12 @@
         "@platforms//cpu:armv7",
         "//tools/platforms/hardware:roborio",
         "//tools/platforms/go:lacks_support",
-        "//tools/platforms/rust:has_support",
+        # TODO(Brian): This almost works, but cxx assumes llvm-ld's linking
+        # behavior and doesn't have an easy way to support GNU ld. See
+        # https://github.com/dtolnay/cxx/pull/1069 for a bit more explanation.
+        # Bazel needs to group multiple things into a single cc_library to
+        # handle that; we need to figure out how to do that here, or switch linkers.
+        "//tools/platforms/rust:lacks_support",
         "//tools/platforms/nodejs:lacks_support",
     ],
 )
diff --git a/tools/rust/BUILD b/tools/rust/BUILD
index 74ed92d..a5a2f89 100644
--- a/tools/rust/BUILD
+++ b/tools/rust/BUILD
@@ -98,7 +98,7 @@
     exec_triple = "none",
     os = "none",
     rust_doc = ":noop_error_exit",
-    rust_lib = ":empty_stdlib",
+    rust_std = ":empty_stdlib",
     rustc = ":noop_error_exit",
     rustc_lib = ":noop_error_exit",
     rustc_srcs = None,
@@ -124,7 +124,7 @@
 rust_binary(
     name = "tweak_cargo_raze_output",
     srcs = ["tweak_cargo_raze_output.rs"],
-    target_compatible_with = ["@platforms//os:linux"],
+    target_compatible_with = ["//tools/platforms/rust:has_support"],
     visibility = ["//visibility:public"],
     deps = [
         "//third_party/cargo:anyhow",
diff --git a/tools/rust/forward_allocator.c b/tools/rust/forward_allocator.c
index 3e6ba96..a321af3 100644
--- a/tools/rust/forward_allocator.c
+++ b/tools/rust/forward_allocator.c
@@ -2,7 +2,8 @@
 
 // This file has some exciting magic to get Rust code linking in a cc_binary.
 // The Rust compiler generates some similar symbol aliases when it links, so we
-// have to do it manually.
+// have to do it manually. We mark all our symbols as weak so that linking this
+// via Rust tooling to produce a binary with a Rust main works.
 //
 // It is intended to be used in rust_toolchain.allocator_library.
 //
@@ -20,27 +21,32 @@
 // not work with any other allocator switched in via `#[global_allocator]`.
 
 // New feature as of https://github.com/rust-lang/rust/pull/88098.
-uint8_t __rust_alloc_error_handler_should_panic = 0;
+__attribute__((weak)) uint8_t __rust_alloc_error_handler_should_panic = 0;
 
 uint8_t *__rdl_alloc(uintptr_t size, uintptr_t align);
+__attribute__((weak))
 uint8_t *__rust_alloc(uintptr_t size, uintptr_t align) {
   return __rdl_alloc(size, align);
 }
 void __rdl_dealloc(uint8_t *ptr, uintptr_t size, uintptr_t align);
+__attribute__((weak))
 void __rust_dealloc(uint8_t *ptr, uintptr_t size, uintptr_t align) {
   __rdl_dealloc(ptr, size, align);
 }
 uint8_t *__rdl_realloc(uint8_t *ptr, uintptr_t old_size, uintptr_t align,
                        uintptr_t new_size);
+__attribute__((weak))
 uint8_t *__rust_realloc(uint8_t *ptr, uintptr_t old_size, uintptr_t align,
                         uintptr_t new_size) {
   return __rdl_realloc(ptr, old_size, align, new_size);
 }
 uint8_t *__rdl_alloc_zeroed(uintptr_t size, uintptr_t align);
+__attribute__((weak))
 uint8_t *__rust_alloc_zeroed(uintptr_t size, uintptr_t align) {
   return __rdl_alloc_zeroed(size, align);
 }
 void __rdl_oom(uintptr_t size, uintptr_t align);
+__attribute__((weak))
 void __rust_alloc_error_handler(uintptr_t size, uintptr_t align) {
   __rdl_oom(size, align);
 }