Merge "shrink config for webproxy"
diff --git a/README.md b/README.md
index 99154c4..d54d963 100644
--- a/README.md
+++ b/README.md
@@ -128,3 +128,21 @@
 # log messages and changes.
    apt-get install gitg
 ```
+
+### Roborio Kernel Traces
+
+Currently (as of 2020.02.26), top tends to produce misleading statistics. As
+such, you can get more useful information about CPU usage by using kernel
+traces. Sample usage:
+```console
+# Note that you will need to install the trace-cmd package on the roborio.
+# This may not be a trivial task.
+# Start the trace
+trace-cmd start -e sched_switch -e workqueue
+# Stop the trace
+trace-cmd stop
+# Save the trace to trace.dat
+trace-cmd extract
+```
+You can then scp the `trace.dat` file to your computer and run `kernelshark
+trace.dat` (may require installing the `kernelshark` apt package).
diff --git a/aos/BUILD b/aos/BUILD
index 12d25b2..1d6d388 100644
--- a/aos/BUILD
+++ b/aos/BUILD
@@ -477,6 +477,7 @@
         "testdata/config2_multinode.json",
         "testdata/config3.json",
         "testdata/expected.json",
+        "testdata/expected_merge_with.json",
         "testdata/expected_multinode.json",
         "testdata/good_multinode.json",
         "testdata/good_multinode_hostnames.json",
diff --git a/aos/configuration.cc b/aos/configuration.cc
index ac897bf..d6d276e 100644
--- a/aos/configuration.cc
+++ b/aos/configuration.cc
@@ -417,6 +417,14 @@
   return MergeConfiguration(ReadConfig(path, &visited_paths));
 }
 
+FlatbufferDetachedBuffer<Configuration> MergeWithConfig(
+    const Configuration *config, std::string_view json) {
+  FlatbufferDetachedBuffer<Configuration> addition =
+      JsonToFlatbuffer(json, Configuration::MiniReflectTypeTable());
+
+  return MergeConfiguration(MergeFlatBuffers(config, &addition.message()));
+}
+
 const Channel *GetChannel(const Configuration *config, std::string_view name,
                           std::string_view type,
                           std::string_view application_name, const Node *node) {
diff --git a/aos/configuration.h b/aos/configuration.h
index bf2ec6c..9193fd5 100644
--- a/aos/configuration.h
+++ b/aos/configuration.h
@@ -32,6 +32,11 @@
     const Flatbuffer<Configuration> &config,
     const std::vector<aos::FlatbufferString<reflection::Schema>> &schemas);
 
+// Merges a configuration JSON snippet into the provided configuration and
+// returns the modified config.
+FlatbufferDetachedBuffer<Configuration> MergeWithConfig(
+    const Configuration *config, std::string_view json);
+
 // Returns the resolved location for a name, type, and application name. Returns
 // nullptr if none is found.
 //
diff --git a/aos/configuration_test.cc b/aos/configuration_test.cc
index c36bd93..f069277 100644
--- a/aos/configuration_test.cc
+++ b/aos/configuration_test.cc
@@ -13,6 +13,8 @@
 namespace configuration {
 namespace testing {
 
+const std::string kConfigPrefix = "aos/testdata/";
+
 class ConfigurationTest : public ::testing::Test {
  public:
   ConfigurationTest() { ::aos::testing::EnableTestLogging(); }
@@ -30,19 +32,19 @@
 // Tests that we can read and merge a configuration.
 TEST_F(ConfigurationTest, ConfigMerge) {
   FlatbufferDetachedBuffer<Configuration> config =
-      ReadConfig("aos/testdata/config1.json");
+      ReadConfig(kConfigPrefix + "config1.json");
   LOG(INFO) << "Read: " << FlatbufferToJson(config, true);
 
   EXPECT_EQ(
       absl::StripSuffix(
-          util::ReadFileToStringOrDie("aos/testdata/expected.json"), "\n"),
+          util::ReadFileToStringOrDie(kConfigPrefix + "expected.json"), "\n"),
       FlatbufferToJson(config, true));
 }
 
 // Tests that we can get back a ChannelIndex.
 TEST_F(ConfigurationTest, ChannelIndex) {
   FlatbufferDetachedBuffer<Configuration> config =
-      ReadConfig("aos/testdata/config1.json");
+      ReadConfig(kConfigPrefix + "config1.json");
 
   EXPECT_EQ(
       ChannelIndex(&config.message(), config.message().channels()->Get(1u)),
@@ -52,12 +54,12 @@
 // Tests that we can read and merge a multinode configuration.
 TEST_F(ConfigurationTest, ConfigMergeMultinode) {
   FlatbufferDetachedBuffer<Configuration> config =
-      ReadConfig("aos/testdata/config1_multinode.json");
+      ReadConfig(kConfigPrefix + "config1_multinode.json");
   LOG(INFO) << "Read: " << FlatbufferToJson(config, true);
 
   EXPECT_EQ(
       std::string(absl::StripSuffix(
-          util::ReadFileToStringOrDie("aos/testdata/expected_multinode.json"),
+          util::ReadFileToStringOrDie(kConfigPrefix + "expected_multinode.json"),
           "\n")),
       FlatbufferToJson(config, true));
 }
@@ -65,7 +67,7 @@
 // Tests that we sort the entries in a config so we can look entries up.
 TEST_F(ConfigurationTest, UnsortedConfig) {
   FlatbufferDetachedBuffer<Configuration> config =
-      ReadConfig("aos/testdata/backwards.json");
+      ReadConfig(kConfigPrefix + "backwards.json");
 
   LOG(INFO) << "Read: " << FlatbufferToJson(config, true);
 
@@ -80,16 +82,41 @@
   EXPECT_DEATH(
       {
         FlatbufferDetachedBuffer<Configuration> config =
-            ReadConfig("aos/testdata/config1_bad.json");
+            ReadConfig(kConfigPrefix + "config1_bad.json");
       },
-      "aos/testdata/config1_bad.json");
+      kConfigPrefix + "config1_bad.json");
+}
+
+// Tests that we can modify a config with a json snippet.
+TEST_F(ConfigurationTest, MergeWithConfig) {
+  FlatbufferDetachedBuffer<Configuration> config =
+      ReadConfig(kConfigPrefix + "config1.json");
+  LOG(INFO) << "Read: " << FlatbufferToJson(config, true);
+
+  FlatbufferDetachedBuffer<Configuration> updated_config =
+      MergeWithConfig(&config.message(),
+                      R"channel({
+  "channels": [
+    {
+      "name": "/foo",
+      "type": ".aos.bar",
+      "max_size": 100
+    }
+  ]
+})channel");
+
+  EXPECT_EQ(
+      absl::StripSuffix(util::ReadFileToStringOrDie(
+                            kConfigPrefix + "expected_merge_with.json"),
+                        "\n"),
+      FlatbufferToJson(updated_config, true));
 }
 
 // Tests that we can lookup a location, complete with maps, from a merged
 // config.
 TEST_F(ConfigurationTest, GetChannel) {
   FlatbufferDetachedBuffer<Configuration> config =
-      ReadConfig("aos/testdata/config1.json");
+      ReadConfig(kConfigPrefix + "config1.json");
 
   // Test a basic lookup first.
   EXPECT_EQ(
@@ -123,7 +150,7 @@
 // Tests that we can lookup a location with node specific maps.
 TEST_F(ConfigurationTest, GetChannelMultinode) {
   FlatbufferDetachedBuffer<Configuration> config =
-      ReadConfig("aos/testdata/good_multinode.json");
+      ReadConfig(kConfigPrefix + "good_multinode.json");
   const Node *pi1 = GetNode(&config.message(), "pi1");
   const Node *pi2 = GetNode(&config.message(), "pi2");
 
@@ -159,7 +186,7 @@
 // Tests that we can lookup a location with type specific maps.
 TEST_F(ConfigurationTest, GetChannelTypedMultinode) {
   FlatbufferDetachedBuffer<Configuration> config =
-      ReadConfig("aos/testdata/good_multinode.json");
+      ReadConfig(kConfigPrefix + "good_multinode.json");
   const Node *pi1 = GetNode(&config.message(), "pi1");
 
   // Test a basic lookup first.
@@ -182,28 +209,28 @@
   EXPECT_DEATH(
       {
         FlatbufferDetachedBuffer<Configuration> config =
-            ReadConfig("aos/testdata/invalid_nodes.json");
+            ReadConfig(kConfigPrefix + "invalid_nodes.json");
       },
       "source_node");
 
   EXPECT_DEATH(
       {
         FlatbufferDetachedBuffer<Configuration> config =
-            ReadConfig("aos/testdata/invalid_source_node.json");
+            ReadConfig(kConfigPrefix + "invalid_source_node.json");
       },
       "source_node");
 
   EXPECT_DEATH(
       {
         FlatbufferDetachedBuffer<Configuration> config =
-            ReadConfig("aos/testdata/invalid_destination_node.json");
+            ReadConfig(kConfigPrefix + "invalid_destination_node.json");
       },
       "destination_nodes");
 
   EXPECT_DEATH(
       {
         FlatbufferDetachedBuffer<Configuration> config =
-            ReadConfig("aos/testdata/self_forward.json");
+            ReadConfig(kConfigPrefix + "self_forward.json");
       },
       "forwarding data to itself");
 }
@@ -582,7 +609,7 @@
 // Tests that we can deduce source nodes from a multinode config.
 TEST_F(ConfigurationTest, SourceNodeNames) {
   FlatbufferDetachedBuffer<Configuration> config =
-      ReadConfig("aos/testdata/config1_multinode.json");
+      ReadConfig(kConfigPrefix + "config1_multinode.json");
 
   // This is a bit simplistic in that it doesn't test deduplication, but it does
   // exercise a lot of the logic.
@@ -597,7 +624,7 @@
 // Tests that we can deduce destination nodes from a multinode config.
 TEST_F(ConfigurationTest, DestinationNodeNames) {
   FlatbufferDetachedBuffer<Configuration> config =
-      ReadConfig("aos/testdata/config1_multinode.json");
+      ReadConfig(kConfigPrefix + "config1_multinode.json");
 
   // This is a bit simplistic in that it doesn't test deduplication, but it does
   // exercise a lot of the logic.
@@ -613,7 +640,7 @@
 TEST_F(ConfigurationTest, GetNodes) {
   {
     FlatbufferDetachedBuffer<Configuration> config =
-        ReadConfig("aos/testdata/good_multinode.json");
+        ReadConfig(kConfigPrefix + "good_multinode.json");
     const Node *pi1 = GetNode(&config.message(), "pi1");
     const Node *pi2 = GetNode(&config.message(), "pi2");
 
@@ -622,7 +649,7 @@
 
   {
     FlatbufferDetachedBuffer<Configuration> config =
-        ReadConfig("aos/testdata/config1.json");
+        ReadConfig(kConfigPrefix + "config1.json");
     EXPECT_THAT(GetNodes(&config.message()), ::testing::ElementsAre(nullptr));
   }
 }
@@ -630,9 +657,9 @@
 // Tests that we can extract a node index from a config.
 TEST_F(ConfigurationTest, GetNodeIndex) {
   FlatbufferDetachedBuffer<Configuration> config =
-      ReadConfig("aos/testdata/good_multinode.json");
+      ReadConfig(kConfigPrefix + "good_multinode.json");
   FlatbufferDetachedBuffer<Configuration> config2 =
-      ReadConfig("aos/testdata/good_multinode.json");
+      ReadConfig(kConfigPrefix + "good_multinode.json");
   const Node *pi1 = GetNode(&config.message(), "pi1");
   const Node *pi2 = GetNode(&config.message(), "pi2");
 
@@ -653,13 +680,13 @@
 // valid nodes.
 TEST_F(ConfigurationDeathTest, GetNodeOrDie) {
   FlatbufferDetachedBuffer<Configuration> config =
-      ReadConfig("aos/testdata/good_multinode.json");
+      ReadConfig(kConfigPrefix + "good_multinode.json");
   FlatbufferDetachedBuffer<Configuration> config2 =
-      ReadConfig("aos/testdata/good_multinode.json");
+      ReadConfig(kConfigPrefix + "good_multinode.json");
   {
     // Simple case, nullptr -> nullptr
     FlatbufferDetachedBuffer<Configuration> single_node_config =
-        ReadConfig("aos/testdata/config1.json");
+        ReadConfig(kConfigPrefix + "config1.json");
     EXPECT_EQ(nullptr, GetNodeOrDie(&single_node_config.message(), nullptr));
 
     // Confirm that we die when a node is passed in.
@@ -679,7 +706,7 @@
 
 TEST_F(ConfigurationTest, GetNodeFromHostname) {
   FlatbufferDetachedBuffer<Configuration> config =
-      ReadConfig("aos/testdata/good_multinode.json");
+      ReadConfig(kConfigPrefix + "good_multinode.json");
   EXPECT_EQ("pi1",
             CHECK_NOTNULL(GetNodeFromHostname(&config.message(), "raspberrypi"))
                 ->name()
@@ -695,7 +722,7 @@
 
 TEST_F(ConfigurationTest, GetNodeFromHostnames) {
   FlatbufferDetachedBuffer<Configuration> config =
-      ReadConfig("aos/testdata/good_multinode_hostnames.json");
+      ReadConfig(kConfigPrefix + "good_multinode_hostnames.json");
   EXPECT_EQ("pi1",
             CHECK_NOTNULL(GetNodeFromHostname(&config.message(), "raspberrypi"))
                 ->name()
diff --git a/aos/controls/control_loop_test.h b/aos/controls/control_loop_test.h
index 7ad5683..536df02 100644
--- a/aos/controls/control_loop_test.h
+++ b/aos/controls/control_loop_test.h
@@ -114,6 +114,10 @@
     return &configuration_.message();
   }
 
+  SimulatedEventLoopFactory *event_loop_factory() {
+    return &event_loop_factory_;
+  }
+
  private:
   // Sends out all of the required queue messages.
   void SendJoystickState() {
diff --git a/aos/events/shm_event_loop.cc b/aos/events/shm_event_loop.cc
index dad62cd..57bcb1c 100644
--- a/aos/events/shm_event_loop.cc
+++ b/aos/events/shm_event_loop.cc
@@ -58,7 +58,7 @@
 }
 std::string ShmPath(const Channel *channel) {
   CHECK(channel->has_type());
-  return ShmFolder(channel) + channel->type()->str() + ".v1";
+  return ShmFolder(channel) + channel->type()->str() + ".v2";
 }
 
 class MMapedQueue {
diff --git a/aos/events/shm_event_loop_test.cc b/aos/events/shm_event_loop_test.cc
index 87411d2..0c39704 100644
--- a/aos/events/shm_event_loop_test.cc
+++ b/aos/events/shm_event_loop_test.cc
@@ -27,12 +27,12 @@
     }
 
     // Clean up anything left there before.
-    unlink((FLAGS_shm_base + "/test/aos.TestMessage.v1").c_str());
-    unlink((FLAGS_shm_base + "/test1/aos.TestMessage.v1").c_str());
-    unlink((FLAGS_shm_base + "/test2/aos.TestMessage.v1").c_str());
-    unlink((FLAGS_shm_base + "/test2/aos.TestMessage.v1").c_str());
-    unlink((FLAGS_shm_base + "/aos/aos.timing.Report.v1").c_str());
-    unlink((FLAGS_shm_base + "/aos/aos.logging.LogMessageFbs.v1").c_str());
+    unlink((FLAGS_shm_base + "/test/aos.TestMessage.v2").c_str());
+    unlink((FLAGS_shm_base + "/test1/aos.TestMessage.v2").c_str());
+    unlink((FLAGS_shm_base + "/test2/aos.TestMessage.v2").c_str());
+    unlink((FLAGS_shm_base + "/test2/aos.TestMessage.v2").c_str());
+    unlink((FLAGS_shm_base + "/aos/aos.timing.Report.v2").c_str());
+    unlink((FLAGS_shm_base + "/aos/aos.logging.LogMessageFbs.v2").c_str());
   }
 
   ~ShmEventLoopTestFactory() { FLAGS_override_hostname = ""; }
diff --git a/aos/ipc_lib/lockless_queue.cc b/aos/ipc_lib/lockless_queue.cc
index c323b8b..7126ffd 100644
--- a/aos/ipc_lib/lockless_queue.cc
+++ b/aos/ipc_lib/lockless_queue.cc
@@ -306,6 +306,7 @@
     }
 
     memory->next_queue_index.Invalidate();
+    memory->uid = getuid();
 
     for (size_t i = 0; i < memory->num_senders(); ++i) {
       ::aos::ipc_lib::Sender *s = memory->GetSender(i);
@@ -319,6 +320,8 @@
     // Signal everything is done.  This needs to be done last, so if we die, we
     // redo initialization.
     memory->initialized = true;
+  } else {
+    CHECK_EQ(getuid(), memory->uid) << ": UIDs must match for all processes";
   }
 
   return memory;
@@ -871,6 +874,8 @@
               << memory->next_queue_index.Load(queue_size).DebugString()
               << ::std::endl;
 
+  ::std::cout << "    uid_t uid = " << memory->uid << ::std::endl;
+
   ::std::cout << "  }" << ::std::endl;
   ::std::cout << "  AtomicIndex queue[" << queue_size << "] {" << ::std::endl;
   for (size_t i = 0; i < queue_size; ++i) {
diff --git a/aos/ipc_lib/lockless_queue_memory.h b/aos/ipc_lib/lockless_queue_memory.h
index cbe76a7..a10609c 100644
--- a/aos/ipc_lib/lockless_queue_memory.h
+++ b/aos/ipc_lib/lockless_queue_memory.h
@@ -50,6 +50,11 @@
   // A message is valid iff its internal index matches the index in the queue.
   AtomicQueueIndex next_queue_index;
 
+  // All processes using a queue need to be able to send messages to each other.
+  // We're going to require they have matching UIDs, which is sufficient to do
+  // that.
+  uid_t uid;
+
   // There is then memory allocated after this structure.  That memory is used
   // to store the messages, queue, watchers, and senders.  This is equivalent to
   // writing:
diff --git a/aos/logging/BUILD b/aos/logging/BUILD
index 214df63..68a493b 100644
--- a/aos/logging/BUILD
+++ b/aos/logging/BUILD
@@ -156,6 +156,6 @@
 flatbuffer_cc_library(
     name = "log_message_fbs",
     srcs = ["log_message.fbs"],
-    visibility = ["//visibility:public"],
     gen_reflections = 1,
+    visibility = ["//visibility:public"],
 )
diff --git a/aos/network/www/BUILD b/aos/network/www/BUILD
index bab5198..67e54f8 100644
--- a/aos/network/www/BUILD
+++ b/aos/network/www/BUILD
@@ -7,7 +7,7 @@
         "**/*.html",
         "**/*.css",
     ]),
-    visibility=["//visibility:public"],
+    visibility = ["//visibility:public"],
 )
 
 ts_library(
@@ -16,12 +16,12 @@
         "config_handler.ts",
         "proxy.ts",
     ],
+    visibility = ["//visibility:public"],
     deps = [
-        "//aos/network:web_proxy_ts_fbs",
-        "//aos/network:connect_ts_fbs",
         "//aos:configuration_ts_fbs",
+        "//aos/network:connect_ts_fbs",
+        "//aos/network:web_proxy_ts_fbs",
     ],
-    visibility=["//visibility:public"],
 )
 
 ts_library(
@@ -39,10 +39,10 @@
 rollup_bundle(
     name = "main_bundle",
     entry_point = "aos/network/www/main",
+    visibility = ["//aos:__subpackages__"],
     deps = [
         "main",
     ],
-    visibility=["//aos:__subpackages__"],
 )
 
 genrule(
@@ -54,5 +54,5 @@
         "flatbuffers.js",
     ],
     cmd = "cp $(location @com_github_google_flatbuffers//:flatjs) $@",
-    visibility=["//aos:__subpackages__"],
+    visibility = ["//aos:__subpackages__"],
 )
diff --git a/aos/testdata/expected_merge_with.json b/aos/testdata/expected_merge_with.json
new file mode 100644
index 0000000..0ce2056
--- /dev/null
+++ b/aos/testdata/expected_merge_with.json
@@ -0,0 +1,64 @@
+{
+ "channels": [
+  {
+   "name": "/foo",
+   "type": ".aos.bar",
+   "max_size": 100
+  },
+  {
+   "name": "/foo2",
+   "type": ".aos.bar"
+  },
+  {
+   "name": "/foo3",
+   "type": ".aos.bar",
+   "max_size": 9
+  }
+ ],
+ "maps": [
+  {
+   "match": {
+    "name": "/batman"
+   },
+   "rename": {
+    "name": "/bar"
+   }
+  },
+  {
+   "match": {
+    "name": "/batman"
+   },
+   "rename": {
+    "name": "/foo"
+   }
+  }
+ ],
+ "applications": [
+  {
+   "name": "app1",
+   "maps": [
+    {
+     "match": {
+      "name": "/bar"
+     },
+     "rename": {
+      "name": "/foo"
+     }
+    }
+   ]
+  },
+  {
+   "name": "app2",
+   "maps": [
+    {
+     "match": {
+      "name": "/baz"
+     },
+     "rename": {
+      "name": "/foo"
+     }
+    }
+   ]
+  }
+ ]
+}
diff --git a/aos/testing/BUILD b/aos/testing/BUILD
index 9aa058d..8693f27 100644
--- a/aos/testing/BUILD
+++ b/aos/testing/BUILD
@@ -24,9 +24,9 @@
     visibility = ["//visibility:public"],
     deps = [
         ":googletest",
-        "@com_google_absl//absl/base",
         "//aos/logging:implementations",
         "//aos/mutex",
+        "@com_google_absl//absl/base",
     ],
 )
 
diff --git a/debian/webrtc.BUILD b/debian/webrtc.BUILD
index e90e268..23e029d 100644
--- a/debian/webrtc.BUILD
+++ b/debian/webrtc.BUILD
@@ -2,17 +2,17 @@
 
 cc_library(
     name = "webrtc",
-    visibility = ["//visibility:public"],
-    hdrs = glob(["include/**/*.h"]),
     srcs = cpu_select({
         "arm": ["lib/arm/Release/libwebrtc_full.a"],
         "else": ["lib/x64/Release/libwebrtc_full.a"],
     }),
+    hdrs = glob(["include/**/*.h"]),
     includes = ["include"],
+    visibility = ["//visibility:public"],
     deps = [
+        "@com_google_absl//absl/algorithm:container",
         "@com_google_absl//absl/strings",
         "@com_google_absl//absl/types:optional",
         "@com_google_absl//absl/types:variant",
-        "@com_google_absl//absl/algorithm:container",
     ],
 )
diff --git a/frc971/control_loops/drivetrain/drivetrain.cc b/frc971/control_loops/drivetrain/drivetrain.cc
index a19f57b..77c52c4 100644
--- a/frc971/control_loops/drivetrain/drivetrain.cc
+++ b/frc971/control_loops/drivetrain/drivetrain.cc
@@ -148,7 +148,7 @@
   }
 
   while (imu_values_fetcher_.FetchNext()) {
-    imu_zeroer_.ProcessMeasurement(*imu_values_fetcher_);
+    imu_zeroer_.InsertMeasurement(*imu_values_fetcher_);
     last_gyro_time_ = monotonic_now;
     if (!imu_zeroer_.Zeroed()) {
       continue;
@@ -165,6 +165,7 @@
 
   bool got_imu_reading = false;
   if (imu_values_fetcher_.get() != nullptr) {
+    imu_zeroer_.ProcessMeasurements();
     got_imu_reading = true;
     switch (dt_config_.imu_type) {
       case IMUType::IMU_X:
diff --git a/frc971/control_loops/python/BUILD b/frc971/control_loops/python/BUILD
index 1fae02b..7953474 100644
--- a/frc971/control_loops/python/BUILD
+++ b/frc971/control_loops/python/BUILD
@@ -167,22 +167,22 @@
     name = "spline_graph",
     srcs = [
         "color.py",
-        "spline_drawing.py",
-        "spline_writer.py",
+        "graph.py",
         "path_edit.py",
         "points.py",
-        "graph.py",
+        "spline_drawing.py",
         "spline_graph.py",
+        "spline_writer.py",
     ],
     legacy_create_init = False,
     restricted_to = ["//tools:k8"],
     visibility = ["//visibility:public"],
     deps = [
+        ":basic_window",
         ":libspline",
         ":python_init",
-        "@python_gtk",
         "@matplotlib_repo//:matplotlib2.7",
-        ":basic_window",
+        "@python_gtk",
     ],
 )
 
diff --git a/frc971/zeroing/imu_zeroer.cc b/frc971/zeroing/imu_zeroer.cc
index b13b43d..9e82d13 100644
--- a/frc971/zeroing/imu_zeroer.cc
+++ b/frc971/zeroing/imu_zeroer.cc
@@ -35,15 +35,23 @@
          accel_averager_.GetRange() < kAccelMaxVariation;
 }
 
-void ImuZeroer::ProcessMeasurement(const IMUValues &values) {
+void ImuZeroer::InsertAndProcessMeasurement(const IMUValues &values) {
+  InsertMeasurement(values);
+  ProcessMeasurements();
+}
+
+void ImuZeroer::InsertMeasurement(const IMUValues &values) {
   last_gyro_sample_ << values.gyro_x(), values.gyro_y(), values.gyro_z();
   gyro_averager_.AddData(last_gyro_sample_);
   last_accel_sample_ << values.accelerometer_x(), values.accelerometer_y(),
                            values.accelerometer_z();
   accel_averager_.AddData(last_accel_sample_);
+}
+
+void ImuZeroer::ProcessMeasurements() {
   if (GyroZeroReady() && AccelZeroReady()) {
     ++good_iters_;
-    if (good_iters_ > kSamplesToAverage / 4) {
+    if (good_iters_ > kSamplesToAverage / 40) {
       const Eigen::Vector3d current_gyro_average = gyro_averager_.GetAverage();
       constexpr double kAverageUpdateWeight = 0.05;
       if (num_zeroes_ > 0) {
diff --git a/frc971/zeroing/imu_zeroer.h b/frc971/zeroing/imu_zeroer.h
index b55c6dd..fd82571 100644
--- a/frc971/zeroing/imu_zeroer.h
+++ b/frc971/zeroing/imu_zeroer.h
@@ -22,7 +22,10 @@
   ImuZeroer();
   bool Zeroed() const;
   bool Faulted() const;
-  void ProcessMeasurement(const IMUValues &values);
+  void InsertMeasurement(const IMUValues &values);
+  // Performs the heavier-duty processing for managing zeroing.
+  void ProcessMeasurements();
+  void InsertAndProcessMeasurement(const IMUValues &values);
   Eigen::Vector3d GyroOffset() const;
   Eigen::Vector3d ZeroedGyro() const;
   Eigen::Vector3d ZeroedAccel() const;
diff --git a/frc971/zeroing/imu_zeroer_test.cc b/frc971/zeroing/imu_zeroer_test.cc
index eee3f57..e97fbe2 100644
--- a/frc971/zeroing/imu_zeroer_test.cc
+++ b/frc971/zeroing/imu_zeroer_test.cc
@@ -32,7 +32,7 @@
   ASSERT_EQ(0.0, zeroer.ZeroedAccel().norm());
   // A measurement before we are zeroed should just result in the measurement
   // being passed through without modification.
-  zeroer.ProcessMeasurement(
+  zeroer.InsertAndProcessMeasurement(
       MakeMeasurement({0.01, 0.02, 0.03}, {4, 5, 6}).message());
   ASSERT_FALSE(zeroer.Zeroed());
   ASSERT_FALSE(zeroer.Faulted());
@@ -50,7 +50,7 @@
   ImuZeroer zeroer;
   ASSERT_FALSE(zeroer.Zeroed());
   for (size_t ii = 0; ii < kMinSamplesToZero; ++ii) {
-    zeroer.ProcessMeasurement(
+    zeroer.InsertAndProcessMeasurement(
         MakeMeasurement({0.01, 0.02, 0.03}, {4, 5, 6}).message());
   }
   ASSERT_TRUE(zeroer.Zeroed());
@@ -68,7 +68,7 @@
   ASSERT_EQ(6.0, zeroer.ZeroedAccel().z());
   // If we get another measurement offset by {1, 1, 1} we should read the result
   // as {1, 1, 1}.
-  zeroer.ProcessMeasurement(
+  zeroer.InsertAndProcessMeasurement(
       MakeMeasurement({0.02, 0.03, 0.04}, {0, 0, 0}).message());
   ASSERT_FALSE(zeroer.Faulted());
   ASSERT_FLOAT_EQ(0.01, zeroer.ZeroedGyro().x());
@@ -82,7 +82,7 @@
   ImuZeroer zeroer;
   ASSERT_FALSE(zeroer.Zeroed());
   for (size_t ii = 0; ii < kMinSamplesToZero; ++ii) {
-    zeroer.ProcessMeasurement(
+    zeroer.InsertAndProcessMeasurement(
         MakeMeasurement({0.1, 0.2, 0.3}, {4, 5, 6}).message());
     ASSERT_FALSE(zeroer.Zeroed());
   }
@@ -97,10 +97,9 @@
   for (size_t ii = 0; ii < kMinSamplesToZero; ++ii) {
     const double offset =
         (static_cast<double>(ii) / (kMinSamplesToZero - 1) - 0.5) * 0.001;
-    zeroer.ProcessMeasurement(
+    zeroer.InsertAndProcessMeasurement(
         MakeMeasurement({0.01 + offset, 0.02 + offset, 0.03 + offset},
-                        {4 + offset, 5 + offset, 6 + offset})
-            .message());
+                        {4 + offset, 5 + offset, 6 + offset}).message());
   }
   ASSERT_TRUE(zeroer.Zeroed());
   ASSERT_FALSE(zeroer.Faulted());
@@ -109,7 +108,7 @@
   ASSERT_NEAR(0.03, zeroer.GyroOffset().z(), 1e-3);
   // If we get another measurement offset by {0.01, 0.01, 0.01} we should read
   // the result as {0.01, 0.01, 0.01}.
-  zeroer.ProcessMeasurement(
+  zeroer.InsertAndProcessMeasurement(
       MakeMeasurement({0.02, 0.03, 0.04}, {0, 0, 0}).message());
   ASSERT_FALSE(zeroer.Faulted());
   ASSERT_NEAR(0.01, zeroer.ZeroedGyro().x(), 1e-3);
@@ -128,10 +127,9 @@
     ASSERT_FALSE(zeroer.Zeroed());
     const double offset =
         (static_cast<double>(ii) / (kMinSamplesToZero - 1) - 0.5) * 1.0;
-    zeroer.ProcessMeasurement(
+    zeroer.InsertAndProcessMeasurement(
         MakeMeasurement({0.01 + offset, 0.02 + offset, 0.03 + offset},
-                        {4 + offset, 5 + offset, 6 + offset})
-            .message());
+                        {4 + offset, 5 + offset, 6 + offset}).message());
   }
   ASSERT_FALSE(zeroer.Zeroed());
   ASSERT_FALSE(zeroer.Faulted());
@@ -143,14 +141,14 @@
   ImuZeroer zeroer;
   ASSERT_FALSE(zeroer.Zeroed());
   for (size_t ii = 0; ii < kMinSamplesToZero; ++ii) {
-    zeroer.ProcessMeasurement(
+    zeroer.InsertAndProcessMeasurement(
         MakeMeasurement({0.01, 0.02, 0.03}, {4, 5, 6}).message());
   }
   ASSERT_TRUE(zeroer.Zeroed());
   ASSERT_FALSE(zeroer.Faulted())
       << "We should not fault until we complete a second cycle of zeroing.";
   for (size_t ii = 0; ii < kMinSamplesToZero; ++ii) {
-    zeroer.ProcessMeasurement(
+    zeroer.InsertAndProcessMeasurement(
         MakeMeasurement({0.01, 0.05, 0.03}, {4, 5, 6}).message());
   }
   ASSERT_TRUE(zeroer.Faulted());
@@ -161,12 +159,12 @@
   ImuZeroer zeroer;
   ASSERT_FALSE(zeroer.Zeroed());
   for (size_t ii = 0; ii < kMinSamplesToZero; ++ii) {
-    zeroer.ProcessMeasurement(
+    zeroer.InsertAndProcessMeasurement(
         MakeMeasurement({0.01, 0.02, 0.03}, {4, 5, 6}).message());
   }
   ASSERT_TRUE(zeroer.Zeroed());
   for (size_t ii = 0; ii < kMinSamplesToZero; ++ii) {
-    zeroer.ProcessMeasurement(
+    zeroer.InsertAndProcessMeasurement(
         MakeMeasurement({0.01, 0.020001, 0.03}, {4, 5, 6}).message());
   }
   ASSERT_FALSE(zeroer.Faulted());
diff --git a/third_party/BUILD b/third_party/BUILD
index 13f015e..fd10d43 100644
--- a/third_party/BUILD
+++ b/third_party/BUILD
@@ -99,4 +99,3 @@
         "roborio": ["@webrtc_rio//:webrtc"],
     }),
 )
-
diff --git a/third_party/flatbuffers/build_defs.bzl b/third_party/flatbuffers/build_defs.bzl
index 6ac1d87..f4c1dc7 100644
--- a/third_party/flatbuffers/build_defs.bzl
+++ b/third_party/flatbuffers/build_defs.bzl
@@ -282,7 +282,7 @@
         been parsed. As such, we just force the user to manually specify
         things.
     """
-    python_files = ["%s/%s.py" % (namespace.replace('.', '/'), table) for table in tables]
+    python_files = ["%s/%s.py" % (namespace.replace(".", "/"), table) for table in tables]
 
     srcs_lib = "%s_srcs" % (name)
     flatbuffer_library_public(
diff --git a/third_party/gperftools/BUILD b/third_party/gperftools/BUILD
index 25a8740..7f5e9d6 100644
--- a/third_party/gperftools/BUILD
+++ b/third_party/gperftools/BUILD
@@ -86,7 +86,7 @@
     "-DPERFTOOLS_DLL_DECL=",
     "-DSTDC_HEADERS=1",
     "-DSTL_NAMESPACE=std",
-    "-DPACKAGE_STRING=\\\"gperftools\ 2.4\\\"",
+    "-DPACKAGE_STRING=\\\"gperftools\\ 2.4\\\"",
     "-DPACKAGE_BUGREPORT=\\\"http://www.frc971.org/contact\\\"",
     "-DPACKAGE_VERSION=\\\"2.4\\\"",
 ] + cpu_select({
@@ -141,7 +141,7 @@
         "-lrt",
         "-lpthread",
     ],
-    nocopts = "-std=gnu\+\+1y",
+    nocopts = "-std=gnu\\+\\+1y",
     visibility = ["//visibility:public"],
     deps = [
         "//third_party/empty_config_h",
diff --git a/third_party/libevent/BUILD b/third_party/libevent/BUILD
index da05f3c..347ec0b 100644
--- a/third_party/libevent/BUILD
+++ b/third_party/libevent/BUILD
@@ -148,7 +148,7 @@
         "_EVENT_STDC_HEADERS=1",
         "_EVENT_TIME_WITH_SYS_TIME=1",
         "_EVENT_NUMERIC_VERSION=0x02001600",
-        '_EVENT_VERSION=\\"2.0.22-stable\\"',
+        "_EVENT_VERSION=\\\"2.0.22-stable\\\"",
     ] + address_size_select({
         "32": [
             "_EVENT_SIZEOF_LONG_LONG=4",
diff --git a/y2019/control_loops/drivetrain/localized_drivetrain_test.cc b/y2019/control_loops/drivetrain/localized_drivetrain_test.cc
index 1459248..c5a2d18 100644
--- a/y2019/control_loops/drivetrain/localized_drivetrain_test.cc
+++ b/y2019/control_loops/drivetrain/localized_drivetrain_test.cc
@@ -84,7 +84,7 @@
         });
 
     // Run for enough time to allow the gyro/imu zeroing code to run.
-    RunFor(std::chrono::seconds(10));
+    RunFor(std::chrono::seconds(15));
   }
 
   virtual ~LocalizedDrivetrainTest() {}
diff --git a/y2020/control_loops/drivetrain/BUILD b/y2020/control_loops/drivetrain/BUILD
index 03d2dc2..1bd760f 100644
--- a/y2020/control_loops/drivetrain/BUILD
+++ b/y2020/control_loops/drivetrain/BUILD
@@ -59,6 +59,7 @@
     hdrs = ["localizer.h"],
     deps = [
         "//aos/containers:ring_buffer",
+        "//aos/network:message_bridge_server_fbs",
         "//frc971/control_loops:profiled_subsystem_fbs",
         "//frc971/control_loops/drivetrain:hybrid_ekf",
         "//frc971/control_loops/drivetrain:localizer",
diff --git a/y2020/control_loops/drivetrain/localizer.cc b/y2020/control_loops/drivetrain/localizer.cc
index 0178790..927eec0 100644
--- a/y2020/control_loops/drivetrain/localizer.cc
+++ b/y2020/control_loops/drivetrain/localizer.cc
@@ -22,13 +22,19 @@
   }
   return result;
 }
+
 }  // namespace
 
 Localizer::Localizer(
     aos::EventLoop *event_loop,
     const frc971::control_loops::drivetrain::DrivetrainConfig<double>
         &dt_config)
-    : event_loop_(event_loop), dt_config_(dt_config), ekf_(dt_config) {
+    : event_loop_(event_loop),
+      dt_config_(dt_config),
+      ekf_(dt_config),
+      clock_offset_fetcher_(
+          event_loop_->MakeFetcher<aos::message_bridge::ServerStatistics>(
+              "/aos")) {
   // TODO(james): This doesn't really need to be a watcher; we could just use a
   // fetcher for the superstructure status.
   // This probably should be a Fetcher instead of a Watcher, but this
@@ -94,9 +100,21 @@
 
 void Localizer::HandleImageMatch(
     const frc971::vision::sift::ImageMatchResult &result) {
-  // TODO(james): compensate for capture time across multiple nodes correctly.
+  std::chrono::nanoseconds monotonic_offset(0);
+  clock_offset_fetcher_.Fetch();
+  if (clock_offset_fetcher_.get() != nullptr) {
+    for (const auto connection : *clock_offset_fetcher_->connections()) {
+      if (connection->has_node() && connection->node()->has_name() &&
+          connection->node()->name()->string_view() == "pi1") {
+        monotonic_offset =
+            std::chrono::nanoseconds(connection->monotonic_offset());
+        break;
+      }
+    }
+  }
   aos::monotonic_clock::time_point capture_time(
-      std::chrono::nanoseconds(result.image_monotonic_timestamp_ns()));
+      std::chrono::nanoseconds(result.image_monotonic_timestamp_ns()) -
+      monotonic_offset);
   CHECK(result.has_camera_calibration());
   // Per the ImageMatchResult specification, we can actually determine whether
   // the camera is the turret camera just from the presence of the
diff --git a/y2020/control_loops/drivetrain/localizer.h b/y2020/control_loops/drivetrain/localizer.h
index 0b79361..8636a7a 100644
--- a/y2020/control_loops/drivetrain/localizer.h
+++ b/y2020/control_loops/drivetrain/localizer.h
@@ -3,6 +3,7 @@
 
 #include "aos/containers/ring_buffer.h"
 #include "aos/events/event_loop.h"
+#include "aos/network/message_bridge_server_generated.h"
 #include "frc971/control_loops/drivetrain/hybrid_ekf.h"
 #include "frc971/control_loops/drivetrain/localizer.h"
 #include "y2020/control_loops/superstructure/superstructure_status_generated.h"
@@ -86,6 +87,8 @@
   std::vector<aos::Fetcher<frc971::vision::sift::ImageMatchResult>>
       image_fetchers_;
 
+  aos::Fetcher<aos::message_bridge::ServerStatistics> clock_offset_fetcher_;
+
   // Buffer of recent turret data--this is used so that when we receive a camera
   // frame from the turret, we can back out what the turret angle was at that
   // time.
diff --git a/y2020/control_loops/drivetrain/localizer_test.cc b/y2020/control_loops/drivetrain/localizer_test.cc
index e81691c..c438af5 100644
--- a/y2020/control_loops/drivetrain/localizer_test.cc
+++ b/y2020/control_loops/drivetrain/localizer_test.cc
@@ -4,6 +4,7 @@
 
 #include "aos/controls/control_loop_test.h"
 #include "aos/events/logging/logger.h"
+#include "aos/network/message_bridge_server_generated.h"
 #include "aos/network/team_number.h"
 #include "frc971/control_loops/drivetrain/drivetrain.h"
 #include "frc971/control_loops/drivetrain/drivetrain_test_lib.h"
@@ -84,6 +85,8 @@
   locations.push_back(H);
   return locations;
 }
+
+constexpr std::chrono::seconds kPiTimeOffset(10);
 }  // namespace
 
 namespace chrono = std::chrono;
@@ -113,6 +116,9 @@
         superstructure_status_sender_(
             test_event_loop_->MakeSender<superstructure::Status>(
                 "/superstructure")),
+        server_statistics_sender_(
+            test_event_loop_->MakeSender<aos::message_bridge::ServerStatistics>(
+                "/aos")),
         drivetrain_event_loop_(MakeEventLoop("drivetrain", roborio_)),
         dt_config_(GetTest2020DrivetrainConfig()),
         pi1_event_loop_(MakeEventLoop("test", pi1_)),
@@ -123,6 +129,9 @@
         drivetrain_plant_event_loop_(MakeEventLoop("plant", roborio_)),
         drivetrain_plant_(drivetrain_plant_event_loop_.get(), dt_config_),
         last_frame_(monotonic_now()) {
+    event_loop_factory()->GetNodeEventLoopFactory(pi1_)->SetDistributedOffset(
+        kPiTimeOffset);
+
     set_team_id(frc971::control_loops::testing::kTeamNumber);
     set_battery_voltage(12.0);
 
@@ -150,6 +159,29 @@
 
     test_event_loop_->AddPhasedLoop(
         [this](int) {
+          auto builder = server_statistics_sender_.MakeBuilder();
+          auto name_offset = builder.fbb()->CreateString("pi1");
+          auto node_builder = builder.MakeBuilder<aos::Node>();
+          node_builder.add_name(name_offset);
+          auto node_offset = node_builder.Finish();
+          auto connection_builder =
+              builder.MakeBuilder<aos::message_bridge::ServerConnection>();
+          connection_builder.add_node(node_offset);
+          connection_builder.add_monotonic_offset(
+              chrono::duration_cast<chrono::nanoseconds>(-kPiTimeOffset)
+                  .count());
+          auto connection_offset = connection_builder.Finish();
+          auto connections_offset =
+              builder.fbb()->CreateVector(&connection_offset, 1);
+          auto statistics_builder =
+              builder.MakeBuilder<aos::message_bridge::ServerStatistics>();
+          statistics_builder.add_connections(connections_offset);
+          builder.Send(statistics_builder.Finish());
+        },
+        chrono::milliseconds(500));
+
+    test_event_loop_->AddPhasedLoop(
+        [this](int) {
           // Also use the opportunity to send out turret messages.
           UpdateTurretPosition();
           auto builder = superstructure_status_sender_.MakeBuilder();
@@ -259,7 +291,10 @@
 
     frame->image_monotonic_timestamp_ns =
         chrono::duration_cast<chrono::nanoseconds>(
-            monotonic_now().time_since_epoch())
+            event_loop_factory()
+                ->GetNodeEventLoopFactory(pi1_)
+                ->monotonic_now()
+                .time_since_epoch())
             .count();
     frame->camera_calibration.reset(new CameraCalibrationT());
     {
@@ -302,6 +337,7 @@
   aos::Fetcher<Goal> drivetrain_goal_fetcher_;
   aos::Sender<LocalizerControl> localizer_control_sender_;
   aos::Sender<superstructure::Status> superstructure_status_sender_;
+  aos::Sender<aos::message_bridge::ServerStatistics> server_statistics_sender_;
 
   std::unique_ptr<aos::EventLoop> drivetrain_event_loop_;
   const frc971::control_loops::drivetrain::DrivetrainConfig<double>
diff --git a/y2020/control_loops/python/BUILD b/y2020/control_loops/python/BUILD
index 923d239..e218571 100644
--- a/y2020/control_loops/python/BUILD
+++ b/y2020/control_loops/python/BUILD
@@ -133,8 +133,8 @@
     legacy_create_init = False,
     restricted_to = ["//tools:k8"],
     deps = [
-        ":python_init",
         ":flywheel",
+        ":python_init",
         "//external:python-gflags",
         "//external:python-glog",
     ],
@@ -148,8 +148,8 @@
     legacy_create_init = False,
     restricted_to = ["//tools:k8"],
     deps = [
-        ":python_init",
         ":flywheel",
+        ":python_init",
         "//external:python-gflags",
         "//external:python-glog",
     ],
diff --git a/y2020/vision/BUILD b/y2020/vision/BUILD
index 6850a27..60276c9 100644
--- a/y2020/vision/BUILD
+++ b/y2020/vision/BUILD
@@ -16,6 +16,7 @@
     hdrs = [
         "v4l2_reader.h",
     ],
+    visibility = ["//y2020:__subpackages__"],
     deps = [
         ":vision_fbs",
         "//aos/events:event_loop",
@@ -23,7 +24,6 @@
         "@com_github_google_glog//:glog",
         "@com_google_absl//absl/base",
     ],
-    visibility = ["//y2020:__subpackages__"],
 )
 
 cc_binary(
@@ -35,6 +35,7 @@
         "//tools:k8",
         "//tools:armhf-debian",
     ],
+    visibility = ["//y2020:__subpackages__"],
     deps = [
         ":v4l2_reader",
         ":vision_fbs",
@@ -43,12 +44,11 @@
         "//aos/events:shm_event_loop",
         "//aos/network:team_number",
         "//third_party:opencv",
-        "//y2020/vision/tools/python_code:sift_training_data",
         "//y2020/vision/sift:sift971",
         "//y2020/vision/sift:sift_fbs",
         "//y2020/vision/sift:sift_training_fbs",
+        "//y2020/vision/tools/python_code:sift_training_data",
     ],
-    visibility = ["//y2020:__subpackages__"],
 )
 
 flatbuffer_ts_library(
diff --git a/y2020/vision/camera_reader.cc b/y2020/vision/camera_reader.cc
index d9b5ea0..ed4082d 100644
--- a/y2020/vision/camera_reader.cc
+++ b/y2020/vision/camera_reader.cc
@@ -7,10 +7,10 @@
 #include "aos/init.h"
 #include "aos/network/team_number.h"
 
-#include "y2020/vision/tools/python_code/sift_training_data.h"
 #include "y2020/vision/sift/sift971.h"
 #include "y2020/vision/sift/sift_generated.h"
 #include "y2020/vision/sift/sift_training_generated.h"
+#include "y2020/vision/tools/python_code/sift_training_data.h"
 #include "y2020/vision/v4l2_reader.h"
 #include "y2020/vision/vision_generated.h"
 
@@ -31,6 +31,8 @@
         image_sender_(event_loop->MakeSender<CameraImage>("/camera")),
         result_sender_(
             event_loop->MakeSender<sift::ImageMatchResult>("/camera")),
+        detailed_result_sender_(
+            event_loop->MakeSender<sift::ImageMatchResult>("/camera/detailed")),
         read_image_timer_(event_loop->AddTimer([this]() {
           ReadImage();
           read_image_timer_->Setup(event_loop_->monotonic_now());
@@ -63,6 +65,14 @@
                const std::vector<cv::KeyPoint> &keypoints,
                const cv::Mat &descriptors);
 
+  void SendImageMatchResult(const CameraImage &image,
+                            const std::vector<cv::KeyPoint> &keypoints,
+                            const cv::Mat &descriptors,
+                            const std::vector<std::vector<cv::DMatch>> &matches,
+                            const std::vector<cv::Mat> &camera_target_list,
+                            aos::Sender<sift::ImageMatchResult> *result_sender,
+                            bool send_details);
+
   // Returns the 3D location for the specified training feature.
   cv::Point3f Training3dPoint(int training_image_index,
                               int feature_index) const {
@@ -93,6 +103,14 @@
     return result;
   }
 
+  cv::Mat CameraDistCoeffs() const {
+    const cv::Mat result(5, 1, CV_32F,
+                         const_cast<void *>(static_cast<const void *>(
+                             camera_calibration_->dist_coeffs()->data())));
+    CHECK_EQ(result.total(), camera_calibration_->dist_coeffs()->size());
+    return result;
+  }
+
   aos::EventLoop *const event_loop_;
   const sift::TrainingData *const training_data_;
   const sift::CameraCalibration *const camera_calibration_;
@@ -100,6 +118,7 @@
   cv::FlannBasedMatcher *const matcher_;
   aos::Sender<CameraImage> image_sender_;
   aos::Sender<sift::ImageMatchResult> result_sender_;
+  aos::Sender<sift::ImageMatchResult> detailed_result_sender_;
   // We schedule this immediately to read an image. Having it on a timer means
   // other things can run on the event loop in between.
   aos::TimerHandler *const read_image_timer_;
@@ -146,13 +165,58 @@
   }
 }
 
-void CameraReader::ProcessImage(const CameraImage &image) {
-  // Be ready to pack the results up and send them out. We can pack things into
-  // this memory as we go to allow reusing temporaries better.
-  auto builder = result_sender_.MakeBuilder();
+void CameraReader::SendImageMatchResult(
+    const CameraImage &image, const std::vector<cv::KeyPoint> &keypoints,
+    const cv::Mat &descriptors,
+    const std::vector<std::vector<cv::DMatch>> &matches,
+    const std::vector<cv::Mat> &camera_target_list,
+    aos::Sender<sift::ImageMatchResult> *result_sender, bool send_details) {
+  auto builder = result_sender->MakeBuilder();
   const auto camera_calibration_offset =
       aos::CopyFlatBuffer(camera_calibration_, builder.fbb());
 
+  flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<sift::Feature>>>
+      features_offset;
+  if (send_details) {
+    features_offset = PackFeatures(builder.fbb(), keypoints, descriptors);
+  }
+
+  const auto image_matches_offset = PackImageMatches(builder.fbb(), matches);
+
+  std::vector<flatbuffers::Offset<sift::CameraPose>> camera_poses;
+
+  for (size_t i = 0; i < camera_target_list.size(); ++i) {
+    cv::Mat camera_target = camera_target_list[i];
+    CHECK(camera_target.isContinuous());
+    const auto data_offset = builder.fbb()->CreateVector<float>(
+        reinterpret_cast<float *>(camera_target.data), camera_target.total());
+    auto transform_offset =
+        sift::CreateTransformationMatrix(*builder.fbb(), data_offset);
+
+    sift::CameraPose::Builder pose_builder(*builder.fbb());
+    pose_builder.add_camera_to_target(transform_offset);
+    pose_builder.add_field_to_target(
+        aos::CopyFlatBuffer(FieldToTarget(i), builder.fbb()));
+    camera_poses.emplace_back(pose_builder.Finish());
+  }
+  const auto camera_poses_offset = builder.fbb()->CreateVector(camera_poses);
+
+  sift::ImageMatchResult::Builder result_builder(*builder.fbb());
+  result_builder.add_image_matches(image_matches_offset);
+  result_builder.add_camera_poses(camera_poses_offset);
+  if (send_details) {
+    result_builder.add_features(features_offset);
+  }
+  result_builder.add_image_monotonic_timestamp_ns(
+      image.monotonic_timestamp_ns());
+  result_builder.add_camera_calibration(camera_calibration_offset);
+
+  // TODO(jim): Add the target point computed from the matches and
+  // mapped by the homography.
+  builder.Send(result_builder.Finish());
+}
+
+void CameraReader::ProcessImage(const CameraImage &image) {
   // First, we need to extract the brightness information. This can't really be
   // fused into the beginning of the SIFT algorithm because the algorithm needs
   // to look at the base image directly. It also only takes 2ms on our images.
@@ -169,13 +233,10 @@
   std::vector<cv::KeyPoint> keypoints;
   cv::Mat descriptors;
   sift_->detectAndCompute(image_mat, cv::noArray(), keypoints, descriptors);
-  const auto features_offset =
-      PackFeatures(builder.fbb(), keypoints, descriptors);
 
   // Then, match those features against our training data.
   std::vector<std::vector<cv::DMatch>> matches;
   matcher_->knnMatch(/* queryDescriptors */ descriptors, matches, /* k */ 2);
-  const auto image_matches_offset = PackImageMatches(builder.fbb(), matches);
 
   struct PerImageMatches {
     std::vector<const std::vector<cv::DMatch> *> matches;
@@ -212,8 +273,8 @@
 
   // The minimum number of matches in a training image for us to use it.
   static constexpr int kMinimumMatchCount = 10;
+  std::vector<cv::Mat> camera_target_list;
 
-  std::vector<flatbuffers::Offset<sift::CameraPose>> camera_poses;
   for (size_t i = 0; i < per_image_matches.size(); ++i) {
     const PerImageMatches &per_image = per_image_matches[i];
     if (per_image.matches.size() < kMinimumMatchCount) {
@@ -223,12 +284,11 @@
     cv::Mat R_camera_target_vec, R_camera_target, T_camera_target;
     // Compute the pose of the camera (global origin relative to camera)
     cv::solvePnPRansac(per_image.training_points_3d, per_image.query_points,
-                       CameraIntrinsics(), cv::noArray(), R_camera_target_vec,
-                       T_camera_target);
+                       CameraIntrinsics(), CameraDistCoeffs(),
+                       R_camera_target_vec, T_camera_target);
     // Convert Camera from angle-axis (3x1) to homogenous (3x3) representation
     cv::Rodrigues(R_camera_target_vec, R_camera_target);
 
-    sift::CameraPose::Builder pose_builder(*builder.fbb());
     {
       CHECK_EQ(cv::Size(3, 3), R_camera_target.size());
       CHECK_EQ(cv::Size(3, 1), T_camera_target.size());
@@ -237,25 +297,15 @@
       T_camera_target.copyTo(camera_target(cv::Range(3, 4), cv::Range(0, 3)));
       camera_target.at<float>(3, 3) = 1;
       CHECK(camera_target.isContinuous());
-      const auto data_offset = builder.fbb()->CreateVector<float>(
-          reinterpret_cast<float *>(camera_target.data), camera_target.total());
-      pose_builder.add_camera_to_target(
-          sift::CreateTransformationMatrix(*builder.fbb(), data_offset));
+      camera_target_list.push_back(camera_target);
     }
-    pose_builder.add_field_to_target(
-        aos::CopyFlatBuffer(FieldToTarget(i), builder.fbb()));
-    camera_poses.emplace_back(pose_builder.Finish());
   }
-  const auto camera_poses_offset = builder.fbb()->CreateVector(camera_poses);
-
-  sift::ImageMatchResult::Builder result_builder(*builder.fbb());
-  result_builder.add_image_matches(image_matches_offset);
-  result_builder.add_camera_poses(camera_poses_offset);
-  result_builder.add_features(features_offset);
-  result_builder.add_image_monotonic_timestamp_ns(
-      image.monotonic_timestamp_ns());
-  result_builder.add_camera_calibration(camera_calibration_offset);
-  builder.Send(result_builder.Finish());
+  // Now, send our two messages -- one large, with the feature details for
+  // remote debugging, and one smaller without them.
+  SendImageMatchResult(image, keypoints, descriptors, matches,
+                       camera_target_list, &detailed_result_sender_, true);
+  SendImageMatchResult(image, keypoints, descriptors, matches,
+                       camera_target_list, &result_sender_, false);
 }
 
 void CameraReader::ReadImage() {
diff --git a/y2020/vision/rootfs/modify_rootfs.sh b/y2020/vision/rootfs/modify_rootfs.sh
index 4fb9f46..239db0a 100755
--- a/y2020/vision/rootfs/modify_rootfs.sh
+++ b/y2020/vision/rootfs/modify_rootfs.sh
@@ -3,6 +3,7 @@
 set -xe
 
 IMAGE="2020-02-13-raspbian-buster-lite.img"
+BOOT_PARTITION="2020-02-13-raspbian-buster-lite.img.boot_partition"
 PARTITION="2020-02-13-raspbian-buster-lite.img.partition"
 HOSTNAME="pi-8971-1"
 
@@ -16,6 +17,26 @@
 
 
 mkdir -p "${PARTITION}"
+mkdir -p "${BOOT_PARTITION}"
+
+if mount | grep "${BOOT_PARTITION}" >/dev/null ;
+then
+  echo "Already mounted"
+else
+  OFFSET="$(fdisk -lu "${IMAGE}" | grep "${IMAGE}1" | awk '{print 512*$2}')"
+  sudo mount -o loop,offset=${OFFSET} "${IMAGE}" "${BOOT_PARTITION}"
+fi
+
+# Enable the camera on boot.
+if ! grep "start_x=1" "${BOOT_PARTITION}/config.txt"; then
+  echo "start_x=1" | sudo tee -a "${BOOT_PARTITION}/config.txt"
+fi
+if ! grep "gpu_mem=128" "${BOOT_PARTITION}/config.txt"; then
+  echo "gpu_mem=128" | sudo tee -a "${BOOT_PARTITION}/config.txt"
+fi
+
+sudo umount "${BOOT_PARTITION}"
+rmdir "${BOOT_PARTITION}"
 
 if mount | grep "${PARTITION}" >/dev/null ;
 then
diff --git a/y2020/vision/sift/demo_sift_training.py b/y2020/vision/sift/demo_sift_training.py
index a6650fd..3fa33cf 100644
--- a/y2020/vision/sift/demo_sift_training.py
+++ b/y2020/vision/sift/demo_sift_training.py
@@ -49,14 +49,14 @@
   TrainingImage.TrainingImageStart(fbb)
   TrainingImage.TrainingImageAddFeatures(fbb, features_vector_table)
   # TODO(Brian): Fill out the transformation matrices.
-  training_image = TrainingImage.TrainingImageEnd(fbb)
+  training_image_offset = TrainingImage.TrainingImageEnd(fbb)
 
   TrainingData.TrainingDataStartImagesVector(fbb, 1)
-  fbb.PrependUOffsetTRelative(training_image)
-  images = fbb.EndVector(1)
+  fbb.PrependUOffsetTRelative(training_image_offset)
+  images_offset = fbb.EndVector(1)
 
   TrainingData.TrainingDataStart(fbb)
-  TrainingData.TrainingDataAddImages(fbb, images)
+  TrainingData.TrainingDataAddImages(fbb, images_offset)
   fbb.Finish(TrainingData.TrainingDataEnd(fbb))
 
   bfbs = fbb.Output()
diff --git a/y2020/vision/sift/fast_gaussian.bzl b/y2020/vision/sift/fast_gaussian.bzl
index a1c3173..0905423 100644
--- a/y2020/vision/sift/fast_gaussian.bzl
+++ b/y2020/vision/sift/fast_gaussian.bzl
@@ -1,55 +1,55 @@
 def fast_gaussian(sigmas, sizes):
-  files = []
-  for _, sigma_name, _ in sigmas:
+    files = []
+    for _, sigma_name, _ in sigmas:
+        for cols, rows in sizes:
+            files.append("fast_gaussian_%dx%d_%s" % (cols, rows, sigma_name))
+    for _, sigma_name, _ in sigmas:
+        for cols, rows in sizes:
+            files.append("fast_gaussian_subtract_%dx%d_%s" % (cols, rows, sigma_name))
     for cols, rows in sizes:
-      files.append("fast_gaussian_%dx%d_%s" % (cols, rows, sigma_name))
-  for _, sigma_name, _ in sigmas:
-    for cols, rows in sizes:
-      files.append("fast_gaussian_subtract_%dx%d_%s" % (cols, rows, sigma_name))
-  for cols, rows in sizes:
-    files.append('fast_subtract_%dx%d' % (cols, rows))
+        files.append("fast_subtract_%dx%d" % (cols, rows))
 
-  params = struct(
-    sigmas = sigmas,
-    sizes = sizes,
-  )
+    params = struct(
+        sigmas = sigmas,
+        sizes = sizes,
+    )
 
-  headers = [f + '.h' for f in files] + [
-    'fast_gaussian_all.h',
-  ]
-  objects = [f + '.o' for f in files] + [
-    'fast_gaussian_runtime.o',
-  ]
-  htmls = [f + '.html' for f in files]
+    headers = [f + ".h" for f in files] + [
+        "fast_gaussian_all.h",
+    ]
+    objects = [f + ".o" for f in files] + [
+        "fast_gaussian_runtime.o",
+    ]
+    htmls = [f + ".html" for f in files]
 
-  native.genrule(
-    name = "generate_fast_gaussian",
-    tools = [
-        ":fast_gaussian_runner",
-    ],
-    cmd = ' '.join([
-      '$(location fast_gaussian_runner)',
-      "'" + params.to_json() + "'",
-      # TODO(Brian): This should be RULEDIR once we have support for that.
-      '$(@D)',
-      '$(TARGET_CPU)',
-    ]),
-    outs = headers + objects + htmls,
-    restricted_to = [
-      "//tools:k8",
-      "//tools:armhf-debian",
-    ],
-  )
+    native.genrule(
+        name = "generate_fast_gaussian",
+        tools = [
+            ":fast_gaussian_runner",
+        ],
+        cmd = " ".join([
+            "$(location fast_gaussian_runner)",
+            "'" + params.to_json() + "'",
+            # TODO(Brian): This should be RULEDIR once we have support for that.
+            "$(@D)",
+            "$(TARGET_CPU)",
+        ]),
+        outs = headers + objects + htmls,
+        restricted_to = [
+            "//tools:k8",
+            "//tools:armhf-debian",
+        ],
+    )
 
-  native.cc_library(
-    name = 'fast_gaussian_all',
-    hdrs = ['fast_gaussian_all.h'],
-    srcs = headers + objects,
-    deps = [
-      '//third_party:halide_runtime',
-    ],
-    restricted_to = [
-      "//tools:k8",
-      "//tools:armhf-debian",
-    ],
-  )
+    native.cc_library(
+        name = "fast_gaussian_all",
+        hdrs = ["fast_gaussian_all.h"],
+        srcs = headers + objects,
+        deps = [
+            "//third_party:halide_runtime",
+        ],
+        restricted_to = [
+            "//tools:k8",
+            "//tools:armhf-debian",
+        ],
+    )
diff --git a/y2020/vision/sift/sift.fbs b/y2020/vision/sift/sift.fbs
index 97c2b0a..8806957 100644
--- a/y2020/vision/sift/sift.fbs
+++ b/y2020/vision/sift/sift.fbs
@@ -60,8 +60,8 @@
 }
 
 table TransformationMatrix {
-  // The matrix data. This is a row-major 4x4 matrix.
-  // In other words, the bottom row is (0, 0, 0, 1).
+  // The matrix data for a row-major 4x4 homogeneous transformation matrix.
+  // This implies the bottom row is (0, 0, 0, 1).
   data:[float];
 }
 
@@ -97,6 +97,9 @@
   //   rotation around the Z axis by the turret angle
   //   turret_extrinsics
   turret_extrinsics:TransformationMatrix;
+
+  // These are the standard OpenCV 5-parameter distortion coefficients.
+  dist_coeffs:[float];
 }
 
 // Contains the information the EKF wants from an image matched against a single
@@ -128,6 +131,7 @@
   // The matches from this image to each of the training images which matched.
   // Each member is against the same captured image.
   image_matches:[ImageMatch];
+
   // The transformations for this image for each of the training images which
   // matched.
   // TODO(Brian): Include some kind of covariance information for these.
@@ -141,6 +145,10 @@
 
   // Information about the camera which took this image.
   camera_calibration:CameraCalibration;
+
+  // 2D image coordinate representing target location on the matched image
+  target_point_x:float;
+  target_point_y:float;
 }
 
 root_type ImageMatchResult;
diff --git a/y2020/vision/sift/sift_training.fbs b/y2020/vision/sift/sift_training.fbs
index 7391e76..d4fa740 100644
--- a/y2020/vision/sift/sift_training.fbs
+++ b/y2020/vision/sift/sift_training.fbs
@@ -10,6 +10,10 @@
   // from the target to the field. See CameraPose in :sift_fbs for details of
   // the conventions of this.
   field_to_target:TransformationMatrix;
+
+  // 2D image coordinate representing target location on the training image
+  target_point_x:float;
+  target_point_y:float;
 }
 
 // Represents the information used to match incoming images against.
diff --git a/y2020/vision/tools/python_code/BUILD b/y2020/vision/tools/python_code/BUILD
index a130ed6..a932886 100644
--- a/y2020/vision/tools/python_code/BUILD
+++ b/y2020/vision/tools/python_code/BUILD
@@ -22,10 +22,10 @@
     default_python_version = "PY3",
     srcs_version = "PY2AND3",
     deps = [
+        "//external:python-glog",
         "//y2020/vision/sift:sift_fbs_python",
         "@bazel_tools//tools/python/runfiles",
         "@opencv_contrib_nonfree_amd64//:python_opencv",
-        "//external:python-glog",
     ],
 )
 
@@ -54,10 +54,10 @@
 py_binary(
     name = "load_sift_training_test",
     srcs = [
-        "camera_definition.py",
+        "camera_definition_test.py",
         "define_training_data.py",
         "load_sift_training.py",
-        "target_definition.py",
+        "target_definition_test.py",
         "train_and_match.py",
     ],
     args = [
@@ -71,10 +71,10 @@
     main = "load_sift_training.py",
     srcs_version = "PY2AND3",
     deps = [
+        "//external:python-glog",
         "//y2020/vision/sift:sift_fbs_python",
         "@bazel_tools//tools/python/runfiles",
         "@opencv_contrib_nonfree_amd64//:python_opencv",
-        "//external:python-glog",
     ],
 )
 
diff --git a/y2020/vision/tools/python_code/camera_definition.py b/y2020/vision/tools/python_code/camera_definition.py
index f6a3591..194ddd3 100644
--- a/y2020/vision/tools/python_code/camera_definition.py
+++ b/y2020/vision/tools/python_code/camera_definition.py
@@ -10,6 +10,7 @@
 
     pass
 
+
 class CameraExtrinsics:
     def __init__(self):
         self.R = []
@@ -24,7 +25,6 @@
         self.team_number = -1
 
 
-
 ### CAMERA DEFINITIONS
 
 # Robot camera has:
@@ -40,13 +40,11 @@
 # Define a web_cam
 web_cam_int = CameraIntrinsics()
 web_cam_int.camera_matrix = np.asarray([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
-web_cam_int.distortion_coeffs = np.zeros((5,1))
+web_cam_int.distortion_coeffs = np.zeros((5, 1))
 
 web_cam_ext = CameraExtrinsics()
 # Camera rotation from robot x,y,z to opencv (z, -x, -y)
-web_cam_ext.R = np.array([[0., 0., 1.],
-                          [-1, 0,  0],
-                          [0, -1., 0]])
+web_cam_ext.R = np.array([[0., 0., 1.], [-1, 0, 0], [0, -1., 0]])
 web_cam_ext.T = np.array([0., 0., 0.])
 
 web_cam_params = CameraParameters()
diff --git a/y2020/vision/tools/python_code/camera_definition_test.py b/y2020/vision/tools/python_code/camera_definition_test.py
index f8e17f8..65d1b68 100644
--- a/y2020/vision/tools/python_code/camera_definition_test.py
+++ b/y2020/vision/tools/python_code/camera_definition_test.py
@@ -23,7 +23,6 @@
         self.team_number = -1
 
 
-
 ### CAMERA DEFINITIONS
 
 # Robot camera has:
@@ -39,13 +38,11 @@
 # Define a web_cam
 web_cam_int = CameraIntrinsics()
 web_cam_int.camera_matrix = np.asarray([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
-web_cam_int.distortion_coeffs = np.zeros((5,1))
+web_cam_int.distortion_coeffs = np.zeros((5, 1))
 
 web_cam_ext = CameraExtrinsics()
 # Camera rotation from robot x,y,z to opencv (z, -x, -y)
-web_cam_ext.R = np.array([[0., 0., 1.],
-                          [-1, 0,  0],
-                          [0, -1., 0]])
+web_cam_ext.R = np.array([[0., 0., 1.], [-1, 0, 0], [0, -1., 0]])
 web_cam_ext.T = np.array([0., 0., 0.])
 
 web_cam_params = CameraParameters()
@@ -54,11 +51,11 @@
 
 camera_list = []
 
-for team_number in (971, 8971, 9971): 
+for team_number in (971, 8971, 9971):
     for (i, node_name) in enumerate(("pi-1", "pi-2", "pi-3", "pi-4", "pi-5")):
         camera_base = copy.deepcopy(web_cam_params)
         camera_base.node_name = node_name
         camera_base.team_number = team_number
-        camera_base.camera_ext.T = np.asarray(np.float32([i+1, i+1, i+1]))
+        camera_base.camera_ext.T = np.asarray(
+            np.float32([i + 1, i + 1, i + 1]))
         camera_list.append(camera_base)
-
diff --git a/y2020/vision/tools/python_code/camera_param_test.cc b/y2020/vision/tools/python_code/camera_param_test.cc
index 3feaa99..5b959cd 100644
--- a/y2020/vision/tools/python_code/camera_param_test.cc
+++ b/y2020/vision/tools/python_code/camera_param_test.cc
@@ -41,6 +41,8 @@
 class TrainingImage {
  public:
   cv::Mat features_;
+  float target_point_x_;
+  float target_point_y_;
   cv::Mat field_to_target_;
 };
 
@@ -66,7 +68,8 @@
 
     CopyTrainingFeatures();
     sift_camera_calibration_ = CameraParamTest::FindCameraCalibration();
-    camera_intrinsics_ = CameraIntrinsics();
+    camera_intrinsic_matrix_ = CameraIntrinsicMatrix();
+    camera_dist_coeffs_ = CameraDistCoeffs();
     camera_extrinsics_ = CameraExtrinsics();
   }
 
@@ -86,7 +89,7 @@
         ->field_to_target();
   }
 
-  cv::Mat CameraIntrinsics() const {
+  cv::Mat CameraIntrinsicMatrix() const {
     const cv::Mat result(3, 3, CV_32F,
                          const_cast<void *>(static_cast<const void *>(
                              sift_camera_calibration_->intrinsics()->data())));
@@ -94,6 +97,14 @@
     return result;
   }
 
+  cv::Mat CameraDistCoeffs() const {
+    const cv::Mat result(5, 1, CV_32F,
+                         const_cast<void *>(static_cast<const void *>(
+                             sift_camera_calibration_->dist_coeffs()->data())));
+    CHECK_EQ(result.total(), sift_camera_calibration_->dist_coeffs()->size());
+    return result;
+  }
+
   cv::Mat CameraExtrinsics() const {
     const cv::Mat result(
         4, 4, CV_32F,
@@ -111,7 +122,8 @@
 
   // We'll just extract the one that matches our current module
   const sift::CameraCalibration *sift_camera_calibration_;
-  cv::Mat camera_intrinsics_;
+  cv::Mat camera_intrinsic_matrix_;
+  cv::Mat camera_dist_coeffs_;
   cv::Mat camera_extrinsics_;
 
   TrainingData training_data_;
@@ -127,7 +139,7 @@
   int train_image_index = 0;
   for (const sift::TrainingImage *training_image :
        *sift_training_data_->images()) {
-    TrainingImage training_image_data;
+    TrainingImage tmp_training_image_data;
     cv::Mat features(training_image->features()->size(), 128, CV_32F);
     for (size_t i = 0; i < training_image->features()->size(); ++i) {
       const sift::Feature *feature_table = training_image->features()->Get(i);
@@ -149,10 +161,14 @@
         4, 4, CV_32F,
         const_cast<void *>(
             static_cast<const void *>(field_to_target_->data()->data())));
-    training_image_data.features_ = features;
-    training_image_data.field_to_target_ = field_to_target_mat;
+    tmp_training_image_data.features_ = features;
+    tmp_training_image_data.field_to_target_ = field_to_target_mat;
+    tmp_training_image_data.target_point_x_ =
+        sift_training_data_->images()->Get(train_image_index)->target_point_x();
+    tmp_training_image_data.target_point_y_ =
+        sift_training_data_->images()->Get(train_image_index)->target_point_y();
 
-    training_data_.images_.push_back(training_image_data);
+    training_data_.images_.push_back(tmp_training_image_data);
     train_image_index++;
   }
 }
@@ -224,13 +240,26 @@
       << camera_params.training_data_.images_[0].field_to_target_ << "\nvs.\n"
       << field_to_targets_0;
 
+  ASSERT_EQ(camera_params.training_data_.images_[0].target_point_x_, 10.);
+  ASSERT_EQ(camera_params.training_data_.images_[0].target_point_y_, 20.);
+
   float intrinsic_mat[9] = {810, 0, 320, 0, 810, 240, 0, 0, 1};
   cv::Mat intrinsic = cv::Mat(3, 3, CV_32F, intrinsic_mat);
-  cv::Mat intrinsic_diff = (intrinsic != camera_params.camera_intrinsics_);
+  cv::Mat intrinsic_diff =
+      (intrinsic != camera_params.camera_intrinsic_matrix_);
   bool intrinsic_eq = (cv::countNonZero(intrinsic_diff) == 0);
   ASSERT_TRUE(intrinsic_eq)
-      << "Mismatch on intrinsics: " << intrinsic << "\nvs.\n"
-      << camera_params.camera_intrinsics_;
+      << "Mismatch on camera intrinsic matrix: " << intrinsic << "\nvs.\n"
+      << camera_params.camera_intrinsic_matrix_;
+
+  float dist_coeff_mat[5] = {0., 0., 0., 0., 0.};
+  cv::Mat dist_coeff = cv::Mat(5, 1, CV_32F, dist_coeff_mat);
+  cv::Mat dist_coeff_diff = (dist_coeff != camera_params.camera_dist_coeffs_);
+  bool dist_coeff_eq = (cv::countNonZero(dist_coeff_diff) == 0);
+  ASSERT_TRUE(dist_coeff_eq)
+      << "Mismatch on camera distortion coefficients: " << dist_coeff
+      << "\nvs.\n"
+      << camera_params.camera_dist_coeffs_;
 
   float i_f = 3.0;
   float extrinsic_mat[16] = {0, 0,  1, i_f, -1, 0, 0, i_f,
@@ -246,4 +275,3 @@
 }  // namespace
 }  // namespace vision
 }  // namespace frc971
-
diff --git a/y2020/vision/tools/python_code/load_sift_training.py b/y2020/vision/tools/python_code/load_sift_training.py
index 65f3342..651efe2 100644
--- a/y2020/vision/tools/python_code/load_sift_training.py
+++ b/y2020/vision/tools/python_code/load_sift_training.py
@@ -107,22 +107,28 @@
             fbb)
 
         # Create the TrainingImage feature vector
-        TrainingImage.TrainingImageStartFeaturesVector(fbb,
-                                                       len(features_vector))
+        TrainingImage.TrainingImageStartFeaturesVector(
+            fbb, len(features_vector))
         for feature in reversed(features_vector):
             fbb.PrependUOffsetTRelative(feature)
         features_vector_offset = fbb.EndVector(len(features_vector))
 
-        # Create the TrainingImage
+        # Add the TrainingImage data
         TrainingImage.TrainingImageStart(fbb)
-        TrainingImage.TrainingImageAddFeatures(fbb, features_vector_offset)
-        TrainingImage.TrainingImageAddFieldToTarget(fbb,
-                                                    transformation_mat_offset)
-
-        images_vector.append(TrainingImage.TrainingImageEnd(fbb))
+        TrainingImage.TrainingImageAddFeatures(fbb,
+                                                       features_vector_offset)
+        TrainingImage.TrainingImageAddFieldToTarget(
+            fbb, transformation_mat_offset)
+        TrainingImage.TrainingImageAddTargetPointX(
+            fbb, target_data.target_point_2d[0][0][0])
+        TrainingImage.TrainingImageAddTargetPointY(
+            fbb, target_data.target_point_2d[0][0][1])
+        images_vector.append(
+            TrainingImage.TrainingImageEnd(fbb))
 
     # Create and add Training Data of all targets
-    TrainingData.TrainingDataStartImagesVector(fbb, len(images_vector))
+    TrainingData.TrainingDataStartImagesVector(fbb,
+                                                     len(images_vector))
     for training_image in reversed(images_vector):
         fbb.PrependUOffsetTRelative(training_image)
     images_vector_table = fbb.EndVector(len(images_vector))
@@ -155,6 +161,14 @@
             fbb.PrependFloat32(n)
         intrinsics_vector = fbb.EndVector(len(camera_int_list))
 
+        dist_coeff_list = camera_calib.camera_int.distortion_coeffs.ravel(
+        ).tolist()
+        CameraCalibration.CameraCalibrationStartDistCoeffsVector(
+            fbb, len(dist_coeff_list))
+        for n in reversed(dist_coeff_list):
+            fbb.PrependFloat32(n)
+        dist_coeff_vector = fbb.EndVector(len(dist_coeff_list))
+
         node_name_offset = fbb.CreateString(camera_calib.node_name)
         CameraCalibration.CameraCalibrationStart(fbb)
         CameraCalibration.CameraCalibrationAddNodeName(fbb, node_name_offset)
@@ -162,6 +176,8 @@
             fbb, camera_calib.team_number)
         CameraCalibration.CameraCalibrationAddIntrinsics(
             fbb, intrinsics_vector)
+        CameraCalibration.CameraCalibrationAddDistCoeffs(
+            fbb, dist_coeff_vector)
         CameraCalibration.CameraCalibrationAddFixedExtrinsics(
             fbb, fixed_extrinsics_vector)
         camera_calibration_vector.append(
diff --git a/y2020/vision/tools/python_code/target_definition.py b/y2020/vision/tools/python_code/target_definition.py
index 1727dff..4c3f36b 100644
--- a/y2020/vision/tools/python_code/target_definition.py
+++ b/y2020/vision/tools/python_code/target_definition.py
@@ -181,7 +181,7 @@
     # These are manually captured by examining the images,
     # and entering the pixel values from the target center for each image.
     # These are currently only used for visualization of the target
-    ideal_power_port_red.target_point_2d = np.float32([[570,192]]).reshape(-1,1,2), # train_power_port_red.png
+    ideal_power_port_red.target_point_2d = np.float32([[570,192]]).reshape(-1,1,2)  # train_power_port_red.png
 
     # np.float32([[305, 97]]).reshape(-1, 1, 2),  #train_power_port_red_webcam.png
 
@@ -222,7 +222,7 @@
     ideal_loading_bay_red.target_position = np.array([field_length/2.,
                                                      loading_bay_edge_y + loading_bay_width/2.,
                                                       loading_bay_height/2.])
-    ideal_loading_bay_red.target_point_2d = np.float32([[366, 236]]).reshape(-1, 1, 2),  # train_loading_bay_red.png
+    ideal_loading_bay_red.target_point_2d = np.float32([[366, 236]]).reshape(-1, 1, 2)  # train_loading_bay_red.png
 
     ideal_target_list.append(ideal_loading_bay_red)
     training_target_loading_bay_red = TargetData(
@@ -285,7 +285,7 @@
     ideal_power_port_blue.target_position = np.array([field_length/2.,
                                                      -power_port_edge_y - power_port_width/2.,
                                                       power_port_target_height])
-    ideal_power_port_blue.target_point_2d = np.float32([[567, 180]]).reshape(-1, 1, 2),  # train_power_port_blue.png
+    ideal_power_port_blue.target_point_2d = np.float32([[567, 180]]).reshape(-1, 1, 2)  # train_power_port_blue.png
 
     ideal_target_list.append(ideal_power_port_blue)
     training_target_power_port_blue = TargetData(
@@ -325,7 +325,7 @@
     ideal_loading_bay_blue.target_position = np.array([-field_length/2.,
                                                      -loading_bay_edge_y - loading_bay_width/2.,
                                                        loading_bay_height/2.])
-    ideal_loading_bay_blue.target_point_2d = np.float32([[366, 236]]).reshape(-1, 1, 2),  # train_loading_bay_blue.png
+    ideal_loading_bay_blue.target_point_2d = np.float32([[366, 236]]).reshape(-1, 1, 2)  # train_loading_bay_blue.png
 
     ideal_target_list.append(ideal_loading_bay_blue)
     training_target_loading_bay_blue = TargetData(
diff --git a/y2020/vision/tools/python_code/target_definition_test.py b/y2020/vision/tools/python_code/target_definition_test.py
index 5432766..18df1e9 100644
--- a/y2020/vision/tools/python_code/target_definition_test.py
+++ b/y2020/vision/tools/python_code/target_definition_test.py
@@ -25,5 +25,6 @@
 
     target_data_test_1.target_rotation = np.identity(3, np.double)
     target_data_test_1.target_position = np.array([0., 1., 2.])
+    target_data_test_1.target_point_2d = np.array([10., 20.]).reshape(-1, 1, 2)
     target_data_list.append(target_data_test_1)
     return target_data_list
diff --git a/y2020/www/BUILD b/y2020/www/BUILD
index c0daa07..c280e91 100644
--- a/y2020/www/BUILD
+++ b/y2020/www/BUILD
@@ -4,24 +4,24 @@
 ts_library(
     name = "main",
     srcs = [
-        "main.ts",
         "image_handler.ts",
+        "main.ts",
     ],
+    visibility = ["//y2020:__subpackages__"],
     deps = [
         "//aos/network/www:proxy",
         "//y2020/vision:vision_ts_fbs",
         "//y2020/vision/sift:sift_ts_fbs",
     ],
-    visibility = ["//y2020:__subpackages__"],
 )
 
 rollup_bundle(
     name = "main_bundle",
     entry_point = "y2020/www/main",
+    visibility = ["//y2020:__subpackages__"],
     deps = [
         "main",
     ],
-    visibility = ["//y2020:__subpackages__"],
 )
 
 filegroup(
@@ -30,7 +30,7 @@
         "**/*.html",
         "**/*.css",
     ]),
-    visibility=["//visibility:public"],
+    visibility = ["//visibility:public"],
 )
 
 genrule(
@@ -42,5 +42,5 @@
         "flatbuffers.js",
     ],
     cmd = "cp $(location @com_github_google_flatbuffers//:flatjs) $@",
-    visibility=["//y2020:__subpackages__"],
+    visibility = ["//y2020:__subpackages__"],
 )