Merge "Add gstreamer to workspace"
diff --git a/README.md b/README.md
index 99154c4..d54d963 100644
--- a/README.md
+++ b/README.md
@@ -128,3 +128,21 @@
 # log messages and changes.
    apt-get install gitg
 ```
+
+### Roborio Kernel Traces
+
+Currently (as of 2020.02.26), `top` tends to produce misleading statistics. As
+such, you can get more useful information about CPU usage by using kernel
+traces. Sample usage:
+```console
+# Note that you will need to install the trace-cmd package on the roborio.
+# This may not be a trivial task.
+# Start the trace
+trace-cmd start -e sched_switch -e workqueue
+# Stop the trace
+trace-cmd stop
+# Save the trace to trace.dat
+trace-cmd extract
+```
+You can then scp the `trace.dat` file to your computer and run `kernelshark
+trace.dat` (may require installing the `kernelshark` apt package).
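A minimal sketch of that last step, run from your computer (the `admin@roboRIO-971-frc.local` address is only an illustration; substitute your robot's hostname and the directory where `trace-cmd extract` wrote the file):
```console
scp admin@roboRIO-971-frc.local:trace.dat .
kernelshark trace.dat
```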
diff --git a/aos/BUILD b/aos/BUILD
index 12d25b2..1d6d388 100644
--- a/aos/BUILD
+++ b/aos/BUILD
@@ -477,6 +477,7 @@
         "testdata/config2_multinode.json",
         "testdata/config3.json",
         "testdata/expected.json",
+        "testdata/expected_merge_with.json",
         "testdata/expected_multinode.json",
         "testdata/good_multinode.json",
         "testdata/good_multinode_hostnames.json",
diff --git a/aos/configuration.cc b/aos/configuration.cc
index ac897bf..d6d276e 100644
--- a/aos/configuration.cc
+++ b/aos/configuration.cc
@@ -417,6 +417,14 @@
   return MergeConfiguration(ReadConfig(path, &visited_paths));
 }
 
+FlatbufferDetachedBuffer<Configuration> MergeWithConfig(
+    const Configuration *config, std::string_view json) {
+  FlatbufferDetachedBuffer<Configuration> addition =
+      JsonToFlatbuffer(json, Configuration::MiniReflectTypeTable());
+
+  return MergeConfiguration(MergeFlatBuffers(config, &addition.message()));
+}
+
 const Channel *GetChannel(const Configuration *config, std::string_view name,
                           std::string_view type,
                           std::string_view application_name, const Node *node) {
diff --git a/aos/configuration.h b/aos/configuration.h
index bf2ec6c..9193fd5 100644
--- a/aos/configuration.h
+++ b/aos/configuration.h
@@ -32,6 +32,11 @@
     const Flatbuffer<Configuration> &config,
     const std::vector<aos::FlatbufferString<reflection::Schema>> &schemas);
 
+// Merges a configuration json snippet into the provided configuration and
+// returns the modified config.
+FlatbufferDetachedBuffer<Configuration> MergeWithConfig(
+    const Configuration *config, std::string_view json);
+
 // Returns the resolved location for a name, type, and application name. Returns
 // nullptr if none is found.
 //
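A minimal usage sketch of the new `MergeWithConfig` helper (the base config path and channel fields mirror the test added below in configuration_test.cc):
```cpp
#include "aos/configuration.h"

// Read a base config, then overlay a JSON snippet on top of it.  Channels in
// the snippet are merged into the base config's sorted channel list.
aos::FlatbufferDetachedBuffer<aos::Configuration> config =
    aos::configuration::ReadConfig("aos/testdata/config1.json");

aos::FlatbufferDetachedBuffer<aos::Configuration> updated_config =
    aos::configuration::MergeWithConfig(
        &config.message(),
        R"({ "channels": [ {"name": "/foo", "type": ".aos.bar", "max_size": 100} ] })");
```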
diff --git a/aos/configuration_test.cc b/aos/configuration_test.cc
index c36bd93..f069277 100644
--- a/aos/configuration_test.cc
+++ b/aos/configuration_test.cc
@@ -13,6 +13,8 @@
 namespace configuration {
 namespace testing {
 
+const std::string kConfigPrefix = "aos/testdata/";
+
 class ConfigurationTest : public ::testing::Test {
  public:
   ConfigurationTest() { ::aos::testing::EnableTestLogging(); }
@@ -30,19 +32,19 @@
 // Tests that we can read and merge a configuration.
 TEST_F(ConfigurationTest, ConfigMerge) {
   FlatbufferDetachedBuffer<Configuration> config =
-      ReadConfig("aos/testdata/config1.json");
+      ReadConfig(kConfigPrefix + "config1.json");
   LOG(INFO) << "Read: " << FlatbufferToJson(config, true);
 
   EXPECT_EQ(
       absl::StripSuffix(
-          util::ReadFileToStringOrDie("aos/testdata/expected.json"), "\n"),
+          util::ReadFileToStringOrDie(kConfigPrefix + "expected.json"), "\n"),
       FlatbufferToJson(config, true));
 }
 
 // Tests that we can get back a ChannelIndex.
 TEST_F(ConfigurationTest, ChannelIndex) {
   FlatbufferDetachedBuffer<Configuration> config =
-      ReadConfig("aos/testdata/config1.json");
+      ReadConfig(kConfigPrefix + "config1.json");
 
   EXPECT_EQ(
       ChannelIndex(&config.message(), config.message().channels()->Get(1u)),
@@ -52,12 +54,12 @@
 // Tests that we can read and merge a multinode configuration.
 TEST_F(ConfigurationTest, ConfigMergeMultinode) {
   FlatbufferDetachedBuffer<Configuration> config =
-      ReadConfig("aos/testdata/config1_multinode.json");
+      ReadConfig(kConfigPrefix + "config1_multinode.json");
   LOG(INFO) << "Read: " << FlatbufferToJson(config, true);
 
   EXPECT_EQ(
       std::string(absl::StripSuffix(
-          util::ReadFileToStringOrDie("aos/testdata/expected_multinode.json"),
+          util::ReadFileToStringOrDie(kConfigPrefix + "expected_multinode.json"),
           "\n")),
       FlatbufferToJson(config, true));
 }
@@ -65,7 +67,7 @@
 // Tests that we sort the entries in a config so we can look entries up.
 TEST_F(ConfigurationTest, UnsortedConfig) {
   FlatbufferDetachedBuffer<Configuration> config =
-      ReadConfig("aos/testdata/backwards.json");
+      ReadConfig(kConfigPrefix + "backwards.json");
 
   LOG(INFO) << "Read: " << FlatbufferToJson(config, true);
 
@@ -80,16 +82,41 @@
   EXPECT_DEATH(
       {
         FlatbufferDetachedBuffer<Configuration> config =
-            ReadConfig("aos/testdata/config1_bad.json");
+            ReadConfig(kConfigPrefix + "config1_bad.json");
       },
-      "aos/testdata/config1_bad.json");
+      kConfigPrefix + "config1_bad.json");
+}
+
+// Tests that we can modify a config with a json snippet.
+TEST_F(ConfigurationTest, MergeWithConfig) {
+  FlatbufferDetachedBuffer<Configuration> config =
+      ReadConfig(kConfigPrefix + "config1.json");
+  LOG(INFO) << "Read: " << FlatbufferToJson(config, true);
+
+  FlatbufferDetachedBuffer<Configuration> updated_config =
+      MergeWithConfig(&config.message(),
+                      R"channel({
+  "channels": [
+    {
+      "name": "/foo",
+      "type": ".aos.bar",
+      "max_size": 100
+    }
+  ]
+})channel");
+
+  EXPECT_EQ(
+      absl::StripSuffix(util::ReadFileToStringOrDie(
+                            kConfigPrefix + "expected_merge_with.json"),
+                        "\n"),
+      FlatbufferToJson(updated_config, true));
 }
 
 // Tests that we can lookup a location, complete with maps, from a merged
 // config.
 TEST_F(ConfigurationTest, GetChannel) {
   FlatbufferDetachedBuffer<Configuration> config =
-      ReadConfig("aos/testdata/config1.json");
+      ReadConfig(kConfigPrefix + "config1.json");
 
   // Test a basic lookup first.
   EXPECT_EQ(
@@ -123,7 +150,7 @@
 // Tests that we can lookup a location with node specific maps.
 TEST_F(ConfigurationTest, GetChannelMultinode) {
   FlatbufferDetachedBuffer<Configuration> config =
-      ReadConfig("aos/testdata/good_multinode.json");
+      ReadConfig(kConfigPrefix + "good_multinode.json");
   const Node *pi1 = GetNode(&config.message(), "pi1");
   const Node *pi2 = GetNode(&config.message(), "pi2");
 
@@ -159,7 +186,7 @@
 // Tests that we can lookup a location with type specific maps.
 TEST_F(ConfigurationTest, GetChannelTypedMultinode) {
   FlatbufferDetachedBuffer<Configuration> config =
-      ReadConfig("aos/testdata/good_multinode.json");
+      ReadConfig(kConfigPrefix + "good_multinode.json");
   const Node *pi1 = GetNode(&config.message(), "pi1");
 
   // Test a basic lookup first.
@@ -182,28 +209,28 @@
   EXPECT_DEATH(
       {
         FlatbufferDetachedBuffer<Configuration> config =
-            ReadConfig("aos/testdata/invalid_nodes.json");
+            ReadConfig(kConfigPrefix + "invalid_nodes.json");
       },
       "source_node");
 
   EXPECT_DEATH(
       {
         FlatbufferDetachedBuffer<Configuration> config =
-            ReadConfig("aos/testdata/invalid_source_node.json");
+            ReadConfig(kConfigPrefix + "invalid_source_node.json");
       },
       "source_node");
 
   EXPECT_DEATH(
       {
         FlatbufferDetachedBuffer<Configuration> config =
-            ReadConfig("aos/testdata/invalid_destination_node.json");
+            ReadConfig(kConfigPrefix + "invalid_destination_node.json");
       },
       "destination_nodes");
 
   EXPECT_DEATH(
       {
         FlatbufferDetachedBuffer<Configuration> config =
-            ReadConfig("aos/testdata/self_forward.json");
+            ReadConfig(kConfigPrefix + "self_forward.json");
       },
       "forwarding data to itself");
 }
@@ -582,7 +609,7 @@
 // Tests that we can deduce source nodes from a multinode config.
 TEST_F(ConfigurationTest, SourceNodeNames) {
   FlatbufferDetachedBuffer<Configuration> config =
-      ReadConfig("aos/testdata/config1_multinode.json");
+      ReadConfig(kConfigPrefix + "config1_multinode.json");
 
   // This is a bit simplistic in that it doesn't test deduplication, but it does
   // exercise a lot of the logic.
@@ -597,7 +624,7 @@
 // Tests that we can deduce destination nodes from a multinode config.
 TEST_F(ConfigurationTest, DestinationNodeNames) {
   FlatbufferDetachedBuffer<Configuration> config =
-      ReadConfig("aos/testdata/config1_multinode.json");
+      ReadConfig(kConfigPrefix + "config1_multinode.json");
 
   // This is a bit simplistic in that it doesn't test deduplication, but it does
   // exercise a lot of the logic.
@@ -613,7 +640,7 @@
 TEST_F(ConfigurationTest, GetNodes) {
   {
     FlatbufferDetachedBuffer<Configuration> config =
-        ReadConfig("aos/testdata/good_multinode.json");
+        ReadConfig(kConfigPrefix + "good_multinode.json");
     const Node *pi1 = GetNode(&config.message(), "pi1");
     const Node *pi2 = GetNode(&config.message(), "pi2");
 
@@ -622,7 +649,7 @@
 
   {
     FlatbufferDetachedBuffer<Configuration> config =
-        ReadConfig("aos/testdata/config1.json");
+        ReadConfig(kConfigPrefix + "config1.json");
     EXPECT_THAT(GetNodes(&config.message()), ::testing::ElementsAre(nullptr));
   }
 }
@@ -630,9 +657,9 @@
 // Tests that we can extract a node index from a config.
 TEST_F(ConfigurationTest, GetNodeIndex) {
   FlatbufferDetachedBuffer<Configuration> config =
-      ReadConfig("aos/testdata/good_multinode.json");
+      ReadConfig(kConfigPrefix + "good_multinode.json");
   FlatbufferDetachedBuffer<Configuration> config2 =
-      ReadConfig("aos/testdata/good_multinode.json");
+      ReadConfig(kConfigPrefix + "good_multinode.json");
   const Node *pi1 = GetNode(&config.message(), "pi1");
   const Node *pi2 = GetNode(&config.message(), "pi2");
 
@@ -653,13 +680,13 @@
 // valid nodes.
 TEST_F(ConfigurationDeathTest, GetNodeOrDie) {
   FlatbufferDetachedBuffer<Configuration> config =
-      ReadConfig("aos/testdata/good_multinode.json");
+      ReadConfig(kConfigPrefix + "good_multinode.json");
   FlatbufferDetachedBuffer<Configuration> config2 =
-      ReadConfig("aos/testdata/good_multinode.json");
+      ReadConfig(kConfigPrefix + "good_multinode.json");
   {
     // Simple case, nullptr -> nullptr
     FlatbufferDetachedBuffer<Configuration> single_node_config =
-        ReadConfig("aos/testdata/config1.json");
+        ReadConfig(kConfigPrefix + "config1.json");
     EXPECT_EQ(nullptr, GetNodeOrDie(&single_node_config.message(), nullptr));
 
     // Confirm that we die when a node is passed in.
@@ -679,7 +706,7 @@
 
 TEST_F(ConfigurationTest, GetNodeFromHostname) {
   FlatbufferDetachedBuffer<Configuration> config =
-      ReadConfig("aos/testdata/good_multinode.json");
+      ReadConfig(kConfigPrefix + "good_multinode.json");
   EXPECT_EQ("pi1",
             CHECK_NOTNULL(GetNodeFromHostname(&config.message(), "raspberrypi"))
                 ->name()
@@ -695,7 +722,7 @@
 
 TEST_F(ConfigurationTest, GetNodeFromHostnames) {
   FlatbufferDetachedBuffer<Configuration> config =
-      ReadConfig("aos/testdata/good_multinode_hostnames.json");
+      ReadConfig(kConfigPrefix + "good_multinode_hostnames.json");
   EXPECT_EQ("pi1",
             CHECK_NOTNULL(GetNodeFromHostname(&config.message(), "raspberrypi"))
                 ->name()
diff --git a/aos/controls/control_loop_test.h b/aos/controls/control_loop_test.h
index 7ad5683..536df02 100644
--- a/aos/controls/control_loop_test.h
+++ b/aos/controls/control_loop_test.h
@@ -114,6 +114,10 @@
     return &configuration_.message();
   }
 
+  SimulatedEventLoopFactory *event_loop_factory() {
+    return &event_loop_factory_;
+  }
+
  private:
   // Sends out all of the required queue messages.
   void SendJoystickState() {
diff --git a/aos/events/logging/log_edit.cc b/aos/events/logging/log_edit.cc
index 02b7bd7..ff263f8 100644
--- a/aos/events/logging/log_edit.cc
+++ b/aos/events/logging/log_edit.cc
@@ -37,7 +37,7 @@
       PCHECK(rename(FLAGS_logfile.c_str(), orig_path.c_str()) == 0);
 
       aos::logger::SpanReader span_reader(orig_path);
-      CHECK(span_reader.ReadMessage().empty());
+      CHECK(!span_reader.ReadMessage().empty()) << ": Empty header, aborting";
 
       aos::logger::DetachedBufferWriter buffer_writer(FLAGS_logfile);
       buffer_writer.QueueSizedFlatbuffer(&fbb);
diff --git a/aos/events/shm_event_loop.cc b/aos/events/shm_event_loop.cc
index dad62cd..57bcb1c 100644
--- a/aos/events/shm_event_loop.cc
+++ b/aos/events/shm_event_loop.cc
@@ -58,7 +58,7 @@
 }
 std::string ShmPath(const Channel *channel) {
   CHECK(channel->has_type());
-  return ShmFolder(channel) + channel->type()->str() + ".v1";
+  return ShmFolder(channel) + channel->type()->str() + ".v2";
 }
 
 class MMapedQueue {
diff --git a/aos/events/shm_event_loop_test.cc b/aos/events/shm_event_loop_test.cc
index 87411d2..0c39704 100644
--- a/aos/events/shm_event_loop_test.cc
+++ b/aos/events/shm_event_loop_test.cc
@@ -27,12 +27,12 @@
     }
 
     // Clean up anything left there before.
-    unlink((FLAGS_shm_base + "/test/aos.TestMessage.v1").c_str());
-    unlink((FLAGS_shm_base + "/test1/aos.TestMessage.v1").c_str());
-    unlink((FLAGS_shm_base + "/test2/aos.TestMessage.v1").c_str());
-    unlink((FLAGS_shm_base + "/test2/aos.TestMessage.v1").c_str());
-    unlink((FLAGS_shm_base + "/aos/aos.timing.Report.v1").c_str());
-    unlink((FLAGS_shm_base + "/aos/aos.logging.LogMessageFbs.v1").c_str());
+    unlink((FLAGS_shm_base + "/test/aos.TestMessage.v2").c_str());
+    unlink((FLAGS_shm_base + "/test1/aos.TestMessage.v2").c_str());
+    unlink((FLAGS_shm_base + "/test2/aos.TestMessage.v2").c_str());
+    unlink((FLAGS_shm_base + "/test2/aos.TestMessage.v2").c_str());
+    unlink((FLAGS_shm_base + "/aos/aos.timing.Report.v2").c_str());
+    unlink((FLAGS_shm_base + "/aos/aos.logging.LogMessageFbs.v2").c_str());
   }
 
   ~ShmEventLoopTestFactory() { FLAGS_override_hostname = ""; }
diff --git a/aos/ipc_lib/lockless_queue.cc b/aos/ipc_lib/lockless_queue.cc
index c323b8b..7126ffd 100644
--- a/aos/ipc_lib/lockless_queue.cc
+++ b/aos/ipc_lib/lockless_queue.cc
@@ -306,6 +306,7 @@
     }
 
     memory->next_queue_index.Invalidate();
+    memory->uid = getuid();
 
     for (size_t i = 0; i < memory->num_senders(); ++i) {
       ::aos::ipc_lib::Sender *s = memory->GetSender(i);
@@ -319,6 +320,8 @@
     // Signal everything is done.  This needs to be done last, so if we die, we
     // redo initialization.
     memory->initialized = true;
+  } else {
+    CHECK_EQ(getuid(), memory->uid) << ": UIDs must match for all processes";
   }
 
   return memory;
@@ -871,6 +874,8 @@
               << memory->next_queue_index.Load(queue_size).DebugString()
               << ::std::endl;
 
+  ::std::cout << "    uid_t uid = " << memory->uid << ::std::endl;
+
   ::std::cout << "  }" << ::std::endl;
   ::std::cout << "  AtomicIndex queue[" << queue_size << "] {" << ::std::endl;
   for (size_t i = 0; i < queue_size; ++i) {
diff --git a/aos/ipc_lib/lockless_queue_memory.h b/aos/ipc_lib/lockless_queue_memory.h
index cbe76a7..a10609c 100644
--- a/aos/ipc_lib/lockless_queue_memory.h
+++ b/aos/ipc_lib/lockless_queue_memory.h
@@ -50,6 +50,11 @@
   // A message is valid iff its internal index matches the index in the queue.
   AtomicQueueIndex next_queue_index;
 
+  // All processes using a queue need to be able to send messages to each other.
+  // We're going to require they have matching UIDs, which is sufficient to do
+  // that.
+  uid_t uid;
+
   // There is then memory allocated after this structure.  That memory is used
   // to store the messages, queue, watchers, and senders.  This is equivalent to
   // writing:
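A simplified sketch of how this new field interacts with the lockless_queue.cc hunks above (the function name here is an assumption based on the file names, and real initialization does much more; only the UID handshake is shown):
```cpp
#include <unistd.h>

#include "glog/logging.h"

LocklessQueueMemory *Initialize(LocklessQueueMemory *memory) {
  if (!memory->initialized) {
    // The first process in records its UID so later processes can verify
    // that everyone sharing the queue can signal everyone else.
    memory->uid = getuid();
    // ... set up the queue, senders, and watchers ...
    // Signal everything is done.  This needs to be done last, so if we die,
    // we redo initialization.
    memory->initialized = true;
  } else {
    // Later processes attach and must match the recorded UID.
    CHECK_EQ(getuid(), memory->uid) << ": UIDs must match for all processes";
  }
  return memory;
}
```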
diff --git a/aos/logging/BUILD b/aos/logging/BUILD
index 214df63..68a493b 100644
--- a/aos/logging/BUILD
+++ b/aos/logging/BUILD
@@ -156,6 +156,6 @@
 flatbuffer_cc_library(
     name = "log_message_fbs",
     srcs = ["log_message.fbs"],
-    visibility = ["//visibility:public"],
     gen_reflections = 1,
+    visibility = ["//visibility:public"],
 )
diff --git a/aos/logging/log_namer.cc b/aos/logging/log_namer.cc
index 83a39c8..f025054 100644
--- a/aos/logging/log_namer.cc
+++ b/aos/logging/log_namer.cc
@@ -144,21 +144,25 @@
 
   char *tmp;
   AllocateLogName(&tmp, folder, basename);
+
+  std::string log_base_name = tmp;
+  std::string log_roborio_name = log_base_name + "_roborio_data.bfbs";
+  free(tmp);
+
   char *tmp2;
-  if (asprintf(&tmp2, "%s/%s-current", folder, basename) == -1) {
+  if (asprintf(&tmp2, "%s/%s-current.bfbs", folder, basename) == -1) {
     PLOG(WARNING) << "couldn't create current symlink name";
   } else {
     if (unlink(tmp2) == -1 && (errno != EROFS && errno != ENOENT)) {
       LOG(WARNING) << "unlink('" << tmp2 << "') failed";
     }
-    if (symlink(tmp, tmp2) == -1) {
-      PLOG(WARNING) << "symlink('" << tmp << "', '" << tmp2 << "') failed";
+    if (symlink(log_roborio_name.c_str(), tmp2) == -1) {
+      PLOG(WARNING) << "symlink('" << log_roborio_name.c_str() << "', '" << tmp2
+                    << "') failed";
     }
     free(tmp2);
   }
-  std::string result = tmp;
-  free(tmp);
-  return result;
+  return log_base_name;
 }
 
 }  // namespace logging
diff --git a/aos/network/BUILD b/aos/network/BUILD
index 89853f0..8f3f394 100644
--- a/aos/network/BUILD
+++ b/aos/network/BUILD
@@ -70,6 +70,7 @@
     ],
     deps = [
         ":team_number",
+        "//aos:configuration",
         "//aos/testing:googletest",
     ],
 )
diff --git a/aos/network/team_number.cc b/aos/network/team_number.cc
index 14fadab..d2fbc8e 100644
--- a/aos/network/team_number.cc
+++ b/aos/network/team_number.cc
@@ -9,6 +9,8 @@
 
 #include "aos/util/string_to_num.h"
 
+DECLARE_string(override_hostname);
+
 namespace aos {
 namespace network {
 namespace team_number_internal {
@@ -101,10 +103,14 @@
 }  // namespace
 
 ::std::string GetHostname() {
-  char buf[256];
-  buf[sizeof(buf) - 1] = '\0';
-  PCHECK(gethostname(buf, sizeof(buf) - 1) == 0);
-  return buf;
+  if (FLAGS_override_hostname.empty()) {
+    char buf[256];
+    buf[sizeof(buf) - 1] = '\0';
+    PCHECK(gethostname(buf, sizeof(buf) - 1) == 0);
+    return buf;
+  } else {
+    return FLAGS_override_hostname;
+  }
 }
 
 uint16_t GetTeamNumber() {
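A sketch of exercising the new override path from a test (the `override_hostname` flag is `DEFINE`d elsewhere in the tree and the test name is hypothetical; `raspberrypi` matches the hostnames in the multinode test data above):
```cpp
#include "aos/network/team_number.h"
#include "gflags/gflags.h"
#include "gtest/gtest.h"

DECLARE_string(override_hostname);

// With the flag set, GetHostname() returns the override instead of calling
// gethostname().
TEST(TeamNumberTest, OverrideHostname) {
  FLAGS_override_hostname = "raspberrypi";
  EXPECT_EQ("raspberrypi", aos::network::GetHostname());
  FLAGS_override_hostname = "";  // Restore the normal gethostname() path.
}
```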
diff --git a/aos/network/web_proxy.cc b/aos/network/web_proxy.cc
index f4a8ce8..5b50072 100644
--- a/aos/network/web_proxy.cc
+++ b/aos/network/web_proxy.cc
@@ -53,6 +53,8 @@
   }
 
   fetcher_->Fetch();
+  VLOG(2) << "Sending a message with " << GetPacketCount(fetcher_->context())
+          << " packets";
   for (int packet_index = 0; packet_index < GetPacketCount(fetcher_->context());
        ++packet_index) {
     flatbuffers::Offset<MessageHeader> message =
@@ -65,14 +67,20 @@
         rtc::CopyOnWriteBuffer(buffer.data(), buffer.size()),
         true /* binary array */);
     for (auto conn : channels_) {
+      if (conn->buffered_amount() > 14000000) {
+        VLOG(1) << "skipping a send because the buffered amount is too high";
+        continue;
+      }
       conn->Send(data_buffer);
     }
   }
 }
 
 bool Subscriber::Compare(const Channel *channel) const {
-  return channel->name() == fetcher_->channel()->name() &&
-         channel->type() == fetcher_->channel()->type();
+  return channel->name()->string_view() ==
+             fetcher_->channel()->name()->string_view() &&
+         channel->type()->string_view() ==
+             fetcher_->channel()->type()->string_view();
 }
 
 Connection::Connection(
@@ -156,6 +164,7 @@
   webrtc::DataBuffer data_buffer(
       rtc::CopyOnWriteBuffer(buffer.data(), buffer.size()),
       true /* binary array */);
+  VLOG(1) << "Sending " << buffer.size() << " bytes to a client";
   data_channel_->Send(data_buffer);
 }
 
@@ -211,9 +220,11 @@
 void Connection::OnMessage(const webrtc::DataBuffer &buffer) {
   const message_bridge::Connect *message =
       flatbuffers::GetRoot<message_bridge::Connect>(buffer.data.data());
+  VLOG(2) << "Got a connect message " << aos::FlatbufferToJson(message);
   for (auto &subscriber : subscribers_) {
     // Make sure the subscriber is for a channel on this node.
     if (subscriber.get() == nullptr) {
+      VLOG(2) << ": Null subscriber";
       continue;
     }
     bool found_match = false;
diff --git a/aos/network/web_proxy_main.cc b/aos/network/web_proxy_main.cc
index 479320a..c674520 100644
--- a/aos/network/web_proxy_main.cc
+++ b/aos/network/web_proxy_main.cc
@@ -1,4 +1,5 @@
 #include "aos/events/shm_event_loop.h"
+#include "aos/flatbuffer_merge.h"
 #include "aos/init.h"
 #include "aos/network/web_proxy.h"
 #include "aos/seasocks/seasocks_logger.h"
@@ -15,17 +16,22 @@
     std::vector<std::unique_ptr<aos::web_proxy::Subscriber>> *subscribers,
     const aos::FlatbufferDetachedBuffer<aos::Configuration> &config) {
   aos::ShmEventLoop event_loop(&config.message());
-  const aos::Node *self = aos::configuration::GetMyNode(&config.message());
+  const bool is_multi_node =
+      aos::configuration::MultiNode(event_loop.configuration());
+  const aos::Node *self =
+      is_multi_node ? aos::configuration::GetMyNode(event_loop.configuration())
+                    : nullptr;
 
-  // TODO(alex): skip fetchers on the wrong node.
+  LOG(INFO) << "My node is " << aos::FlatbufferToJson(self);
+
   for (uint i = 0; i < config.message().channels()->size(); ++i) {
     auto channel = config.message().channels()->Get(i);
-    if (!aos::configuration::ChannelIsReadableOnNode(channel, self)) {
-      subscribers->emplace_back(nullptr);
-    } else {
+    if (aos::configuration::ChannelIsReadableOnNode(channel, self)) {
       auto fetcher = event_loop.MakeRawFetcher(channel);
       subscribers->emplace_back(
           std::make_unique<aos::web_proxy::Subscriber>(std::move(fetcher), i));
+    } else {
+      subscribers->emplace_back(nullptr);
     }
   }
 
@@ -33,7 +39,9 @@
 
   auto timer = event_loop.AddTimer([&]() {
     for (auto &subscriber : *subscribers) {
-      subscriber->RunIteration();
+      if (subscriber != nullptr) {
+        subscriber->RunIteration();
+      }
     }
   });
 
@@ -54,6 +62,14 @@
   aos::FlatbufferDetachedBuffer<aos::Configuration> config =
       aos::configuration::ReadConfig(FLAGS_config);
 
+  for (size_t i = 0; i < config.message().channels()->size(); ++i) {
+    aos::Channel *channel =
+        config.mutable_message()->mutable_channels()->GetMutableObject(i);
+    channel->clear_schema();
+  }
+
+  config = aos::CopyFlatBuffer(&config.message());
+
   std::vector<std::unique_ptr<aos::web_proxy::Subscriber>> subscribers;
 
   std::thread data_thread{
diff --git a/aos/network/www/BUILD b/aos/network/www/BUILD
index bab5198..67e54f8 100644
--- a/aos/network/www/BUILD
+++ b/aos/network/www/BUILD
@@ -7,7 +7,7 @@
         "**/*.html",
         "**/*.css",
     ]),
-    visibility=["//visibility:public"],
+    visibility = ["//visibility:public"],
 )
 
 ts_library(
@@ -16,12 +16,12 @@
         "config_handler.ts",
         "proxy.ts",
     ],
+    visibility = ["//visibility:public"],
     deps = [
-        "//aos/network:web_proxy_ts_fbs",
-        "//aos/network:connect_ts_fbs",
         "//aos:configuration_ts_fbs",
+        "//aos/network:connect_ts_fbs",
+        "//aos/network:web_proxy_ts_fbs",
     ],
-    visibility=["//visibility:public"],
 )
 
 ts_library(
@@ -39,10 +39,10 @@
 rollup_bundle(
     name = "main_bundle",
     entry_point = "aos/network/www/main",
+    visibility = ["//aos:__subpackages__"],
     deps = [
         "main",
     ],
-    visibility=["//aos:__subpackages__"],
 )
 
 genrule(
@@ -54,5 +54,5 @@
         "flatbuffers.js",
     ],
     cmd = "cp $(location @com_github_google_flatbuffers//:flatjs) $@",
-    visibility=["//aos:__subpackages__"],
+    visibility = ["//aos:__subpackages__"],
 )
diff --git a/aos/network/www/config_handler.ts b/aos/network/www/config_handler.ts
index 72b496e..6615f39 100644
--- a/aos/network/www/config_handler.ts
+++ b/aos/network/www/config_handler.ts
@@ -1,18 +1,36 @@
 import {Configuration, Channel} from 'aos/configuration_generated';
 import {Connect} from 'aos/network/connect_generated';
+import {Connection} from './proxy';
 
 export class ConfigHandler {
-  private readonly root_div = document.getElementById('config');
+  private readonly root_div = document.createElement('div');
+  private readonly tree_div: HTMLDivElement;
+  private config: Configuration|null = null;
 
-  constructor(
-      private readonly config: Configuration,
-      private readonly dataChannel: RTCDataChannel) {}
+  constructor(private readonly connection: Connection) {
+    this.connection.addConfigHandler((config) => this.handleConfig(config));
+
+    document.body.appendChild(this.root_div);
+    const show_button = document.createElement('button');
+    show_button.addEventListener('click', () => this.toggleConfig());
+    const show_text = document.createTextNode('Show/Hide Config');
+    show_button.appendChild(show_text);
+    this.tree_div = document.createElement('div');
+    this.tree_div.hidden = true;
+    this.root_div.appendChild(show_button);
+    this.root_div.appendChild(this.tree_div);
+  }
+
+  handleConfig(config: Configuration) {
+    this.config = config;
+    this.printConfig();
+  }
 
   printConfig() {
    for (let i = 0; i < this.config.channelsLength(); i++) {
       const channel_div = document.createElement('div');
       channel_div.classList.add('channel');
-      this.root_div.appendChild(channel_div);
+      this.tree_div.appendChild(channel_div);
 
       const input_el = document.createElement('input');
       input_el.setAttribute('data-index', i);
@@ -60,8 +78,10 @@
     Connect.addChannelsToTransfer(builder, channelsfb);
     const connect = Connect.endConnect(builder);
     builder.finish(connect);
-    const array = builder.asUint8Array();
-    console.log('connect', array);
-    this.dataChannel.send(array.buffer.slice(array.byteOffset));
+    this.connection.sendConnectMessage(builder);
+  }
+
+  toggleConfig() {
+    this.tree_div.hidden = !this.tree_div.hidden;
   }
 }
diff --git a/aos/network/www/proxy.ts b/aos/network/www/proxy.ts
index 80c71b2..7bcf575 100644
--- a/aos/network/www/proxy.ts
+++ b/aos/network/www/proxy.ts
@@ -1,5 +1,6 @@
 import {ConfigHandler} from './config_handler';
 import {Configuration} from 'aos/configuration_generated';
+import * as WebProxy from 'aos/network/web_proxy_generated';
 
 // There is one handler for each DataChannel, it maintains the state of
 // multi-part messages and delegates to a callback when the message is fully
@@ -18,13 +19,17 @@
     const messageHeader =
         WebProxy.MessageHeader.getRootAsMessageHeader(fbBuffer);
     // Short circuit if only one packet
-    if (messageHeader.packetCount === 1) {
+    if (messageHeader.packetCount() === 1) {
       this.handlerFunc(messageHeader.dataArray());
       return;
     }
 
     if (messageHeader.packetIndex() === 0) {
       this.dataBuffer = new Uint8Array(messageHeader.length());
+      this.receivedMessageLength = 0;
+    }
+    if (!messageHeader.dataLength()) {
+      return;
     }
     this.dataBuffer.set(
         messageHeader.dataArray(),
@@ -44,8 +49,11 @@
   private rtcPeerConnection: RTCPeerConnection|null = null;
   private dataChannel: DataChannel|null = null;
   private webSocketUrl: string;
-  private configHandler: ConfigHandler|null = null;
-  private config: Configuration|null = null;
+
+  private configInternal: Configuration|null = null;
+  // A set of functions that accept the config to handle.
+  private readonly configHandlers = new Set<(config: Configuration) => void>();
+
   private readonly handlerFuncs = new Map<string, (data: Uint8Array) => void>();
   private readonly handlers = new Set<Handler>();
 
@@ -54,6 +62,15 @@
     this.webSocketUrl = `ws://${server}/ws`;
   }
 
+  addConfigHandler(handler: (config: Configuration) => void): void {
+    this.configHandlers.add(handler);
+  }
+
+  /**
+   * Add a handler for a specific message type. Until we need to handle
+   * different channel names with the same type differently, this is good
+   * enough.
+   */
   addHandler(id: string, handler: (data: Uint8Array) => void): void {
     this.handlerFuncs.set(id, handler);
   }
@@ -67,17 +84,17 @@
         'message', (e) => this.onWebSocketMessage(e));
   }
 
+  getConfig() {
+    return this.configInternal;
+  }
+
   // Handle messages on the DataChannel. Handles the Configuration message as
   // all other messages are sent on specific DataChannels.
   onDataChannelMessage(e: MessageEvent): void {
     const fbBuffer = new flatbuffers.ByteBuffer(new Uint8Array(e.data));
-    // TODO(alex): handle config updates if/when required
-    if (!this.configHandler) {
-      const config = aos.Configuration.getRootAsConfiguration(fbBuffer);
-      this.config = config;
-      this.configHandler = new ConfigHandler(config, this.dataChannel);
-      this.configHandler.printConfig();
-      return;
+    this.configInternal = Configuration.getRootAsConfiguration(fbBuffer);
+    for (const handler of Array.from(this.configHandlers)) {
+      handler(this.configInternal);
     }
   }
 
@@ -127,7 +144,7 @@
   onWebSocketOpen(): void {
     this.rtcPeerConnection = new RTCPeerConnection({});
     this.rtcPeerConnection.addEventListener(
-        'datachannel', (e) => this.onDataCnannel(e));
+        'datachannel', (e) => this.onDataChannel(e));
     this.dataChannel = this.rtcPeerConnection.createDataChannel('signalling');
     this.dataChannel.addEventListener(
         'message', (e) => this.onDataChannelMessage(e));
@@ -169,4 +186,14 @@
         break;
     }
   }
+
+  /**
+   * Subscribes to messages. Only the most recent connect message is in use. Any
+   * channels not specified in the message are implicitly unsubscribed.
+   * @param builder A finished flatbuffers.Builder containing a Connect
+   *     message to send.
+   */
+  sendConnectMessage(builder: any) {
+    const array = builder.asUint8Array();
+    this.dataChannel.send(array.buffer.slice(array.byteOffset));
+  }
 }
diff --git a/aos/starter/starter.sh b/aos/starter/starter.sh
index f5b14a5..e140351 100755
--- a/aos/starter/starter.sh
+++ b/aos/starter/starter.sh
@@ -6,6 +6,9 @@
 
   ln -s /var/local/natinst/log/FRC_UserProgram.log /tmp/FRC_UserProgram.log
   ln -s /var/local/natinst/log/FRC_UserProgram.log "${ROBOT_CODE}/FRC_UserProgram.log"
+elif [[ "$(hostname)" == "pi-"* ]]; then
+  ROBOT_CODE="/home/pi/robot_code"
+
 else
   ROBOT_CODE="${HOME}/robot_code"
 fi
diff --git a/aos/testdata/expected_merge_with.json b/aos/testdata/expected_merge_with.json
new file mode 100644
index 0000000..0ce2056
--- /dev/null
+++ b/aos/testdata/expected_merge_with.json
@@ -0,0 +1,64 @@
+{
+ "channels": [
+  {
+   "name": "/foo",
+   "type": ".aos.bar",
+   "max_size": 100
+  },
+  {
+   "name": "/foo2",
+   "type": ".aos.bar"
+  },
+  {
+   "name": "/foo3",
+   "type": ".aos.bar",
+   "max_size": 9
+  }
+ ],
+ "maps": [
+  {
+   "match": {
+    "name": "/batman"
+   },
+   "rename": {
+    "name": "/bar"
+   }
+  },
+  {
+   "match": {
+    "name": "/batman"
+   },
+   "rename": {
+    "name": "/foo"
+   }
+  }
+ ],
+ "applications": [
+  {
+   "name": "app1",
+   "maps": [
+    {
+     "match": {
+      "name": "/bar"
+     },
+     "rename": {
+      "name": "/foo"
+     }
+    }
+   ]
+  },
+  {
+   "name": "app2",
+   "maps": [
+    {
+     "match": {
+      "name": "/baz"
+     },
+     "rename": {
+      "name": "/foo"
+     }
+    }
+   ]
+  }
+ ]
+}
diff --git a/aos/testing/BUILD b/aos/testing/BUILD
index 9aa058d..8693f27 100644
--- a/aos/testing/BUILD
+++ b/aos/testing/BUILD
@@ -24,9 +24,9 @@
     visibility = ["//visibility:public"],
     deps = [
         ":googletest",
-        "@com_google_absl//absl/base",
         "//aos/logging:implementations",
         "//aos/mutex",
+        "@com_google_absl//absl/base",
     ],
 )
 
diff --git a/debian/webrtc.BUILD b/debian/webrtc.BUILD
index e90e268..23e029d 100644
--- a/debian/webrtc.BUILD
+++ b/debian/webrtc.BUILD
@@ -2,17 +2,17 @@
 
 cc_library(
     name = "webrtc",
-    visibility = ["//visibility:public"],
-    hdrs = glob(["include/**/*.h"]),
     srcs = cpu_select({
         "arm": ["lib/arm/Release/libwebrtc_full.a"],
         "else": ["lib/x64/Release/libwebrtc_full.a"],
     }),
+    hdrs = glob(["include/**/*.h"]),
     includes = ["include"],
+    visibility = ["//visibility:public"],
     deps = [
+        "@com_google_absl//absl/algorithm:container",
         "@com_google_absl//absl/strings",
         "@com_google_absl//absl/types:optional",
         "@com_google_absl//absl/types:variant",
-        "@com_google_absl//absl/algorithm:container",
     ],
 )
diff --git a/frc971/analysis/plot_configs/localizer.pb b/frc971/analysis/plot_configs/localizer.pb
index f81ec89..930d63d 100644
--- a/frc971/analysis/plot_configs/localizer.pb
+++ b/frc971/analysis/plot_configs/localizer.pb
@@ -23,6 +23,16 @@
   axes {
     line {
       y_signal {
+        channel: "DrivetrainStatus"
+        field: "trajectory_logging.y"
+      }
+      x_signal {
+        channel: "DrivetrainStatus"
+        field: "trajectory_logging.x"
+      }
+    }
+    line {
+      y_signal {
         channel: "DrivetrainTruthStatus"
         field: "y"
       }
@@ -31,7 +41,6 @@
         field: "x"
       }
     }
-    share_x_axis: false
     line {
       y_signal {
         channel: "DrivetrainStatus"
@@ -176,6 +185,18 @@
     line {
       y_signal {
         channel: "DrivetrainStatus"
+        field: "trajectory_logging.left_velocity"
+      }
+    }
+    line {
+      y_signal {
+        channel: "DrivetrainStatus"
+        field: "trajectory_logging.right_velocity"
+      }
+    }
+    line {
+      y_signal {
+        channel: "DrivetrainStatus"
         field: "localizer.left_velocity"
       }
     }
diff --git a/frc971/analysis/py_log_reader.cc b/frc971/analysis/py_log_reader.cc
index a9a1db1..08f3707 100644
--- a/frc971/analysis/py_log_reader.cc
+++ b/frc971/analysis/py_log_reader.cc
@@ -87,8 +87,14 @@
   tools->reader = std::make_unique<aos::logger::LogReader>(log_file_name);
   tools->reader->Register();
 
-  tools->event_loop =
-      tools->reader->event_loop_factory()->MakeEventLoop("data_fetcher");
+  if (aos::configuration::MultiNode(tools->reader->configuration())) {
+    tools->event_loop = tools->reader->event_loop_factory()->MakeEventLoop(
+        "data_fetcher",
+        aos::configuration::GetNode(tools->reader->configuration(), "roborio"));
+  } else {
+    tools->event_loop =
+        tools->reader->event_loop_factory()->MakeEventLoop("data_fetcher");
+  }
   tools->event_loop->SkipTimingReport();
   tools->event_loop->SkipAosLog();
 
diff --git a/frc971/constants.h b/frc971/constants.h
index debfb55..7c2121d 100644
--- a/frc971/constants.h
+++ b/frc971/constants.h
@@ -97,9 +97,9 @@
   double lower;
   double upper;
 
-  double middle() const { return (lower_hard + upper_hard) / 2.0; }
+  constexpr double middle() const { return (lower_hard + upper_hard) / 2.0; }
 
-  double range() const { return upper_hard - lower_hard; }
+  constexpr double range() const { return upper_hard - lower_hard; }
 };
 
 }  // namespace constants
diff --git a/frc971/control_loops/drivetrain/BUILD b/frc971/control_loops/drivetrain/BUILD
index 9fef8f9..d223d13 100644
--- a/frc971/control_loops/drivetrain/BUILD
+++ b/frc971/control_loops/drivetrain/BUILD
@@ -562,7 +562,7 @@
         ":trajectory",
         "//aos/logging:implementations",
         "//aos/network:team_number",
-        "//y2019/control_loops/drivetrain:drivetrain_base",
+        "//y2020/control_loops/drivetrain:drivetrain_base",
         "@org_tuxfamily_eigen//:eigen",
     ],
 )
@@ -691,6 +691,7 @@
         "improved_down_estimator.h",
     ],
     deps = [
+        ":drivetrain_config",
         ":drivetrain_status_fbs",
         "//aos/events:event_loop",
         "//frc971/control_loops:control_loops_fbs",
@@ -707,6 +708,7 @@
         "improved_down_estimator_test.cc",
     ],
     deps = [
+        ":drivetrain_test_lib",
         "//aos/testing:googletest",
         "//aos/testing:random_seed",
         "//frc971/control_loops/drivetrain:improved_down_estimator",
diff --git a/frc971/control_loops/drivetrain/drivetrain.cc b/frc971/control_loops/drivetrain/drivetrain.cc
index a19f57b..1c0ac4f 100644
--- a/frc971/control_loops/drivetrain/drivetrain.cc
+++ b/frc971/control_loops/drivetrain/drivetrain.cc
@@ -41,6 +41,7 @@
       gyro_reading_fetcher_(
           event_loop->MakeFetcher<::frc971::sensors::GyroReading>(
               "/drivetrain")),
+      down_estimator_(dt_config),
       localizer_(localizer),
       kf_(dt_config_.make_kf_drivetrain_loop()),
       dt_openloop_(dt_config_, &kf_),
@@ -148,7 +149,7 @@
   }
 
   while (imu_values_fetcher_.FetchNext()) {
-    imu_zeroer_.ProcessMeasurement(*imu_values_fetcher_);
+    imu_zeroer_.InsertMeasurement(*imu_values_fetcher_);
     last_gyro_time_ = monotonic_now;
     if (!imu_zeroer_.Zeroed()) {
       continue;
@@ -165,6 +166,7 @@
 
   bool got_imu_reading = false;
   if (imu_values_fetcher_.get() != nullptr) {
+    imu_zeroer_.ProcessMeasurements();
     got_imu_reading = true;
     switch (dt_config_.imu_type) {
       case IMUType::IMU_X:
@@ -176,6 +178,9 @@
       case IMUType::IMU_Y:
         last_accel_ = -imu_values_fetcher_->accelerometer_y();
         break;
+      case IMUType::IMU_Z:
+        last_accel_ = imu_values_fetcher_->accelerometer_z();
+        break;
     }
   }
 
diff --git a/frc971/control_loops/drivetrain/drivetrain_config.h b/frc971/control_loops/drivetrain/drivetrain_config.h
index 5a29008..a33ff22 100644
--- a/frc971/control_loops/drivetrain/drivetrain_config.h
+++ b/frc971/control_loops/drivetrain/drivetrain_config.h
@@ -37,6 +37,7 @@
   IMU_X = 0,          // Use the x-axis of the IMU.
   IMU_Y = 1,          // Use the y-axis of the IMU.
   IMU_FLIPPED_X = 2,  // Use the flipped x-axis of the IMU.
+  IMU_Z = 3,          // Use the z-axis of the IMU.
 };
 
 template <typename Scalar = double>
diff --git a/frc971/control_loops/drivetrain/drivetrain_test_lib.cc b/frc971/control_loops/drivetrain/drivetrain_test_lib.cc
index 336025a..6a90122 100644
--- a/frc971/control_loops/drivetrain/drivetrain_test_lib.cc
+++ b/frc971/control_loops/drivetrain/drivetrain_test_lib.cc
@@ -190,16 +190,23 @@
   auto builder = imu_sender_.MakeBuilder();
   frc971::IMUValues::Builder imu_builder =
       builder.MakeBuilder<frc971::IMUValues>();
-  imu_builder.add_gyro_x(0.0);
-  imu_builder.add_gyro_y(0.0);
-  imu_builder.add_gyro_z(
-      (drivetrain_plant_.X(3, 0) - drivetrain_plant_.X(1, 0)) /
-      (dt_config_.robot_radius * 2.0));
+  const Eigen::Vector3d gyro =
+      dt_config_.imu_transform.inverse() *
+      Eigen::Vector3d(0.0, 0.0,
+                      (drivetrain_plant_.X(3, 0) - drivetrain_plant_.X(1, 0)) /
+                          (dt_config_.robot_radius * 2.0));
+  imu_builder.add_gyro_x(gyro.x());
+  imu_builder.add_gyro_y(gyro.y());
+  imu_builder.add_gyro_z(gyro.z());
   // Acceleration due to gravity, in m/s/s.
   constexpr double kG = 9.807;
-  imu_builder.add_accelerometer_x(last_acceleration_.x() / kG);
-  imu_builder.add_accelerometer_y(last_acceleration_.y() / kG);
-  imu_builder.add_accelerometer_z(1.0);
+  const Eigen::Vector3d accel =
+      dt_config_.imu_transform.inverse() *
+      Eigen::Vector3d(last_acceleration_.x() / kG, last_acceleration_.y() / kG,
+                      1.0);
+  imu_builder.add_accelerometer_x(accel.x());
+  imu_builder.add_accelerometer_y(accel.y());
+  imu_builder.add_accelerometer_z(accel.z());
   imu_builder.add_monotonic_timestamp_ns(
       std::chrono::duration_cast<std::chrono::nanoseconds>(
           event_loop_->monotonic_now().time_since_epoch())
diff --git a/frc971/control_loops/drivetrain/hybrid_ekf.h b/frc971/control_loops/drivetrain/hybrid_ekf.h
index d2c0be3..cb50165 100644
--- a/frc971/control_loops/drivetrain/hybrid_ekf.h
+++ b/frc971/control_loops/drivetrain/hybrid_ekf.h
@@ -146,7 +146,7 @@
   static constexpr int kNOutputs = 3;
   // Time constant to use for estimating how the longitudinal/lateral velocity
   // offsets decay, in seconds.
-  static constexpr double kVelocityOffsetTimeConstant = 10.0;
+  static constexpr double kVelocityOffsetTimeConstant = 1.0;
   static constexpr double kLateralVelocityTimeConstant = 1.0;
   // Inputs are [left_volts, right_volts]
   typedef Eigen::Matrix<Scalar, kNInputs, 1> Input;
diff --git a/frc971/control_loops/drivetrain/improved_down_estimator.h b/frc971/control_loops/drivetrain/improved_down_estimator.h
index 27de867..c1c8e1b 100644
--- a/frc971/control_loops/drivetrain/improved_down_estimator.h
+++ b/frc971/control_loops/drivetrain/improved_down_estimator.h
@@ -6,6 +6,7 @@
 
 #include "aos/events/event_loop.h"
 #include "aos/time/time.h"
+#include "frc971/control_loops/drivetrain/drivetrain_config.h"
 #include "frc971/control_loops/drivetrain/drivetrain_status_generated.h"
 #include "frc971/control_loops/runge_kutta.h"
 #include "glog/logging.h"
@@ -276,6 +277,8 @@
 // accelerometer calibration).
 class DrivetrainUkf : public QuaternionUkf {
  public:
+  DrivetrainUkf(const DrivetrainConfig<double> &dt_config)
+      : QuaternionUkf(dt_config.imu_transform) {}
   // UKF for http://kodlab.seas.upenn.edu/uploads/Arun/UKFpaper.pdf
   // Reference in case the link is dead:
   // Kraft, Edgar. "A quaternion-based unscented Kalman filter for orientation
diff --git a/frc971/control_loops/drivetrain/improved_down_estimator_test.cc b/frc971/control_loops/drivetrain/improved_down_estimator_test.cc
index 86ef5e9..2edce5a 100644
--- a/frc971/control_loops/drivetrain/improved_down_estimator_test.cc
+++ b/frc971/control_loops/drivetrain/improved_down_estimator_test.cc
@@ -4,6 +4,7 @@
 #include <random>
 
 #include "aos/testing/random_seed.h"
+#include "frc971/control_loops/drivetrain/drivetrain_test_lib.h"
 #include "frc971/control_loops/drivetrain/improved_down_estimator.h"
 #include "glog/logging.h"
 #include "gtest/gtest.h"
@@ -91,7 +92,8 @@
 }
 
 TEST(DownEstimatorTest, UkfConstantRotation) {
-  drivetrain::DrivetrainUkf dtukf;
+  drivetrain::DrivetrainUkf dtukf(
+      drivetrain::testing::GetTestDrivetrainConfig());
   const Eigen::Vector3d ux = Eigen::Vector3d::UnitX();
   EXPECT_EQ(0.0,
             (Eigen::Vector3d(0.0, 0.0, 1.0) - dtukf.H(dtukf.X_hat().coeffs()))
@@ -112,7 +114,8 @@
 
 // Tests that the euler angles in the status message are correct.
 TEST(DownEstimatorTest, UkfEulerStatus) {
-  drivetrain::DrivetrainUkf dtukf;
+  drivetrain::DrivetrainUkf dtukf(
+      drivetrain::testing::GetTestDrivetrainConfig());
   const Eigen::Vector3d ux = Eigen::Vector3d::UnitX();
   const Eigen::Vector3d uy = Eigen::Vector3d::UnitY();
   const Eigen::Vector3d uz = Eigen::Vector3d::UnitZ();
@@ -167,7 +170,8 @@
 // that we are slightly rotated, that we eventually adjust our estimate to be
 // correct.
 TEST(DownEstimatorTest, UkfAccelCorrectsBias) {
-  drivetrain::DrivetrainUkf dtukf;
+  drivetrain::DrivetrainUkf dtukf(
+      drivetrain::testing::GetTestDrivetrainConfig());
   const Eigen::Vector3d ux = Eigen::Vector3d::UnitX();
   Eigen::Matrix<double, 3, 1> measurement;
   // Supply the accelerometer with a slightly off reading to ensure that we
@@ -192,7 +196,8 @@
 // that we are slightly rotated, that we eventually adjust our estimate to be
 // correct.
 TEST(DownEstimatorTest, UkfIgnoreBadAccel) {
-  drivetrain::DrivetrainUkf dtukf;
+  drivetrain::DrivetrainUkf dtukf(
+      drivetrain::testing::GetTestDrivetrainConfig());
   const Eigen::Vector3d uy = Eigen::Vector3d::UnitY();
   Eigen::Matrix<double, 3, 1> measurement;
   // Set up a scenario where, if we naively took the accelerometer readings, we
diff --git a/frc971/control_loops/drivetrain/libspline.cc b/frc971/control_loops/drivetrain/libspline.cc
index 5ad175e..a9820f7 100644
--- a/frc971/control_loops/drivetrain/libspline.cc
+++ b/frc971/control_loops/drivetrain/libspline.cc
@@ -8,7 +8,7 @@
 #include "frc971/control_loops/drivetrain/distance_spline.h"
 #include "frc971/control_loops/drivetrain/spline.h"
 #include "frc971/control_loops/drivetrain/trajectory.h"
-#include "y2019/control_loops/drivetrain/drivetrain_base.h"
+#include "y2020/control_loops/drivetrain/drivetrain_base.h"
 
 namespace frc971 {
 namespace control_loops {
@@ -124,7 +124,7 @@
   Trajectory *NewTrajectory(DistanceSpline *spline, double vmax,
                             int num_distance) {
     return new Trajectory(
-        spline, ::y2019::control_loops::drivetrain::GetDrivetrainConfig(), vmax,
+        spline, ::y2020::control_loops::drivetrain::GetDrivetrainConfig(), vmax,
         num_distance);
   }
 
diff --git a/frc971/control_loops/drivetrain/splinedrivetrain.h b/frc971/control_loops/drivetrain/splinedrivetrain.h
index b28334c..051e71a 100644
--- a/frc971/control_loops/drivetrain/splinedrivetrain.h
+++ b/frc971/control_loops/drivetrain/splinedrivetrain.h
@@ -138,7 +138,7 @@
       (::Eigen::DiagonalMatrix<double, 5>().diagonal()
            << 1.0 / ::std::pow(0.12, 2),
        1.0 / ::std::pow(0.12, 2), 1.0 / ::std::pow(0.1, 2),
-       1.0 / ::std::pow(1.5, 2), 1.0 / ::std::pow(1.5, 2))
+       1.0 / ::std::pow(1.0, 2), 1.0 / ::std::pow(1.0, 2))
           .finished()
           .asDiagonal();
   const ::Eigen::DiagonalMatrix<double, 2> R =
diff --git a/frc971/control_loops/drivetrain/trajectory.cc b/frc971/control_loops/drivetrain/trajectory.cc
index 43b1c30..4da3dee 100644
--- a/frc971/control_loops/drivetrain/trajectory.cc
+++ b/frc971/control_loops/drivetrain/trajectory.cc
@@ -37,7 +37,7 @@
                 ? ::std::max(100, static_cast<int>(spline_->length() / 0.0025))
                 : num_distance,
             vmax),
-      plan_segment_type_(plan_.size() - 1, SegmentType::VELOCITY_LIMITED) {}
+      plan_segment_type_(plan_.size(), SegmentType::VELOCITY_LIMITED) {}
 
 void Trajectory::LateralAccelPass() {
   for (size_t i = 0; i < plan_.size(); ++i) {
diff --git a/frc971/control_loops/hybrid_state_feedback_loop.h b/frc971/control_loops/hybrid_state_feedback_loop.h
index a6160e7..f033a89 100644
--- a/frc971/control_loops/hybrid_state_feedback_loop.h
+++ b/frc971/control_loops/hybrid_state_feedback_loop.h
@@ -75,6 +75,9 @@
     ::std::swap(coefficients_, other.coefficients_);
     X_.swap(other.X_);
     Y_.swap(other.Y_);
+    A_.swap(other.A_);
+    B_.swap(other.B_);
+    DelayedU_.swap(other.DelayedU_);
   }
 
   virtual ~StateFeedbackHybridPlant() {}
@@ -139,7 +142,7 @@
     A_.setZero();
     B_.setZero();
     DelayedU_.setZero();
-    UpdateAB(::std::chrono::milliseconds(5));
+    UpdateAB(::std::chrono::microseconds(5050));
   }
 
   // Assert that U is within the hardware range.
@@ -155,6 +158,8 @@
   // Computes the new X and Y given the control input.
   void Update(const Eigen::Matrix<Scalar, number_of_inputs, 1> &U,
               ::std::chrono::nanoseconds dt, Scalar voltage_battery) {
+    CHECK_NE(dt, std::chrono::nanoseconds(0));
+
     // Powers outside of the range are more likely controller bugs than things
     // that the plant should deal with.
     CheckU(U);
@@ -167,8 +172,6 @@
     DelayedU_ = U;
   }
 
-  Eigen::Matrix<Scalar, number_of_inputs, 1> DelayedU_;
-
   Eigen::Matrix<Scalar, number_of_states, 1> Update(
       const Eigen::Matrix<Scalar, number_of_states, 1> X,
       const Eigen::Matrix<Scalar, number_of_inputs, 1> &U,
@@ -177,7 +180,11 @@
     // or the unit delay...  Might want to do that if we care about performance
     // again.
     UpdateAB(dt);
-    return A() * X + B() * U;
+    const Eigen::Matrix<Scalar, number_of_states, 1> new_X =
+        A() * X + B() * DelayedU_;
+    DelayedU_ = U;
+
+    return new_X;
   }
 
  protected:
@@ -202,6 +209,8 @@
       number_of_states, number_of_inputs, number_of_outputs>>>
       coefficients_;
 
+  Eigen::Matrix<Scalar, number_of_inputs, 1> DelayedU_;
+
   int index_;
 
   DISALLOW_COPY_AND_ASSIGN(StateFeedbackHybridPlant);
@@ -243,9 +252,13 @@
           *observers)
       : coefficients_(::std::move(*observers)) {}
 
-  HybridKalman(HybridKalman &&other)
-      : X_hat_(other.X_hat_), index_(other.index_) {
+  HybridKalman(HybridKalman &&other) : index_(other.index_) {
     ::std::swap(coefficients_, other.coefficients_);
+
+    X_hat_.swap(other.X_hat_);
+    P_.swap(other.P_);
+    Q_.swap(other.Q_);
+    R_.swap(other.R_);
   }
 
   // Getters for Q
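The `Update` overload changed above now computes `new_X = A() * X + B() * DelayedU_` and only then stores `U` into `DelayedU_`, so the plant applies last cycle's voltage (the unit delay the nearby comment alludes to). A scalar sketch of that data flow, illustrative only and not the Eigen-based real implementation:
```cpp
// x[k+1] = a * x[k] + b * u[k-1]: the input applied over an interval is the
// one computed on the previous cycle.
struct DelayedScalarPlant {
  double a, b;
  double delayed_u = 0.0;

  double Update(double x, double u) {
    const double new_x = a * x + b * delayed_u;
    delayed_u = u;  // This cycle's input takes effect on the next update.
    return new_x;
  }
};
```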
diff --git a/frc971/control_loops/pose.h b/frc971/control_loops/pose.h
index 20672cf..ab8afd0 100644
--- a/frc971/control_loops/pose.h
+++ b/frc971/control_loops/pose.h
@@ -42,6 +42,8 @@
 // position of (10, -5, 0) and a yaw of pi / 2, that suggests the robot is
 // facing straight to the left from the driver's perspective and is placed 10m
 // from the driver's station wall and 5m to the right of the center of the wall.
+// For 2020, we move the origin to be the center of the field and make positive
+// x always point towards the red alliance driver stations.
 //
 // Furthermore, Poses can be chained such that a Pose can be placed relative to
 // another Pose; the other Pose can dynamically update, thus allowing us to,
@@ -134,7 +136,7 @@
   // If new_base == nullptr, provides a Pose referenced to the global frame.
   // Note that the lifetime of new_base should be greater than the lifetime of
   // the returned object (unless new_base == nullptr).
-  TypedPose Rebase(const TypedPose<Scalar> *new_base) const;
+  [[nodiscard]] TypedPose Rebase(const TypedPose<Scalar> *new_base) const;
 
   // Convert this pose to the heading/distance/skew numbers that we
   // traditionally use for EKF corrections.
diff --git a/frc971/control_loops/python/BUILD b/frc971/control_loops/python/BUILD
index 1fae02b..7953474 100644
--- a/frc971/control_loops/python/BUILD
+++ b/frc971/control_loops/python/BUILD
@@ -167,22 +167,22 @@
     name = "spline_graph",
     srcs = [
         "color.py",
-        "spline_drawing.py",
-        "spline_writer.py",
+        "graph.py",
         "path_edit.py",
         "points.py",
-        "graph.py",
+        "spline_drawing.py",
         "spline_graph.py",
+        "spline_writer.py",
     ],
     legacy_create_init = False,
     restricted_to = ["//tools:k8"],
     visibility = ["//visibility:public"],
     deps = [
+        ":basic_window",
         ":libspline",
         ":python_init",
-        "@python_gtk",
         "@matplotlib_repo//:matplotlib2.7",
-        ":basic_window",
+        "@python_gtk",
     ],
 )
 
diff --git a/frc971/control_loops/python/constants.py b/frc971/control_loops/python/constants.py
index 7b45215..e0b4878 100644
--- a/frc971/control_loops/python/constants.py
+++ b/frc971/control_loops/python/constants.py
@@ -11,18 +11,31 @@
 args = arg_parser.parse_args()
 SCREEN_SIZE = args.size
 
-WIDTH_OF_FIELD_IN_METERS = 8.258302
-
 WIDTH_OF_ROBOT = 0.65
 LENGTH_OF_ROBOT = 0.8
 
-ROBOT_SIDE_TO_BALL_CENTER = 0.15 #Placeholder value
+ROBOT_SIDE_TO_BALL_CENTER = 0.15 # Placeholder value
 BALL_RADIUS = 0.165
-ROBOT_SIDE_TO_HATCH_PANEL = 0.1 #Placeholder value
+ROBOT_SIDE_TO_HATCH_PANEL = 0.1 # Placeholder value
 HATCH_PANEL_WIDTH = 0.4826
 
-def pxToM(p):
+FIELD = 2020
+
+if FIELD == 2019:
+    WIDTH_OF_FIELD_IN_METERS = 8.258302 # Half Field
+elif FIELD == 2020:
+    WIDTH_OF_FIELD_IN_METERS = 15.98295 # Full Field
+    LENGTH_OF_FIELD_IN_METERS = 8.21055 # Full Field
+
+def pxToM(p, length=False):
+    if length:
+        return p * LENGTH_OF_FIELD_IN_METERS / (SCREEN_SIZE/2)
     return p * WIDTH_OF_FIELD_IN_METERS / SCREEN_SIZE
 
-def mToPx(m):
+def mToPx(m, length=False):
+    if length:
+        return (m*(SCREEN_SIZE/2)/LENGTH_OF_FIELD_IN_METERS)
     return (m*SCREEN_SIZE/WIDTH_OF_FIELD_IN_METERS)
+
+def inToM(i):
+    return (i*0.0254)
diff --git a/frc971/control_loops/python/control_loop.py b/frc971/control_loops/python/control_loop.py
index 27a659a..9c36c90 100644
--- a/frc971/control_loops/python/control_loop.py
+++ b/frc971/control_loops/python/control_loop.py
@@ -284,9 +284,9 @@
 
     def InitializeState(self):
         """Sets X, Y, and X_hat to zero defaults."""
-        self.X = numpy.zeros((self.A.shape[0], 1))
+        self.X = numpy.matrix(numpy.zeros((self.A.shape[0], 1)))
         self.Y = self.C * self.X
-        self.X_hat = numpy.zeros((self.A.shape[0], 1))
+        self.X_hat = numpy.matrix(numpy.zeros((self.A.shape[0], 1)))
 
     def PlaceControllerPoles(self, poles):
         """Places the controller poles.
diff --git a/frc971/control_loops/python/drawing_constants.py b/frc971/control_loops/python/drawing_constants.py
index 89979f0..a2ddf5d 100644
--- a/frc971/control_loops/python/drawing_constants.py
+++ b/frc971/control_loops/python/drawing_constants.py
@@ -3,7 +3,6 @@
 from constants import *
 import numpy as np
 
-
 def set_color(cr, color, a=1):
     if color.a == 1.0:
         cr.set_source_rgba(color.r, color.g, color.b, a)
@@ -37,6 +36,11 @@
     cr.stroke()
     set_color(cr, palette["WHITE"])
 
+def draw_circle(cr, x, y, radius, color=palette["RED"]):
+    set_color(cr, color)
+    cr.arc(x, y, radius, 0, 2*np.pi)
+    cr.fill()
+    cr.stroke()
 
 def draw_control_points(cr, points, width=10, radius=4, color=palette["BLUE"]):
     for i in range(0, len(points)):
@@ -46,12 +50,131 @@
         cr.fill()
         set_color(cr, palette["WHITE"])
 
-
 def display_text(cr, text, widtha, heighta, widthb, heightb):
     cr.scale(widtha, -heighta)
     cr.show_text(text)
     cr.scale(widthb, -heightb)
 
+def markers(cr):
+    SHOW_MARKERS = False
+    if SHOW_MARKERS:
+        # Shield Generator Reference
+        color = palette["BLUE"]
+        yorigin = 0-SCREEN_SIZE/2 # Move origin to bottom left
+        # Top Left
+        draw_circle(cr, mToPx(inToM(206.625)), yorigin + mToPx(inToM(212.097), True), 10, color)
+        # Top Right
+        draw_circle(cr, mToPx(inToM(373.616)), yorigin + mToPx(inToM(281.26), True), 10, color)
+        # Bottom Right
+        draw_circle(cr, mToPx(inToM(422.625)), yorigin + mToPx(inToM(124.67), True), 10, color)
+        # Bottom Left
+        draw_circle(cr, mToPx(inToM(255.634)), yorigin + mToPx(inToM(55.5), True), 10, color)
+
+        # Trench Run Reference
+        color = palette["GREEN"]
+        # Bottom Trench Run
+        # Bottom Right
+        draw_circle(cr, mToPx(inToM(206.625)), yorigin, 10, color)
+        # Top Right
+        draw_circle(cr, mToPx(inToM(206.625)), yorigin + mToPx(inToM(55.5), True), 10, color)
+        # Top Left
+        draw_circle(cr, mToPx(inToM(422.625)), yorigin + mToPx(inToM(55.5), True), 10, color)
+        # Bottom Left
+        draw_circle(cr, mToPx(inToM(422.625)), yorigin, 10, color)
+
+        # Top Trench Run
+        # Top Right
+        draw_circle(cr, mToPx(inToM(206.625)), yorigin + mToPx(inToM(323.25), True), 10, color)
+        # Bottom Right
+        draw_circle(cr, mToPx(inToM(206.625)), yorigin + mToPx(inToM(281.26), True), 10, color)
+        # Top Left
+        draw_circle(cr, mToPx(inToM(422.625)), yorigin + mToPx(inToM(281.26), True), 10, color)
+        # Bottom Left
+        draw_circle(cr, mToPx(inToM(422.625)), yorigin + mToPx(inToM(323.25), True), 10, color)
+        cr.stroke()
+
+def draw_init_lines(cr):
+    yorigin = 0-SCREEN_SIZE/2 # Move origin to bottom left
+    set_color(cr, palette["RED"])
+    cr.move_to(mToPx(inToM(120)), yorigin)
+    cr.line_to(mToPx(inToM(120)), yorigin + mToPx(8.21055, True))
+
+    cr.move_to(mToPx(inToM(505.25)), yorigin)
+    cr.line_to(mToPx(inToM(505.25)), yorigin + mToPx(8.21055, True))
+
+    cr.stroke()
+
+def draw_trench_run(cr):
+    set_color(cr, palette["GREEN"])
+    yorigin = 0-SCREEN_SIZE/2 # Move origin to bottom left
+    cr.move_to(mToPx(inToM(206.625)), yorigin)
+    cr.line_to(mToPx(inToM(206.625)), yorigin + mToPx(inToM(55.5), True))
+
+    cr.move_to(mToPx(inToM(206.625)), yorigin + mToPx(inToM(55.5), True))
+    cr.line_to(mToPx(inToM(422.625)), yorigin + mToPx(inToM(55.5), True))
+
+    cr.move_to(mToPx(inToM(422.625)), yorigin + mToPx(inToM(55.5), True))
+    cr.line_to(mToPx(inToM(422.625)), yorigin)
+
+    cr.move_to(mToPx(inToM(206.625)), yorigin + mToPx(inToM(323.25), True))
+    cr.line_to(mToPx(inToM(206.625)), yorigin + mToPx(inToM(281.26), True))
+
+    cr.move_to(mToPx(inToM(206.625)), yorigin + mToPx(inToM(281.26), True))
+    cr.line_to(mToPx(inToM(422.625)), yorigin + mToPx(inToM(281.26), True))
+
+    cr.move_to(mToPx(inToM(422.625)), yorigin + mToPx(inToM(281.26), True))
+    cr.line_to(mToPx(inToM(422.625)), yorigin + mToPx(inToM(323.25), True))
+
+    cr.stroke()
+
+def draw_shield_generator(cr):
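+    # Outlines the shield generator (the tilted rectangle at mid-field) in blue.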
+    set_color(cr, palette["BLUE"])
+    yorigin = -SCREEN_SIZE / 2  # Move origin to bottom left
+
+    cr.move_to(mToPx(inToM(206.625)), yorigin + mToPx(inToM(212.097), True))
+    cr.line_to(mToPx(inToM(373.616)), yorigin + mToPx(inToM(281.26), True))
+
+    cr.move_to(mToPx(inToM(373.616)), yorigin + mToPx(inToM(281.26), True))
+    cr.line_to(mToPx(inToM(422.625)), yorigin + mToPx(inToM(124.67), True))
+
+    cr.move_to(mToPx(inToM(422.625)), yorigin + mToPx(inToM(124.67), True))
+    cr.line_to(mToPx(inToM(255.634)), yorigin + mToPx(inToM(55.5), True))
+
+    cr.move_to(mToPx(inToM(255.634)), yorigin + mToPx(inToM(55.5), True))
+    cr.line_to(mToPx(inToM(206.625)), yorigin + mToPx(inToM(212.097), True))
+
+    cr.stroke()
+
+def draw_control_panel(cr): # Base plates are not included
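+    # Outlines the two control panels on the 2020 field in light grey.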
+    set_color(cr, palette["LIGHT_GREY"])
+    yorigin = -SCREEN_SIZE / 2  # Move origin to bottom left
+    # Bottom Control Panel
+    # Top Line
+    cr.move_to(mToPx(inToM(225.624)), yorigin + mToPx(inToM(55.5), True))
+    cr.line_to(mToPx(inToM(285.624)), yorigin + mToPx(inToM(55.5), True))
+
+    # Left Line
+    cr.move_to(mToPx(inToM(225.624)), yorigin + mToPx(inToM(55.5), True))
+    cr.line_to(mToPx(inToM(225.624)), yorigin)
+
+    # Right Line
+    cr.move_to(mToPx(inToM(285.624)), yorigin + mToPx(inToM(55.5), True))
+    cr.line_to(mToPx(inToM(285.624)), yorigin)
+
+    # Top Control Panel
+    # Bottom Line
+    cr.move_to(mToPx(inToM(403.616)), yorigin + mToPx(inToM(281.26), True))
+    cr.line_to(mToPx(inToM(343.616)), yorigin + mToPx(inToM(281.26), True))
+
+    # Right Line
+    cr.move_to(mToPx(inToM(403.616)), yorigin + mToPx(inToM(281.26), True))
+    cr.line_to(mToPx(inToM(403.616)), yorigin + mToPx(inToM(323.25), True))
+
+    # Left Line
+    cr.move_to(mToPx(inToM(343.616)), yorigin + mToPx(inToM(281.26), True))
+    cr.line_to(mToPx(inToM(343.616)), yorigin + mToPx(inToM(323.25), True))
+
+    cr.stroke()
 
 def draw_HAB(cr):
     # BASE Constants
@@ -307,7 +430,6 @@
                  mToPx(1.41605))
     cr.stroke()
 
-
 def draw_points(cr, p, size):
     for i in range(0, len(p)):
         draw_px_cross(cr, p[i][0], p[i][1], size,
diff --git a/frc971/control_loops/python/lib_spline_test.py b/frc971/control_loops/python/lib_spline_test.py
index d544cbb..b4efe72 100644
--- a/frc971/control_loops/python/lib_spline_test.py
+++ b/frc971/control_loops/python/lib_spline_test.py
@@ -32,7 +32,7 @@
         trajectory = Trajectory(dSpline)
         trajectory.Plan()
         plan = trajectory.GetPlanXVA(5.05*1e-3)
-        self.assertEqual(plan.shape, (3, 617))
+        self.assertEqual(plan.shape, (3, 624))
 
     def testLimits(self):
         """ A plan with a lower limit should take longer. """
@@ -44,7 +44,7 @@
         trajectory.LimitVelocity(0, trajectory.Length(), 3)
         trajectory.Plan()
         plan = trajectory.GetPlanXVA(5.05*1e-3)
-        self.assertEqual(plan.shape, (3, 650))
+        self.assertEqual(plan.shape, (3, 656))
 
 
 if __name__ == '__main__':
diff --git a/frc971/control_loops/python/path_edit.py b/frc971/control_loops/python/path_edit.py
index 0d1b263..c186711 100755
--- a/frc971/control_loops/python/path_edit.py
+++ b/frc971/control_loops/python/path_edit.py
@@ -72,6 +72,8 @@
         self.inValue = None
         self.startSet = False
 
+        self.module_path = os.path.dirname(os.path.realpath(sys.argv[0]))
+
     """set extents on images"""
 
     def reinit_extents(self):
@@ -93,9 +95,17 @@
         return self.all_controls[self.get_index_of_nearest_point()]
 
     def draw_field_elements(self, cr):
-        draw_HAB(cr)
-        draw_rockets(cr)
-        draw_cargo_ship(cr)
+        if FIELD == 2019:
+            draw_HAB(cr)
+            draw_rockets(cr)
+            draw_cargo_ship(cr)
+        elif FIELD == 2020:
+            set_color(cr, palette["BLACK"])
+            markers(cr)
+            draw_shield_generator(cr)
+            draw_trench_run(cr)
+            draw_init_lines(cr)
+            draw_control_panel(cr)
 
     def draw_robot_at_point(self, cr, i, p, spline):
         p1 = [mToPx(spline.Point(i)[0]), mToPx(spline.Point(i)[1])]
@@ -206,16 +216,16 @@
                      self.extents_y_max - self.extents_y_min)
         cr.fill()
 
-        #Drawing the switch and scale in the field
         cr.move_to(0, 50)
         cr.show_text('Press "e" to export')
         cr.show_text('Press "i" to import')
 
-        set_color(cr, palette["WHITE"])
-        cr.rectangle(0, -mToPx(8.2296 / 2.0), SCREEN_SIZE, SCREEN_SIZE)
-        cr.fill()
         set_color(cr, palette["BLACK"])
-        cr.rectangle(0, -mToPx(8.2296 / 2.0), SCREEN_SIZE, SCREEN_SIZE)
+        if FIELD == 2020:
+            cr.rectangle(0, mToPx(-7.991475), SCREEN_SIZE, SCREEN_SIZE/2)
+        else:
+            cr.rectangle(0, mToPx(-7.991475), SCREEN_SIZE, SCREEN_SIZE)
         cr.set_line_join(cairo.LINE_JOIN_ROUND)
         cr.stroke()
         self.draw_field_elements(cr)
@@ -312,35 +322,45 @@
             self.spline_edit = self.points.updates_for_mouse_move(
                 self.index_of_edit, self.spline_edit, self.x, self.y, difs)
 
+    def export_json(self, file_name):
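+        """Exports the current splines to spline_jsons/<file_name>."""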
+        self.path_to_export = os.path.join(self.module_path, "spline_jsons",
+                                           file_name)
+        if not file_name.endswith(".json"):
+            print("Error: Filename doesn't end in .json")
+        else:
+            # Export the spline control points to the json file.
+            self.mode = Mode.kEditing
+            exportList = [l.tolist() for l in self.points.getSplines()]
+            with open(self.path_to_export, mode='w') as points_file:
+                json.dump(exportList, points_file)
+
+    def import_json(self, file_name):
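+        """Replaces the current splines with those in spline_jsons/<file_name>."""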
+        self.path_to_export = os.path.join(self.module_path, "spline_jsons",
+                                           file_name)
+        if not file_name.endswith(".json"):
+            print("Error: Filename doesn't end in .json")
+        else:
+            # Import spline control points from the json file.
+            self.mode = Mode.kEditing
+            self.points.resetPoints()
+            self.points.resetSplines()
+            print("LOADING LOAD FROM " + file_name) # Load takes a few seconds
+            with open(self.path_to_export) as points_file:
+                self.points.setUpSplines(json.load(points_file))
+
+            self.points.update_lib_spline()
+            print("SPLINES LOADED")
+
     def do_key_press(self, event, file_name):
         keyval = Gdk.keyval_to_lower(event.keyval)
-        module_path = os.path.dirname(os.path.realpath(sys.argv[0]))
-        self.path_to_export = os.path.join(module_path,
-                                           "spline_jsons/" + file_name)
         if keyval == Gdk.KEY_q:
             print("Found q key and exiting.")
             quit_main_loop()
-        file_name_end = file_name[-5:]
-        if file_name_end != ".json":
-            print("Error: Filename doesn't end in .json")
-        else:
-            if keyval == Gdk.KEY_e:
-                # Will export to json file
-                self.mode = Mode.kEditing
-                print('out to: ', self.path_to_export)
-                exportList = [l.tolist() for l in self.points.getSplines()]
-                with open(self.path_to_export, mode='w') as points_file:
-                    json.dump(exportList, points_file)
+        if keyval == Gdk.KEY_e:
+            self.export_json(file_name)
 
-            if keyval == Gdk.KEY_i:
-                # import from json file
-                self.mode = Mode.kEditing
-                self.points.resetPoints()
-                self.points.resetSplines()
-                with open(self.path_to_export) as points_file:
-                    self.points.setUpSplines(json.load(points_file))
-
-                self.points.update_lib_spline()
+        if keyval == Gdk.KEY_i:
+            self.import_json(file_name)
 
         if keyval == Gdk.KEY_p:
             self.mode = Mode.kPlacing
@@ -393,97 +413,7 @@
                             self.held_x = self.x
 
     def do_button_press(self, event):
-        print("button press activated")
         # Be consistent with the scaling in the drawing_area
         self.x = event.x * 2
         self.y = event.y * 2
         self.button_press_action()
-
-
-class GridWindow(Gtk.Window):
-    def method_connect(self, event, cb):
-        def handler(obj, *args):
-            cb(*args)
-
-        print("Method_connect ran")
-        self.connect(event, handler)
-
-    def mouse_move(self, event):
-        #Changes event.x and event.y to be relative to the center.
-        x = event.x - self.drawing_area.window_shape[0] / 2
-        y = self.drawing_area.window_shape[1] / 2 - event.y
-        scale = self.drawing_area.get_current_scale()
-        event.x = x / scale + self.drawing_area.center[0]
-        event.y = y / scale + self.drawing_area.center[1]
-        self.drawing_area.mouse_move(event)
-        self.queue_draw()
-
-    def button_press(self, event):
-        print("button press activated")
-        o_x = event.x
-        o_y = event.y
-        x = event.x - self.drawing_area.window_shape[0] / 2
-        y = self.drawing_area.window_shape[1] / 2 - event.y
-        scale = 2 * self.drawing_area.get_current_scale()
-        event.x = x / scale + self.drawing_area.center[0]
-        event.y = y / scale + self.drawing_area.center[1]
-        self.drawing_area.do_button_press(event)
-        event.x = o_x
-        event.y = o_y
-
-    def key_press(self, event):
-        print("key press activated")
-        self.drawing_area.do_key_press(event, self.file_name_box.get_text())
-        self.queue_draw()
-
-    def configure(self, event):
-        print("configure activated")
-        self.drawing_area.window_shape = (event.width, event.height)
-
-    def __init__(self):
-        Gtk.Window.__init__(self)
-
-        self.set_default_size(1.5 * SCREEN_SIZE, SCREEN_SIZE)
-
-        flowBox = Gtk.FlowBox()
-        flowBox.set_valign(Gtk.Align.START)
-        flowBox.set_selection_mode(Gtk.SelectionMode.NONE)
-
-        flowBox.set_valign(Gtk.Align.START)
-
-        self.add(flowBox)
-
-        container = Gtk.Fixed()
-        flowBox.add(container)
-
-        self.eventBox = Gtk.EventBox()
-        container.add(self.eventBox)
-
-        self.eventBox.set_events(Gdk.EventMask.BUTTON_PRESS_MASK
-                                 | Gdk.EventMask.BUTTON_RELEASE_MASK
-                                 | Gdk.EventMask.POINTER_MOTION_MASK
-                                 | Gdk.EventMask.SCROLL_MASK
-                                 | Gdk.EventMask.KEY_PRESS_MASK)
-
-        #add the graph box
-        self.drawing_area = GTK_Widget()
-        self.eventBox.add(self.drawing_area)
-
-        self.method_connect("key-release-event", self.key_press)
-        self.method_connect("button-release-event", self.button_press)
-        self.method_connect("configure-event", self.configure)
-        self.method_connect("motion_notify_event", self.mouse_move)
-
-        self.file_name_box = Gtk.Entry()
-        self.file_name_box.set_size_request(200, 40)
-
-        self.file_name_box.set_text("File")
-        self.file_name_box.set_editable(True)
-
-        container.put(self.file_name_box, 0, 0)
-
-        self.show_all()
-
-
-window = GridWindow()
-RunApp()
diff --git a/frc971/control_loops/python/spline_graph.py b/frc971/control_loops/python/spline_graph.py
index 2fd7964..7885ec1 100755
--- a/frc971/control_loops/python/spline_graph.py
+++ b/frc971/control_loops/python/spline_graph.py
@@ -11,11 +11,10 @@
         def handler(obj, *args):
             cb(*args)
 
-        print("Method_connect ran")
         self.connect(event, handler)
 
     def mouse_move(self, event):
-        #Changes event.x and event.y to be relative to the center.
+        # Changes event.x and event.y to be relative to the center.
         x = event.x - self.drawing_area.window_shape[0] / 2
         y = self.drawing_area.window_shape[1] / 2 - event.y
         scale = self.drawing_area.get_current_scale()
@@ -25,31 +24,31 @@
         self.queue_draw()
 
     def button_press(self, event):
-        print("button press activated")
-        o_x = event.x
-        o_y = event.y
+        original_x = event.x
+        original_y = event.y
         x = event.x - self.drawing_area.window_shape[0] / 2
         y = self.drawing_area.window_shape[1] / 2 - event.y
         scale = 2 * self.drawing_area.get_current_scale()
         event.x = x / scale + self.drawing_area.center[0]
         event.y = y / scale + self.drawing_area.center[1]
         self.drawing_area.do_button_press(event)
-        event.x = o_x
-        event.y = o_y
+        event.x = original_x
+        event.y = original_y
 
     def key_press(self, event):
-        print("key press activated")
         self.drawing_area.do_key_press(event, self.file_name_box.get_text())
         self.queue_draw()
 
     def configure(self, event):
-        print("configure activated")
         self.drawing_area.window_shape = (event.width, event.height)
 
-    # handle submitting a constraint
-    def on_submit_click(self, widget):
-        self.drawing_area.inConstraint = int(self.constraint_box.get_text())
-        self.drawing_area.inValue = int(self.value_box.get_text())
+    def output_json_clicked(self, button):
+        print("OUTPUT JSON CLICKED")
+        self.drawing_area.export_json(self.file_name_box.get_text())
+
+    def input_json_clicked(self, button):
+        print("INPUT JSON CLICKED")
+        self.drawing_area.import_json(self.file_name_box.get_text())
 
     def __init__(self):
         Gtk.Window.__init__(self)
@@ -88,11 +87,22 @@
         self.file_name_box = Gtk.Entry()
         self.file_name_box.set_size_request(200, 40)
 
-        self.file_name_box.set_text("File")
+        self.file_name_box.set_text("output_file_name.json")
         self.file_name_box.set_editable(True)
 
         container.put(self.file_name_box, 0, 0)
 
+        self.output_json = Gtk.Button.new_with_label("Output")
+        self.output_json.set_size_request(100, 40)
+        self.output_json.connect("clicked", self.output_json_clicked)
+
+        self.input_json = Gtk.Button.new_with_label("Import")
+        self.input_json.set_size_request(100, 40)
+        self.input_json.connect("clicked", self.input_json_clicked)
+
+        container.put(self.output_json, 210, 0)
+        container.put(self.input_json, 320, 0)
+
         self.show_all()
 
 
diff --git a/frc971/control_loops/state_feedback_loop.h b/frc971/control_loops/state_feedback_loop.h
index 83a54f0..168df43 100644
--- a/frc971/control_loops/state_feedback_loop.h
+++ b/frc971/control_loops/state_feedback_loop.h
@@ -108,6 +108,8 @@
   }
   Scalar U_max(int i, int j = 0) const { return U_max()(i, j); }
 
+  // Returns the sample period of the current coefficients.
+  std::chrono::nanoseconds dt() const { return coefficients().dt; }
+
   const Eigen::Matrix<Scalar, number_of_states, 1> &X() const { return X_; }
   Scalar X(int i, int j = 0) const { return X()(i, j); }
   const Eigen::Matrix<Scalar, number_of_outputs, 1> &Y() const { return Y_; }
@@ -397,11 +399,11 @@
       : plant_(::std::move(other.plant_)),
         controller_(::std::move(other.controller_)),
         observer_(::std::move(other.observer_)) {
+    ff_U_.swap(other.ff_U_);
     R_.swap(other.R_);
     next_R_.swap(other.next_R_);
     U_.swap(other.U_);
     U_uncapped_.swap(other.U_uncapped_);
-    ff_U_.swap(other.ff_U_);
   }
 
   virtual ~StateFeedbackLoop() {}
@@ -503,9 +505,7 @@
     return controller().Kff() * (next_R() - plant().A() * R());
   }
 
-  // stop_motors is whether or not to output all 0s.
-  void Update(bool stop_motors,
-              ::std::chrono::nanoseconds dt = ::std::chrono::milliseconds(5)) {
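+  // Runs just the controller portion of Update(): computes and caps U() and
+  // updates the feed-forwards reference, without running the observer.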
+  void UpdateController(bool stop_motors) {
     if (stop_motors) {
       U_.setZero();
       U_uncapped_.setZero();
@@ -514,10 +514,15 @@
       U_ = U_uncapped_ = ControllerOutput();
       CapU();
     }
+    UpdateFFReference();
+  }
+
+  // stop_motors is whether or not to output all 0s.
+  void Update(bool stop_motors,
+              ::std::chrono::nanoseconds dt = ::std::chrono::milliseconds(0)) {
+    UpdateController(stop_motors);
 
     UpdateObserver(U_, dt);
-
-    UpdateFFReference();
   }
 
   // Updates R() after any CapU operations happen on U().
diff --git a/frc971/downloader.bzl b/frc971/downloader.bzl
index aff3420..11423e0 100644
--- a/frc971/downloader.bzl
+++ b/frc971/downloader.bzl
@@ -1,7 +1,15 @@
 load("//frc971/downloader:downloader.bzl", "aos_downloader")
 load("//tools/build_rules:label.bzl", "expand_label")
 
-def robot_downloader(start_binaries, binaries = [], data = [], dirs = None, default_target = None):
+
+def robot_downloader(start_binaries,
+                     name="download",
+                     binaries=[],
+                     data=[],
+                     dirs=None,
+                     default_target=None,
+                     restricted_to=["//tools:roborio"],
+                     target_type="roborio"):
     """Sets up the standard robot download targets.
 
     Attrs:
@@ -11,27 +19,30 @@
     """
 
     aos_downloader(
-        name = "download",
-        start_srcs = [
+        name=name,
+        start_srcs=([
             "//aos:prime_start_binaries",
-        ] + start_binaries,
-        srcs = [
+        ] if target_type == "roborio" else []) + start_binaries,
+        srcs=[
             "//aos:prime_binaries",
         ] + binaries + data,
-        dirs = dirs,
-        default_target = default_target,
-        restricted_to = ["//tools:roborio"],
+        dirs=dirs,
+        target_type=target_type,
+        default_target=default_target,
+        restricted_to=restricted_to,
     )
 
     aos_downloader(
-        name = "download_stripped",
-        start_srcs = [
+        name=name + "_stripped",
+        start_srcs=([
             "//aos:prime_start_binaries_stripped",
-        ] + [expand_label(binary) + ".stripped" for binary in start_binaries],
-        srcs = [
+        ] if target_type == "roborio" else []) +
+        [expand_label(binary) + ".stripped" for binary in start_binaries],
+        srcs=[
             "//aos:prime_binaries_stripped",
         ] + [expand_label(binary) + ".stripped" for binary in binaries] + data,
-        dirs = dirs,
-        default_target = default_target,
-        restricted_to = ["//tools:roborio"],
+        dirs=dirs,
+        target_type=target_type,
+        default_target=default_target,
+        restricted_to=restricted_to,
     )
diff --git a/frc971/downloader/downloader.bzl b/frc971/downloader/downloader.bzl
index a5d1bc1..75a1a9b 100644
--- a/frc971/downloader/downloader.bzl
+++ b/frc971/downloader/downloader.bzl
@@ -7,16 +7,16 @@
             "#!/bin/bash",
             "set -e",
             'cd "${BASH_SOURCE[0]}.runfiles/%s"' % ctx.workspace_name,
-        ] + ['%s %s --dirs %s -- %s "$@"' % (
+        ] + ['%s --dir %s --target "$@" --type %s %s' % (
             ctx.executable._downloader.short_path,
-            " ".join([src.short_path for src in d.downloader_srcs]),
             d.downloader_dir,
-            ctx.attr.default_target,
+            ctx.attr.target_type,
+            " ".join([src.short_path for src in d.downloader_srcs]),
         ) for d in ctx.attr.dirs] + [
-            'exec %s %s -- %s "$@"' % (
+            'exec %s --target "$@" --type %s %s' % (
                 ctx.executable._downloader.short_path,
+                ctx.attr.target_type,
                 " ".join([src.short_path for src in all_files]),
-                ctx.attr.default_target,
             ),
         ]),
     )
@@ -57,8 +57,6 @@
   srcs: The files to download. They currently all get shoved into one folder.
   dirs: A list of aos_downloader_dirs to download too.
   start_srcs: Like srcs, except they also get put into start_list.txt.
-  default_target: The default host to download to. If not specified, defaults to
-                  roboRIO-971.local.
 """
 
 aos_downloader = rule(
@@ -76,6 +74,9 @@
             mandatory = True,
             allow_files = True,
         ),
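+        # Which kind of device this downloads to ("roborio" or "pi").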
+        "target_type": attr.string(
+            default = "roborio",
+        ),
         "dirs": attr.label_list(
             mandatory = False,
             providers = [
@@ -83,9 +84,6 @@
                 "downloader_srcs",
             ],
         ),
-        "default_target": attr.string(
-            default = "roboRIO-971-frc.local",
-        ),
     },
     executable = True,
     outputs = {
diff --git a/frc971/downloader/downloader.py b/frc971/downloader/downloader.py
index e4826cf..42d4a7c 100644
--- a/frc971/downloader/downloader.py
+++ b/frc971/downloader/downloader.py
@@ -4,6 +4,7 @@
 
 from __future__ import print_function
 
+import argparse
 import sys
 import subprocess
 import re
@@ -16,61 +17,76 @@
     PKG_URL = "http://download.ni.com/ni-linux-rt/feeds/2015/arm/ipk/cortexa9-vfpv3/" + pkg
     subprocess.check_call(["wget", PKG_URL, "-O", pkg])
     try:
-        subprocess.check_call([
-            scp_path, "-S", ssh_path, pkg,
-            ssh_target + ":/tmp/" + pkg
-        ])
-        subprocess.check_call([
-            ssh_path, ssh_target, "opkg", "install",
-            "/tmp/" + pkg
-        ])
         subprocess.check_call(
-            [ssh_path, ssh_target, "rm", "/tmp/" + pkg])
+            [scp_path, "-S", ssh_path, pkg, ssh_target + ":/tmp/" + pkg])
+        subprocess.check_call(
+            [ssh_path, ssh_target, "opkg", "install", "/tmp/" + pkg])
+        subprocess.check_call([ssh_path, ssh_target, "rm", "/tmp/" + pkg])
     finally:
         subprocess.check_call(["rm", pkg])
 
 
 def main(argv):
-    args = argv[argv.index("--") + 1:]
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--target",
+                        type=str,
+                        default="roborio-971-frc.local",
+                        help="Target to deploy code to.")
+    parser.add_argument("--type",
+                        type=str,
+                        choices=["roborio", "pi"],
+                        required=True,
+                        help="Target type for deployment")
+    parser.add_argument(
+        "--dir",
+        type=str,
+        help="Directory within robot_code to copy the files to.")
+    parser.add_argument("srcs",
+                        type=str,
+                        nargs='+',
+                        help="List of files to copy over")
+    args = parser.parse_args(argv[1:])
 
     relative_dir = ""
     recursive = False
 
-    if "--dirs" in argv:
-        dirs_index = argv.index("--dirs")
-        srcs = argv[1:dirs_index]
-        relative_dir = argv[dirs_index + 1]
+    srcs = args.srcs
+    if args.dir is not None:
+        relative_dir = args.dir
         recursive = True
-    else:
-        srcs = argv[1:argv.index("--")]
 
-    ROBORIO_TARGET_DIR = "/home/admin/robot_code"
-    ROBORIO_USER = "admin"
-
-    target_dir = ROBORIO_TARGET_DIR
-    user = ROBORIO_USER
-    destination = args[-1]
+    destination = args.target
 
     result = re.match("(?:([^:@]+)@)?([^:@]+)(?::([^:@]+))?", destination)
     if not result:
-        print(
-            "Not sure how to parse destination \"%s\"!" % destination,
-            file=sys.stderr)
+        print("Not sure how to parse destination \"%s\"!" % destination,
+              file=sys.stderr)
         return 1
+    user = None
     if result.group(1):
         user = result.group(1)
     hostname = result.group(2)
+
     if result.group(3):
         target_dir = result.group(3)
 
+    if user is None:
+        if args.type == "pi":
+            user = "pi"
+        elif args.type == "roborio":
+            user = "admin"
+    # Only apply the default directory if the destination didn't specify one.
+    if not result.group(3):
+        target_dir = "/home/" + user + "/robot_code"
+
     ssh_target = "%s@%s" % (user, hostname)
 
     ssh_path = "external/ssh/ssh"
     scp_path = "external/ssh/scp"
 
+    subprocess.check_call([ssh_path, ssh_target, "mkdir", "-p", target_dir])
+
     rsync_cmd = ([
-        "external/rsync/usr/bin/rsync", "-e", ssh_path, "-c",
-        "-v", "-z", "--copy-links"
+        "external/rsync/usr/bin/rsync", "-e", ssh_path, "-c", "-v", "-z",
+        "--copy-links"
     ] + srcs + ["%s:%s/%s" % (ssh_target, target_dir, relative_dir)])
     try:
         subprocess.check_call(rsync_cmd)
@@ -88,12 +104,11 @@
             raise e
 
     if not recursive:
-        subprocess.check_call(
-            (ssh_path, ssh_target, "&&".join([
-                "chmod u+s %s/starter_exe" % target_dir,
-                "echo \'Done moving new executables into place\'",
-                "bash -c \'sync && sync && sync\'",
-            ])))
+        subprocess.check_call((ssh_path, ssh_target, "&&".join([
+            "chmod u+s %s/starter_exe" % target_dir,
+            "echo \'Done moving new executables into place\'",
+            "bash -c \'sync && sync && sync\'",
+        ])))
 
 
 if __name__ == "__main__":
diff --git a/frc971/zeroing/imu_zeroer.cc b/frc971/zeroing/imu_zeroer.cc
index b13b43d..9e82d13 100644
--- a/frc971/zeroing/imu_zeroer.cc
+++ b/frc971/zeroing/imu_zeroer.cc
@@ -35,15 +35,23 @@
          accel_averager_.GetRange() < kAccelMaxVariation;
 }
 
-void ImuZeroer::ProcessMeasurement(const IMUValues &values) {
+void ImuZeroer::InsertAndProcessMeasurement(const IMUValues &values) {
+  InsertMeasurement(values);
+  ProcessMeasurements();
+}
+
+void ImuZeroer::InsertMeasurement(const IMUValues &values) {
   last_gyro_sample_ << values.gyro_x(), values.gyro_y(), values.gyro_z();
   gyro_averager_.AddData(last_gyro_sample_);
   last_accel_sample_ << values.accelerometer_x(), values.accelerometer_y(),
                            values.accelerometer_z();
   accel_averager_.AddData(last_accel_sample_);
+}
+
+void ImuZeroer::ProcessMeasurements() {
   if (GyroZeroReady() && AccelZeroReady()) {
     ++good_iters_;
-    if (good_iters_ > kSamplesToAverage / 4) {
+    if (good_iters_ > kSamplesToAverage / 40) {
       const Eigen::Vector3d current_gyro_average = gyro_averager_.GetAverage();
       constexpr double kAverageUpdateWeight = 0.05;
       if (num_zeroes_ > 0) {
diff --git a/frc971/zeroing/imu_zeroer.h b/frc971/zeroing/imu_zeroer.h
index b55c6dd..fd82571 100644
--- a/frc971/zeroing/imu_zeroer.h
+++ b/frc971/zeroing/imu_zeroer.h
@@ -22,7 +22,10 @@
   ImuZeroer();
   bool Zeroed() const;
   bool Faulted() const;
-  void ProcessMeasurement(const IMUValues &values);
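+  // Adds a measurement to the internal averaging buffers without processing
+  // it.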
+  void InsertMeasurement(const IMUValues &values);
+  // Performs the heavier-duty processing for managing zeroing.
+  void ProcessMeasurements();
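+  // Convenience helper that inserts a measurement and processes it right away.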
+  void InsertAndProcessMeasurement(const IMUValues &values);
   Eigen::Vector3d GyroOffset() const;
   Eigen::Vector3d ZeroedGyro() const;
   Eigen::Vector3d ZeroedAccel() const;
diff --git a/frc971/zeroing/imu_zeroer_test.cc b/frc971/zeroing/imu_zeroer_test.cc
index eee3f57..e97fbe2 100644
--- a/frc971/zeroing/imu_zeroer_test.cc
+++ b/frc971/zeroing/imu_zeroer_test.cc
@@ -32,7 +32,7 @@
   ASSERT_EQ(0.0, zeroer.ZeroedAccel().norm());
   // A measurement before we are zeroed should just result in the measurement
   // being passed through without modification.
-  zeroer.ProcessMeasurement(
+  zeroer.InsertAndProcessMeasurement(
       MakeMeasurement({0.01, 0.02, 0.03}, {4, 5, 6}).message());
   ASSERT_FALSE(zeroer.Zeroed());
   ASSERT_FALSE(zeroer.Faulted());
@@ -50,7 +50,7 @@
   ImuZeroer zeroer;
   ASSERT_FALSE(zeroer.Zeroed());
   for (size_t ii = 0; ii < kMinSamplesToZero; ++ii) {
-    zeroer.ProcessMeasurement(
+    zeroer.InsertAndProcessMeasurement(
         MakeMeasurement({0.01, 0.02, 0.03}, {4, 5, 6}).message());
   }
   ASSERT_TRUE(zeroer.Zeroed());
@@ -68,7 +68,7 @@
   ASSERT_EQ(6.0, zeroer.ZeroedAccel().z());
   // If we get another measurement offset by {1, 1, 1} we should read the result
   // as {1, 1, 1}.
-  zeroer.ProcessMeasurement(
+  zeroer.InsertAndProcessMeasurement(
       MakeMeasurement({0.02, 0.03, 0.04}, {0, 0, 0}).message());
   ASSERT_FALSE(zeroer.Faulted());
   ASSERT_FLOAT_EQ(0.01, zeroer.ZeroedGyro().x());
@@ -82,7 +82,7 @@
   ImuZeroer zeroer;
   ASSERT_FALSE(zeroer.Zeroed());
   for (size_t ii = 0; ii < kMinSamplesToZero; ++ii) {
-    zeroer.ProcessMeasurement(
+    zeroer.InsertAndProcessMeasurement(
         MakeMeasurement({0.1, 0.2, 0.3}, {4, 5, 6}).message());
     ASSERT_FALSE(zeroer.Zeroed());
   }
@@ -97,10 +97,9 @@
   for (size_t ii = 0; ii < kMinSamplesToZero; ++ii) {
     const double offset =
         (static_cast<double>(ii) / (kMinSamplesToZero - 1) - 0.5) * 0.001;
-    zeroer.ProcessMeasurement(
+    zeroer.InsertAndProcessMeasurement(
         MakeMeasurement({0.01 + offset, 0.02 + offset, 0.03 + offset},
-                        {4 + offset, 5 + offset, 6 + offset})
-            .message());
+                        {4 + offset, 5 + offset, 6 + offset}).message());
   }
   ASSERT_TRUE(zeroer.Zeroed());
   ASSERT_FALSE(zeroer.Faulted());
@@ -109,7 +108,7 @@
   ASSERT_NEAR(0.03, zeroer.GyroOffset().z(), 1e-3);
   // If we get another measurement offset by {0.01, 0.01, 0.01} we should read
   // the result as {0.01, 0.01, 0.01}.
-  zeroer.ProcessMeasurement(
+  zeroer.InsertAndProcessMeasurement(
       MakeMeasurement({0.02, 0.03, 0.04}, {0, 0, 0}).message());
   ASSERT_FALSE(zeroer.Faulted());
   ASSERT_NEAR(0.01, zeroer.ZeroedGyro().x(), 1e-3);
@@ -128,10 +127,9 @@
     ASSERT_FALSE(zeroer.Zeroed());
     const double offset =
         (static_cast<double>(ii) / (kMinSamplesToZero - 1) - 0.5) * 1.0;
-    zeroer.ProcessMeasurement(
+    zeroer.InsertAndProcessMeasurement(
         MakeMeasurement({0.01 + offset, 0.02 + offset, 0.03 + offset},
-                        {4 + offset, 5 + offset, 6 + offset})
-            .message());
+                        {4 + offset, 5 + offset, 6 + offset}).message());
   }
   ASSERT_FALSE(zeroer.Zeroed());
   ASSERT_FALSE(zeroer.Faulted());
@@ -143,14 +141,14 @@
   ImuZeroer zeroer;
   ASSERT_FALSE(zeroer.Zeroed());
   for (size_t ii = 0; ii < kMinSamplesToZero; ++ii) {
-    zeroer.ProcessMeasurement(
+    zeroer.InsertAndProcessMeasurement(
         MakeMeasurement({0.01, 0.02, 0.03}, {4, 5, 6}).message());
   }
   ASSERT_TRUE(zeroer.Zeroed());
   ASSERT_FALSE(zeroer.Faulted())
       << "We should not fault until we complete a second cycle of zeroing.";
   for (size_t ii = 0; ii < kMinSamplesToZero; ++ii) {
-    zeroer.ProcessMeasurement(
+    zeroer.InsertAndProcessMeasurement(
         MakeMeasurement({0.01, 0.05, 0.03}, {4, 5, 6}).message());
   }
   ASSERT_TRUE(zeroer.Faulted());
@@ -161,12 +159,12 @@
   ImuZeroer zeroer;
   ASSERT_FALSE(zeroer.Zeroed());
   for (size_t ii = 0; ii < kMinSamplesToZero; ++ii) {
-    zeroer.ProcessMeasurement(
+    zeroer.InsertAndProcessMeasurement(
         MakeMeasurement({0.01, 0.02, 0.03}, {4, 5, 6}).message());
   }
   ASSERT_TRUE(zeroer.Zeroed());
   for (size_t ii = 0; ii < kMinSamplesToZero; ++ii) {
-    zeroer.ProcessMeasurement(
+    zeroer.InsertAndProcessMeasurement(
         MakeMeasurement({0.01, 0.020001, 0.03}, {4, 5, 6}).message());
   }
   ASSERT_FALSE(zeroer.Faulted());
diff --git a/third_party/BUILD b/third_party/BUILD
index 5b2ca31..bcba978 100644
--- a/third_party/BUILD
+++ b/third_party/BUILD
@@ -113,4 +113,3 @@
         "roborio": ["@webrtc_rio//:webrtc"],
     }),
 )
-
diff --git a/third_party/flatbuffers/build_defs.bzl b/third_party/flatbuffers/build_defs.bzl
index 6ac1d87..f4c1dc7 100644
--- a/third_party/flatbuffers/build_defs.bzl
+++ b/third_party/flatbuffers/build_defs.bzl
@@ -282,7 +282,7 @@
         been parsed. As such, we just force the user to manually specify
         things.
     """
-    python_files = ["%s/%s.py" % (namespace.replace('.', '/'), table) for table in tables]
+    python_files = ["%s/%s.py" % (namespace.replace(".", "/"), table) for table in tables]
 
     srcs_lib = "%s_srcs" % (name)
     flatbuffer_library_public(
diff --git a/third_party/gperftools/BUILD b/third_party/gperftools/BUILD
index 25a8740..7f5e9d6 100644
--- a/third_party/gperftools/BUILD
+++ b/third_party/gperftools/BUILD
@@ -86,7 +86,7 @@
     "-DPERFTOOLS_DLL_DECL=",
     "-DSTDC_HEADERS=1",
     "-DSTL_NAMESPACE=std",
-    "-DPACKAGE_STRING=\\\"gperftools\ 2.4\\\"",
+    "-DPACKAGE_STRING=\\\"gperftools\\ 2.4\\\"",
     "-DPACKAGE_BUGREPORT=\\\"http://www.frc971.org/contact\\\"",
     "-DPACKAGE_VERSION=\\\"2.4\\\"",
 ] + cpu_select({
@@ -141,7 +141,7 @@
         "-lrt",
         "-lpthread",
     ],
-    nocopts = "-std=gnu\+\+1y",
+    nocopts = "-std=gnu\\+\\+1y",
     visibility = ["//visibility:public"],
     deps = [
         "//third_party/empty_config_h",
diff --git a/third_party/libevent/BUILD b/third_party/libevent/BUILD
index da05f3c..347ec0b 100644
--- a/third_party/libevent/BUILD
+++ b/third_party/libevent/BUILD
@@ -148,7 +148,7 @@
         "_EVENT_STDC_HEADERS=1",
         "_EVENT_TIME_WITH_SYS_TIME=1",
         "_EVENT_NUMERIC_VERSION=0x02001600",
-        '_EVENT_VERSION=\\"2.0.22-stable\\"',
+        "_EVENT_VERSION=\\\"2.0.22-stable\\\"",
     ] + address_size_select({
         "32": [
             "_EVENT_SIZEOF_LONG_LONG=4",
diff --git a/y2019/control_loops/drivetrain/localized_drivetrain_test.cc b/y2019/control_loops/drivetrain/localized_drivetrain_test.cc
index 1459248..c5a2d18 100644
--- a/y2019/control_loops/drivetrain/localized_drivetrain_test.cc
+++ b/y2019/control_loops/drivetrain/localized_drivetrain_test.cc
@@ -84,7 +84,7 @@
         });
 
     // Run for enough time to allow the gyro/imu zeroing code to run.
-    RunFor(std::chrono::seconds(10));
+    RunFor(std::chrono::seconds(15));
   }
 
   virtual ~LocalizedDrivetrainTest() {}
diff --git a/y2020/BUILD b/y2020/BUILD
index aa5f16e..320a0cf 100644
--- a/y2020/BUILD
+++ b/y2020/BUILD
@@ -4,10 +4,14 @@
 load("@com_github_google_flatbuffers//:build_defs.bzl", "flatbuffer_cc_library")
 
 robot_downloader(
+    binaries = [
+        ":setpoint_setter",
+    ],
     data = [
         ":config.json",
     ],
     start_binaries = [
+        "//aos/events/logging:logger_main",
         ":joystick_reader",
         ":wpilib_interface",
         "//aos/network:message_bridge_client",
@@ -18,6 +22,24 @@
     ],
 )
 
+robot_downloader(
+    name = "pi_download",
+    data = [
+        ":config.json",
+    ],
+    dirs = [
+        "//y2020/www:www_files",
+    ],
+    restricted_to = ["//tools:armhf-debian"],
+    start_binaries = [
+        "//aos/network:message_bridge_client",
+        "//aos/network:message_bridge_server",
+        "//y2020/vision:camera_reader",
+        "//aos/network:web_proxy_main",
+    ],
+    target_type = "pi",
+)
+
 cc_library(
     name = "constants",
     srcs = [
@@ -92,6 +114,7 @@
         ":joystick_reader.cc",
     ],
     deps = [
+        ":setpoint_fbs",
         "//aos:init",
         "//aos/actions:action_lib",
         "//aos/input:action_joystick_input",
@@ -112,6 +135,7 @@
     name = "config",
     src = "y2020.json",
     flatbuffers = [
+        ":setpoint_fbs",
         "//aos/network:message_bridge_client_fbs",
         "//aos/network:message_bridge_server_fbs",
         "//aos/network:timestamp_fbs",
@@ -121,6 +145,7 @@
         "//y2020/control_loops/superstructure:superstructure_position_fbs",
         "//y2020/control_loops/superstructure:superstructure_status_fbs",
         "//y2020/vision/sift:sift_fbs",
+        "//y2020/vision/sift:sift_training_fbs",
         "//y2020/vision:vision_fbs",
     ],
     visibility = ["//visibility:public"],
@@ -143,9 +168,30 @@
     srcs = ["web_proxy.sh"],
     data = [
         ":config.json",
+        "//y2020/www:field_main_bundle",
         "//aos/network:web_proxy_main",
         "//y2020/www:files",
         "//y2020/www:flatbuffers",
-        "//y2020/www:main_bundle",
+        "//y2020/www:camera_main_bundle",
+    ],
+)
+
+flatbuffer_cc_library(
+    name = "setpoint_fbs",
+    srcs = [
+        "setpoint.fbs",
+    ],
+    gen_reflections = 1,
+)
+
+cc_binary(
+    name = "setpoint_setter",
+    srcs = ["setpoint_setter.cc"],
+    deps = [
+        ":setpoint_fbs",
+        "//aos:init",
+        "//aos/events:shm_event_loop",
     ],
 )
diff --git a/y2020/actors/auto_splines.cc b/y2020/actors/auto_splines.cc
index 86eed14..3498a3e 100644
--- a/y2020/actors/auto_splines.cc
+++ b/y2020/actors/auto_splines.cc
@@ -59,16 +59,16 @@
               {longitudinal_constraint_offset, lateral_constraint_offset,
                voltage_constraint_offset});
 
-  const float startx = 0.4;
-  const float starty = 3.4;
+  const float startx = 0.0;
+  const float starty = 0.05;
   flatbuffers::Offset<flatbuffers::Vector<float>> spline_x_offset =
-      builder->fbb()->CreateVector<float>({0.0f + startx, 0.6f + startx,
-                                           0.6f + startx, 0.4f + startx,
-                                           0.4f + startx, 1.0f + startx});
+      builder->fbb()->CreateVector<float>({0.0f + startx, 0.8f + startx,
+                                           0.8f + startx, 1.2f + startx,
+                                           1.2f + startx, 2.0f + startx});
   flatbuffers::Offset<flatbuffers::Vector<float>> spline_y_offset =
       builder->fbb()->CreateVector<float>({starty - 0.0f, starty - 0.0f,
-                                           starty - 0.3f, starty - 0.7f,
-                                           starty - 1.0f, starty - 1.0f});
+                                           starty - 0.1f, starty - 0.2f,
+                                           starty - 0.3f, starty - 0.3f});
 
   frc971::MultiSpline::Builder multispline_builder =
       builder->MakeBuilder<frc971::MultiSpline>();
diff --git a/y2020/actors/autonomous_actor.cc b/y2020/actors/autonomous_actor.cc
index d015c65..e87e064 100644
--- a/y2020/actors/autonomous_actor.cc
+++ b/y2020/actors/autonomous_actor.cc
@@ -8,6 +8,7 @@
 #include "aos/logging/logging.h"
 #include "frc971/control_loops/drivetrain/localizer_generated.h"
 #include "y2020/control_loops/drivetrain/drivetrain_base.h"
+#include "y2020/actors/auto_splines.h"
 
 namespace y2020 {
 namespace actors {
@@ -19,17 +20,43 @@
 
 AutonomousActor::AutonomousActor(::aos::EventLoop *event_loop)
     : frc971::autonomous::BaseAutonomousActor(
-          event_loop, control_loops::drivetrain::GetDrivetrainConfig()) {}
+          event_loop, control_loops::drivetrain::GetDrivetrainConfig()),
+      localizer_control_sender_(event_loop->MakeSender<
+          ::frc971::control_loops::drivetrain::LocalizerControl>(
+          "/drivetrain")) {}
 
 void AutonomousActor::Reset() {
   InitializeEncoders();
   ResetDrivetrain();
+
+  {
+    auto builder = localizer_control_sender_.MakeBuilder();
+
+    LocalizerControl::Builder localizer_control_builder =
+        builder.MakeBuilder<LocalizerControl>();
+    // TODO(james): Set starting position based on the alliance.
+    localizer_control_builder.add_x(0.0);
+    localizer_control_builder.add_y(0.0);
+    localizer_control_builder.add_theta(0.0);
+    localizer_control_builder.add_theta_uncertainty(0.00001);
+    if (!builder.Send(localizer_control_builder.Finish())) {
+      AOS_LOG(ERROR, "Failed to reset localizer.\n");
+    }
+  }
 }
 
 bool AutonomousActor::RunAction(
     const ::frc971::autonomous::AutonomousActionParams *params) {
   Reset();
 
+  SplineHandle spline1 =
+      PlanSpline(AutonomousSplines::BasicSSpline, SplineDirection::kForward);
+
+  if (!spline1.WaitForPlan()) return true;
+  spline1.Start();
+
+  if (!spline1.WaitForSplineDistanceRemaining(0.02)) return true;
+
   AOS_LOG(INFO, "Params are %d\n", params->mode());
   return true;
 }
diff --git a/y2020/actors/autonomous_actor.h b/y2020/actors/autonomous_actor.h
index 3bfa610..d86feab 100644
--- a/y2020/actors/autonomous_actor.h
+++ b/y2020/actors/autonomous_actor.h
@@ -6,6 +6,7 @@
 #include "frc971/autonomous/base_autonomous_actor.h"
 #include "frc971/control_loops/control_loops_generated.h"
 #include "frc971/control_loops/drivetrain/drivetrain_config.h"
+#include "frc971/control_loops/drivetrain/localizer_generated.h"
 
 namespace y2020 {
 namespace actors {
@@ -19,6 +20,9 @@
 
  private:
   void Reset();
+
+  ::aos::Sender<::frc971::control_loops::drivetrain::LocalizerControl>
+      localizer_control_sender_;
 };
 
 }  // namespace actors
diff --git a/y2020/constants.cc b/y2020/constants.cc
index f0610a0..463919a 100644
--- a/y2020/constants.cc
+++ b/y2020/constants.cc
@@ -102,15 +102,15 @@
       break;
 
     case kCompTeamNumber:
-      hood->zeroing_constants.measured_absolute_position = 0.432541963515072;
+      hood->zeroing_constants.measured_absolute_position = 0.266691815725476;
 
       intake->zeroing_constants.measured_absolute_position =
           1.42977866919024 - Values::kIntakeZero();
 
-      turret->potentiometer_offset =
-          5.52519370141247 + 0.00853506822980376 + 0.0109413725126625;
+      turret->potentiometer_offset = 5.52519370141247 + 0.00853506822980376 +
+                                     0.0109413725126625 - 0.223719825811759;
       turret_params->zeroing_constants.measured_absolute_position =
-          0.251065633255048;
+          0.547478339799516;
       break;
 
     case kPracticeTeamNumber:
diff --git a/y2020/control_loops/drivetrain/BUILD b/y2020/control_loops/drivetrain/BUILD
index 03d2dc2..1bd760f 100644
--- a/y2020/control_loops/drivetrain/BUILD
+++ b/y2020/control_loops/drivetrain/BUILD
@@ -59,6 +59,7 @@
     hdrs = ["localizer.h"],
     deps = [
         "//aos/containers:ring_buffer",
+        "//aos/network:message_bridge_server_fbs",
         "//frc971/control_loops:profiled_subsystem_fbs",
         "//frc971/control_loops/drivetrain:hybrid_ekf",
         "//frc971/control_loops/drivetrain:localizer",
diff --git a/y2020/control_loops/drivetrain/drivetrain_base.cc b/y2020/control_loops/drivetrain/drivetrain_base.cc
index 62679bc..b919902 100644
--- a/y2020/control_loops/drivetrain/drivetrain_base.cc
+++ b/y2020/control_loops/drivetrain/drivetrain_base.cc
@@ -25,8 +25,8 @@
   static DrivetrainConfig<double> kDrivetrainConfig{
       ::frc971::control_loops::drivetrain::ShifterType::SIMPLE_SHIFTER,
       ::frc971::control_loops::drivetrain::LoopType::CLOSED_LOOP,
-      ::frc971::control_loops::drivetrain::GyroType::IMU_Z_GYRO,
-      ::frc971::control_loops::drivetrain::IMUType::IMU_FLIPPED_X,
+      ::frc971::control_loops::drivetrain::GyroType::IMU_X_GYRO,
+      ::frc971::control_loops::drivetrain::IMUType::IMU_Z,
 
       drivetrain::MakeDrivetrainLoop,
       drivetrain::MakeVelocityDrivetrainLoop,
@@ -53,8 +53,8 @@
       1.2 /* quickturn_wheel_multiplier */,
       1.2 /* wheel_multiplier */,
       true /*pistol_grip_shift_enables_line_follow*/,
-      (Eigen::Matrix<double, 3, 3>() << 0.0, -1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0,
-       1.0)
+      (Eigen::Matrix<double, 3, 3>() << 0.0, 0.0, 1.0, 0.0, -1.0, 0.0, 1.0, 0.0,
+       0.0)
           .finished() /*imu_transform*/,
   };
 
diff --git a/y2020/control_loops/drivetrain/drivetrain_main.cc b/y2020/control_loops/drivetrain/drivetrain_main.cc
index 7159118..7804814 100644
--- a/y2020/control_loops/drivetrain/drivetrain_main.cc
+++ b/y2020/control_loops/drivetrain/drivetrain_main.cc
@@ -6,7 +6,8 @@
 
 using ::frc971::control_loops::drivetrain::DrivetrainLoop;
 
-int main() {
+int main(int argc, char *argv[]) {
+  ::aos::InitGoogle(&argc, &argv);
   ::aos::InitNRT();
 
   aos::FlatbufferDetachedBuffer<aos::Configuration> config =
diff --git a/y2020/control_loops/drivetrain/drivetrain_replay.cc b/y2020/control_loops/drivetrain/drivetrain_replay.cc
index af6a841..faf2cef 100644
--- a/y2020/control_loops/drivetrain/drivetrain_replay.cc
+++ b/y2020/control_loops/drivetrain/drivetrain_replay.cc
@@ -41,15 +41,19 @@
                             "frc971.control_loops.drivetrain.Output");
   reader.Register();
 
+  const aos::Node *node = nullptr;
+  if (aos::configuration::MultiNode(reader.configuration())) {
+    node = aos::configuration::GetNode(reader.configuration(), "roborio");
+  }
+
   aos::logger::DetachedBufferWriter file_writer(FLAGS_output_file);
   std::unique_ptr<aos::EventLoop> log_writer_event_loop =
-      reader.event_loop_factory()->MakeEventLoop("log_writer");
+      reader.event_loop_factory()->MakeEventLoop("log_writer", node);
   log_writer_event_loop->SkipTimingReport();
-  CHECK(nullptr == log_writer_event_loop->node());
   aos::logger::Logger writer(&file_writer, log_writer_event_loop.get());
 
   std::unique_ptr<aos::EventLoop> drivetrain_event_loop =
-      reader.event_loop_factory()->MakeEventLoop("drivetrain");
+      reader.event_loop_factory()->MakeEventLoop("drivetrain", node);
   drivetrain_event_loop->SkipTimingReport();
 
   y2020::control_loops::drivetrain::Localizer localizer(
diff --git a/y2020/control_loops/drivetrain/drivetrain_replay_test.cc b/y2020/control_loops/drivetrain/drivetrain_replay_test.cc
index 3e391c8..6033184 100644
--- a/y2020/control_loops/drivetrain/drivetrain_replay_test.cc
+++ b/y2020/control_loops/drivetrain/drivetrain_replay_test.cc
@@ -52,13 +52,19 @@
         reader_.event_loop_factory()->MakeEventLoop("drivetrain", roborio_);
     drivetrain_event_loop_->SkipTimingReport();
 
+    frc971::control_loops::drivetrain::DrivetrainConfig<double> config =
+        GetDrivetrainConfig();
+    // Make the modifications required for the imu transform to work with the
+    // 2016 logs...
+    config.imu_transform << 0.0, -1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0;
+    config.gyro_type = frc971::control_loops::drivetrain::GyroType::IMU_Z_GYRO;
+
     localizer_ =
         std::make_unique<frc971::control_loops::drivetrain::DeadReckonEkf>(
-            drivetrain_event_loop_.get(), GetDrivetrainConfig());
+            drivetrain_event_loop_.get(), config);
     drivetrain_ =
         std::make_unique<frc971::control_loops::drivetrain::DrivetrainLoop>(
-            GetDrivetrainConfig(), drivetrain_event_loop_.get(),
-            localizer_.get());
+            config, drivetrain_event_loop_.get(), localizer_.get());
 
     test_event_loop_ =
         reader_.event_loop_factory()->MakeEventLoop("drivetrain", roborio_);
@@ -88,7 +94,7 @@
   ASSERT_TRUE(status_fetcher_->has_x());
   ASSERT_TRUE(status_fetcher_->has_y());
   ASSERT_TRUE(status_fetcher_->has_theta());
-  EXPECT_LT(std::abs(status_fetcher_->x()), 0.1);
+  EXPECT_LT(std::abs(status_fetcher_->x()), 0.25);
   // Because the encoders should not be affecting the y or yaw axes, expect a
   // reasonably precise result (although, since this is a real world test, the
   // robot probably did actually move by some non-zero amount).
diff --git a/y2020/control_loops/drivetrain/localizer.cc b/y2020/control_loops/drivetrain/localizer.cc
index 4b11f6f..d26552a 100644
--- a/y2020/control_loops/drivetrain/localizer.cc
+++ b/y2020/control_loops/drivetrain/localizer.cc
@@ -22,13 +22,22 @@
   }
   return result;
 }
+
+// Names of the pis to fetch image results from.
+const std::array<std::string, 3> kPisToUse{"pi1", "pi2", "pi3"};
+
 }  // namespace
 
 Localizer::Localizer(
     aos::EventLoop *event_loop,
     const frc971::control_loops::drivetrain::DrivetrainConfig<double>
         &dt_config)
-    : event_loop_(event_loop), dt_config_(dt_config), ekf_(dt_config) {
+    : event_loop_(event_loop),
+      dt_config_(dt_config),
+      ekf_(dt_config),
+      clock_offset_fetcher_(
+          event_loop_->MakeFetcher<aos::message_bridge::ServerStatistics>(
+              "/aos")) {
   // TODO(james): This doesn't really need to be a watcher; we could just use a
   // fetcher for the superstructure status.
   // This probably should be a Fetcher instead of a Watcher, but this
@@ -40,9 +49,16 @@
                              HandleSuperstructureStatus(status);
                            });
 
-  image_fetchers_.emplace_back(
-      event_loop_->MakeFetcher<frc971::vision::sift::ImageMatchResult>(
-          "/pi1/camera"));
+  event_loop->OnRun([this, event_loop]() {
+    ekf_.ResetInitialState(event_loop->monotonic_now(), Ekf::State::Zero(),
+                           ekf_.P());
+  });
+
+  for (const auto &pi : kPisToUse) {
+    image_fetchers_.emplace_back(
+        event_loop_->MakeFetcher<frc971::vision::sift::ImageMatchResult>(
+            "/" + pi + "/camera"));
+  }
 
   target_selector_.set_has_target(false);
 }
@@ -78,9 +94,10 @@
                        aos::monotonic_clock::time_point now,
                        double left_encoder, double right_encoder,
                        double gyro_rate, const Eigen::Vector3d &accel) {
-  for (auto &image_fetcher : image_fetchers_) {
+  for (size_t ii = 0; ii < kPisToUse.size(); ++ii) {
+    auto &image_fetcher = image_fetchers_[ii];
     while (image_fetcher.FetchNext()) {
-      HandleImageMatch(*image_fetcher);
+      HandleImageMatch(kPisToUse[ii], *image_fetcher);
     }
   }
   ekf_.UpdateEncodersAndGyro(left_encoder, right_encoder, gyro_rate, U, accel,
@@ -88,10 +105,30 @@
 }
 
 void Localizer::HandleImageMatch(
-    const frc971::vision::sift::ImageMatchResult &result) {
-  // TODO(james): compensate for capture time across multiple nodes correctly.
+    std::string_view pi, const frc971::vision::sift::ImageMatchResult &result) {
+  std::chrono::nanoseconds monotonic_offset(0);
+  clock_offset_fetcher_.Fetch();
+  if (clock_offset_fetcher_.get() != nullptr) {
+    for (const auto *connection : *clock_offset_fetcher_->connections()) {
+      if (connection->has_node() && connection->node()->has_name() &&
+          connection->node()->name()->string_view() == pi) {
+        monotonic_offset =
+            std::chrono::nanoseconds(connection->monotonic_offset());
+        break;
+      }
+    }
+  }
   aos::monotonic_clock::time_point capture_time(
-      std::chrono::nanoseconds(result.image_monotonic_timestamp_ns()));
+      std::chrono::nanoseconds(result.image_monotonic_timestamp_ns()) -
+      monotonic_offset);
+  VLOG(1) << "Got monotonic offset of "
+          << aos::time::DurationInSeconds(monotonic_offset)
+          << " when at time of " << event_loop_->monotonic_now()
+          << " and capture time estimate of " << capture_time;
+  if (capture_time > event_loop_->monotonic_now()) {
+    LOG(WARNING) << "Got camera frame from the future.";
+    return;
+  }
   CHECK(result.has_camera_calibration());
   // Per the ImageMatchResult specification, we can actually determine whether
   // the camera is the turret camera just from the presence of the
@@ -162,6 +199,12 @@
     H(0, StateIdx::kX) = 1;
     H(1, StateIdx::kY) = 1;
     H(2, StateIdx::kTheta) = 1;
+    // If the heading is off by too much, assume that we got a false-positive
+    // and don't use the correction.
+    if (std::abs(aos::math::DiffAngle(theta(), Z(2))) > M_PI_2) {
+      AOS_LOG(WARNING, "Dropped image match due to heading mismatch.\n");
+      return;
+    }
     ekf_.Correct(Z, nullptr, {}, [H, Z](const State &X, const Input &) {
                                    Eigen::Vector3d Zhat = H * X;
                                    // In order to deal with wrapping of the
diff --git a/y2020/control_loops/drivetrain/localizer.h b/y2020/control_loops/drivetrain/localizer.h
index 0b79361..1d6f816 100644
--- a/y2020/control_loops/drivetrain/localizer.h
+++ b/y2020/control_loops/drivetrain/localizer.h
@@ -1,8 +1,11 @@
 #ifndef Y2020_CONTROL_LOOPS_DRIVETRAIN_LOCALIZER_H_
 #define Y2020_CONTROL_LOOPS_DRIVETRAIN_LOCALIZER_H_
 
+#include <string_view>
+
 #include "aos/containers/ring_buffer.h"
 #include "aos/events/event_loop.h"
+#include "aos/network/message_bridge_server_generated.h"
 #include "frc971/control_loops/drivetrain/hybrid_ekf.h"
 #include "frc971/control_loops/drivetrain/localizer.h"
 #include "y2020/control_loops/superstructure/superstructure_status_generated.h"
@@ -68,8 +71,9 @@
     double velocity = 0.0;  // rad/sec
   };
 
-  // Processes new image data and updates the EKF.
-  void HandleImageMatch(const frc971::vision::sift::ImageMatchResult &result);
+  // Processes new image data from the given pi and updates the EKF.
+  void HandleImageMatch(std::string_view pi,
+                        const frc971::vision::sift::ImageMatchResult &result);
 
   // Processes the most recent turret position and stores it in the turret_data_
   // buffer.
@@ -86,6 +90,8 @@
   std::vector<aos::Fetcher<frc971::vision::sift::ImageMatchResult>>
       image_fetchers_;
 
+  aos::Fetcher<aos::message_bridge::ServerStatistics> clock_offset_fetcher_;
+
   // Buffer of recent turret data--this is used so that when we receive a camera
   // frame from the turret, we can back out what the turret angle was at that
   // time.
diff --git a/y2020/control_loops/drivetrain/localizer_test.cc b/y2020/control_loops/drivetrain/localizer_test.cc
index 1e72c39..c47faa7 100644
--- a/y2020/control_loops/drivetrain/localizer_test.cc
+++ b/y2020/control_loops/drivetrain/localizer_test.cc
@@ -4,6 +4,7 @@
 
 #include "aos/controls/control_loop_test.h"
 #include "aos/events/logging/logger.h"
+#include "aos/network/message_bridge_server_generated.h"
 #include "aos/network/team_number.h"
 #include "frc971/control_loops/drivetrain/drivetrain.h"
 #include "frc971/control_loops/drivetrain/drivetrain_test_lib.h"
@@ -84,12 +85,13 @@
   locations.push_back(H);
   return locations;
 }
+
+constexpr std::chrono::seconds kPiTimeOffset(10);
 }  // namespace
 
 namespace chrono = std::chrono;
 using frc971::control_loops::drivetrain::testing::DrivetrainSimulation;
 using frc971::control_loops::drivetrain::DrivetrainLoop;
-using frc971::control_loops::drivetrain::testing::GetTestDrivetrainConfig;
 using aos::monotonic_clock;
 
 class LocalizedDrivetrainTest : public aos::testing::ControlLoopTest {
@@ -113,6 +115,9 @@
         superstructure_status_sender_(
             test_event_loop_->MakeSender<superstructure::Status>(
                 "/superstructure")),
+        server_statistics_sender_(
+            test_event_loop_->MakeSender<aos::message_bridge::ServerStatistics>(
+                "/aos")),
         drivetrain_event_loop_(MakeEventLoop("drivetrain", roborio_)),
         dt_config_(GetTest2020DrivetrainConfig()),
         pi1_event_loop_(MakeEventLoop("test", pi1_)),
@@ -123,8 +128,10 @@
         drivetrain_plant_event_loop_(MakeEventLoop("plant", roborio_)),
         drivetrain_plant_(drivetrain_plant_event_loop_.get(), dt_config_),
         last_frame_(monotonic_now()) {
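+    // Skew pi1's clock relative to the roborio so the test exercises the
+    // monotonic-offset compensation in the localizer.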
+    event_loop_factory()->GetNodeEventLoopFactory(pi1_)->SetDistributedOffset(
+        kPiTimeOffset);
+
     set_team_id(frc971::control_loops::testing::kTeamNumber);
-    SetStartingPosition({3.0, 2.0, 0.0});
     set_battery_voltage(12.0);
 
     if (!FLAGS_output_file.empty()) {
@@ -151,6 +158,29 @@
 
     test_event_loop_->AddPhasedLoop(
         [this](int) {
+          auto builder = server_statistics_sender_.MakeBuilder();
+          auto name_offset = builder.fbb()->CreateString("pi1");
+          auto node_builder = builder.MakeBuilder<aos::Node>();
+          node_builder.add_name(name_offset);
+          auto node_offset = node_builder.Finish();
+          auto connection_builder =
+              builder.MakeBuilder<aos::message_bridge::ServerConnection>();
+          connection_builder.add_node(node_offset);
+          connection_builder.add_monotonic_offset(
+              chrono::duration_cast<chrono::nanoseconds>(-kPiTimeOffset)
+                  .count());
+          auto connection_offset = connection_builder.Finish();
+          auto connections_offset =
+              builder.fbb()->CreateVector(&connection_offset, 1);
+          auto statistics_builder =
+              builder.MakeBuilder<aos::message_bridge::ServerStatistics>();
+          statistics_builder.add_connections(connections_offset);
+          builder.Send(statistics_builder.Finish());
+        },
+        chrono::milliseconds(500));
+
+    test_event_loop_->AddPhasedLoop(
+        [this](int) {
           // Also use the opportunity to send out turret messages.
           UpdateTurretPosition();
           auto builder = superstructure_status_sender_.MakeBuilder();
@@ -167,6 +197,8 @@
         },
         chrono::milliseconds(5));
 
+    test_event_loop_->OnRun([this]() { SetStartingPosition({3.0, 2.0, 0.0}); });
+
     // Run for enough time to allow the gyro/imu zeroing code to run.
     RunFor(std::chrono::seconds(10));
   }
@@ -258,7 +290,10 @@
 
     frame->image_monotonic_timestamp_ns =
         chrono::duration_cast<chrono::nanoseconds>(
-            monotonic_now().time_since_epoch())
+            event_loop_factory()
+                ->GetNodeEventLoopFactory(pi1_)
+                ->monotonic_now()
+                .time_since_epoch())
             .count();
     frame->camera_calibration.reset(new CameraCalibrationT());
     {
@@ -301,6 +336,7 @@
   aos::Fetcher<Goal> drivetrain_goal_fetcher_;
   aos::Sender<LocalizerControl> localizer_control_sender_;
   aos::Sender<superstructure::Status> superstructure_status_sender_;
+  aos::Sender<aos::message_bridge::ServerStatistics> server_statistics_sender_;
 
   std::unique_ptr<aos::EventLoop> drivetrain_event_loop_;
   const frc971::control_loops::drivetrain::DrivetrainConfig<double>
@@ -381,6 +417,22 @@
   EXPECT_TRUE(VerifyEstimatorAccurate(5e-4));
 }
 
+// Tests that we can drive in a straight line and that the estimator stays
+// accurate.
+TEST_F(LocalizedDrivetrainTest, NoCameraUpdateStraightLine) {
+  SetEnabled(true);
+  set_enable_cameras(false);
+  EXPECT_TRUE(VerifyEstimatorAccurate(1e-7));
+
+  SendGoal(1.0, 1.0);
+
+  RunFor(chrono::seconds(1));
+  VerifyNearGoal();
+  // Due to accelerometer drift, the straight-line driving tends to be less
+  // accurate...
+  EXPECT_TRUE(VerifyEstimatorAccurate(0.1));
+}
+
 // Tests that camera updates with a perfect model result in no errors.
 TEST_F(LocalizedDrivetrainTest, PerfectCameraUpdate) {
   SetEnabled(true);
diff --git a/y2020/control_loops/python/BUILD b/y2020/control_loops/python/BUILD
index 923d239..e218571 100644
--- a/y2020/control_loops/python/BUILD
+++ b/y2020/control_loops/python/BUILD
@@ -133,8 +133,8 @@
     legacy_create_init = False,
     restricted_to = ["//tools:k8"],
     deps = [
-        ":python_init",
         ":flywheel",
+        ":python_init",
         "//external:python-gflags",
         "//external:python-glog",
     ],
@@ -148,8 +148,8 @@
     legacy_create_init = False,
     restricted_to = ["//tools:k8"],
     deps = [
-        ":python_init",
         ":flywheel",
+        ":python_init",
         "//external:python-gflags",
         "//external:python-glog",
     ],
diff --git a/y2020/control_loops/python/accelerator.py b/y2020/control_loops/python/accelerator.py
index 1746589..48c9098 100644
--- a/y2020/control_loops/python/accelerator.py
+++ b/y2020/control_loops/python/accelerator.py
@@ -28,20 +28,22 @@
     name='Accelerator',
     motor=control_loop.Falcon(),
     G=G,
-    J=J,
-    q_pos=0.08,
-    q_vel=4.00,
-    q_voltage=0.4,
+    J=J + 0.0015,
+    q_pos=0.01,
+    q_vel=40.0,
+    q_voltage=2.0,
     r_pos=0.05,
-    controller_poles=[.84])
+    controller_poles=[.86])
 
 
 def main(argv):
     if FLAGS.plot:
         R = numpy.matrix([[0.0], [500.0], [0.0]])
-        flywheel.PlotSpinup(kAccelerator, goal=R, iterations=200)
+        flywheel.PlotSpinup(kAccelerator, goal=R, iterations=400)
         return 0
 
+    glog.debug("J is %f" % J)
+
     if len(argv) != 5:
         glog.fatal('Expected .h file name and .cc file name')
     else:
@@ -53,4 +55,5 @@
 
 if __name__ == '__main__':
     argv = FLAGS(sys.argv)
+    glog.init()
     sys.exit(main(argv))
diff --git a/y2020/control_loops/python/finisher.py b/y2020/control_loops/python/finisher.py
index 5067a16..9381630 100644
--- a/y2020/control_loops/python/finisher.py
+++ b/y2020/control_loops/python/finisher.py
@@ -14,7 +14,7 @@
 gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
 
 # Inertia for a single 4" diameter, 2" wide neoprene wheel.
-J_wheel = 0.000319 * 2.0
+J_wheel = 0.000319 * 2.0 * 6.0
 # Gear ratio to the final wheel.
 # 40 tooth on the flywheel
 # 48 for the falcon.
@@ -29,17 +29,17 @@
     motor=control_loop.Falcon(),
     G=G,
     J=J,
-    q_pos=0.08,
-    q_vel=4.00,
-    q_voltage=0.4,
+    q_pos=0.01,
+    q_vel=100.0,
+    q_voltage=6.0,
     r_pos=0.05,
-    controller_poles=[.84])
+    controller_poles=[.92])
 
 
 def main(argv):
     if FLAGS.plot:
         R = numpy.matrix([[0.0], [500.0], [0.0]])
-        flywheel.PlotSpinup(params=kFinisher, goal=R, iterations=200)
+        flywheel.PlotSpinup(params=kFinisher, goal=R, iterations=400)
         return 0
 
     if len(argv) != 5:
diff --git a/y2020/control_loops/python/flywheel.py b/y2020/control_loops/python/flywheel.py
index 630d223..451788a 100755
--- a/y2020/control_loops/python/flywheel.py
+++ b/y2020/control_loops/python/flywheel.py
@@ -30,7 +30,7 @@
         self.controller_poles = controller_poles
 
 
-class VelocityFlywheel(control_loop.ControlLoop):
+class VelocityFlywheel(control_loop.HybridControlLoop):
     def __init__(self, params, name="Flywheel"):
         super(VelocityFlywheel, self).__init__(name=name)
         self.params = params
@@ -121,26 +121,44 @@
         self.B_continuous = numpy.matrix(numpy.zeros((3, 1)))
         self.B_continuous[0:2, 0] = self.B_continuous_unaugmented
 
         # states
         # [position, velocity, voltage_error]
         self.C_unaugmented = self.C
         self.C = numpy.matrix(numpy.zeros((1, 3)))
         self.C[0:1, 0:2] = self.C_unaugmented
 
+        glog.debug('A_continuous %s' % str(self.A_continuous))
+        glog.debug('B_continuous %s' % str(self.B_continuous))
+        glog.debug('C %s' % str(self.C))
+
         self.A, self.B = self.ContinuousToDiscrete(self.A_continuous,
                                                    self.B_continuous, self.dt)
 
+        glog.debug('A %s' % str(self.A))
+        glog.debug('B %s' % str(self.B))
+
         q_pos = self.params.q_pos
         q_vel = self.params.q_vel
         q_voltage = self.params.q_voltage
-        self.Q = numpy.matrix([[(q_pos**2.0), 0.0, 0.0],
+        self.Q_continuous = numpy.matrix([[(q_pos**2.0), 0.0, 0.0],
                                [0.0, (q_vel**2.0), 0.0],
                                [0.0, 0.0, (q_voltage**2.0)]])
 
         r_pos = self.params.r_pos
-        self.R = numpy.matrix([[(r_pos**2.0)]])
+        self.R_continuous = numpy.matrix([[(r_pos**2.0)]])
 
-        self.KalmanGain, self.Q_steady = controls.kalman(
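+        # Discretize the continuous-time noise intensities so that the Kalman
+        # design below is consistent with the discrete A and B computed above.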
+        _, _, self.Q, self.R = controls.kalmd(
+            A_continuous=self.A_continuous,
+            B_continuous=self.B_continuous,
+            Q_continuous=self.Q_continuous,
+            R_continuous=self.R_continuous,
+            dt=self.dt)
+
+        glog.debug('Q_discrete %s' % (str(self.Q)))
+        glog.debug('R_discrete %s' % (str(self.R)))
+
+        self.KalmanGain, self.P_steady_state = controls.kalman(
             A=self.A, B=self.B, C=self.C, Q=self.Q, R=self.R)
         self.L = self.A * self.KalmanGain
 
@@ -155,7 +173,7 @@
         self.InitializeState()
 
 
-def PlotSpinup(params, goal, iterations=200):
+def PlotSpinup(params, goal, iterations=400):
     """Runs the flywheel plant with an initial condition and goal.
 
     Args:
@@ -210,21 +228,20 @@
 
         if observer_flywheel is not None:
             observer_flywheel.Y = flywheel.Y
-            observer_flywheel.CorrectObserver(U)
+            observer_flywheel.CorrectHybridObserver(U)
             offset.append(observer_flywheel.X_hat[2, 0])
 
         applied_U = U.copy()
-        if i > 100:
+        if i > 200:
             applied_U += 2
         flywheel.Update(applied_U)
 
         if observer_flywheel is not None:
-            observer_flywheel.PredictObserver(U)
+            observer_flywheel.PredictHybridObserver(U, flywheel.dt)
 
         t.append(initial_t + i * flywheel.dt)
         u.append(U[0, 0])
 
-        glog.debug('Time: %f', t[-1])
     pylab.subplot(3, 1, 1)
     pylab.plot(t, v, label='x')
     pylab.plot(t, x_hat, label='x_hat')
@@ -281,5 +298,9 @@
     loop_writer.Write(plant_files[0], plant_files[1])
 
     integral_loop_writer = control_loop.ControlLoopWriter(
-        'Integral' + name, integral_flywheels, namespaces=namespace)
+        'Integral' + name,
+        integral_flywheels,
+        namespaces=namespace,
+        plant_type='StateFeedbackHybridPlant',
+        observer_type='HybridKalman')
     integral_loop_writer.Write(controller_files[0], controller_files[1])
diff --git a/y2020/control_loops/python/hood.py b/y2020/control_loops/python/hood.py
index b4d8c84..7e75dce 100644
--- a/y2020/control_loops/python/hood.py
+++ b/y2020/control_loops/python/hood.py
@@ -31,13 +31,13 @@
 kHood = angular_system.AngularSystemParams(
     name='Hood',
     motor=control_loop.BAG(),
-    G=radians_per_turn,
-    J=0.08389,
-    q_pos=0.55,
-    q_vel=14.0,
+    G=radians_per_turn / (2.0 * numpy.pi),
+    J=4.0,
+    q_pos=0.15,
+    q_vel=5.0,
     kalman_q_pos=0.12,
-    kalman_q_vel=2.0,
-    kalman_q_voltage=2.5,
+    kalman_q_vel=10.0,
+    kalman_q_voltage=15.0,
     kalman_r_position=0.05)
 
 
@@ -47,6 +47,8 @@
         angular_system.PlotKick(kHood, R)
         angular_system.PlotMotion(kHood, R)
 
+    glog.info("Radians per turn: %f\n", radians_per_turn)
+
     # Write the generated constants out to a file.
     if len(argv) != 5:
         glog.fatal(
diff --git a/y2020/control_loops/python/turret.py b/y2020/control_loops/python/turret.py
index e276457..5a27afb 100644
--- a/y2020/control_loops/python/turret.py
+++ b/y2020/control_loops/python/turret.py
@@ -22,12 +22,12 @@
     name='Turret',
     motor=control_loop.Vex775Pro(),
     G=(6.0 / 60.0) * (26.0 / 150.0),
-    J=0.11,
-    q_pos=0.40,
-    q_vel=7.0,
+    J=0.20,
+    q_pos=0.30,
+    q_vel=4.5,
     kalman_q_pos=0.12,
-    kalman_q_vel=2.0,
-    kalman_q_voltage=3.0,
+    kalman_q_vel=10.0,
+    kalman_q_voltage=12.0,
     kalman_r_position=0.05)
 
 def main(argv):
diff --git a/y2020/control_loops/superstructure/BUILD b/y2020/control_loops/superstructure/BUILD
index d81b6b5..4534404 100644
--- a/y2020/control_loops/superstructure/BUILD
+++ b/y2020/control_loops/superstructure/BUILD
@@ -62,8 +62,11 @@
         ":superstructure_status_fbs",
         "//aos/controls:control_loop",
         "//aos/events:event_loop",
+        "//frc971/control_loops:control_loops_fbs",
+        "//frc971/control_loops/drivetrain:drivetrain_status_fbs",
         "//y2020:constants",
         "//y2020/control_loops/superstructure/shooter",
+        "//y2020/control_loops/superstructure/turret:aiming",
     ],
 )
 
diff --git a/y2020/control_loops/superstructure/shooter/flywheel_controller.cc b/y2020/control_loops/superstructure/shooter/flywheel_controller.cc
index 77fb769..07c3d99 100644
--- a/y2020/control_loops/superstructure/shooter/flywheel_controller.cc
+++ b/y2020/control_loops/superstructure/shooter/flywheel_controller.cc
@@ -11,8 +11,12 @@
 namespace superstructure {
 namespace shooter {
 
-FlywheelController::FlywheelController(StateFeedbackLoop<3, 1, 1> &&loop)
-    : loop_(new StateFeedbackLoop<3, 1, 1>(std::move(loop))) {
+FlywheelController::FlywheelController(
+    StateFeedbackLoop<3, 1, 1, double, StateFeedbackHybridPlant<3, 1, 1>,
+                      HybridKalman<3, 1, 1>> &&loop)
+    : loop_(new StateFeedbackLoop<3, 1, 1, double,
+                                  StateFeedbackHybridPlant<3, 1, 1>,
+                                  HybridKalman<3, 1, 1>>(std::move(loop))) {
   history_.fill(std::pair<double, ::aos::monotonic_clock::time_point>(
       0, ::aos::monotonic_clock::epoch()));
   Y_.setZero();
@@ -26,6 +30,18 @@
 void FlywheelController::set_position(
     double current_position,
     const aos::monotonic_clock::time_point position_timestamp) {
+  // Project time forwards: predict the observer over the time elapsed since
+  // the most recent position sample in the history buffer.
+  const int newest_history_position =
+      ((history_position_ == 0) ? kHistoryLength : history_position_) - 1;
+
+  if (!first_) {
+    loop_->UpdateObserver(
+        loop_->U(),
+        position_timestamp - std::get<1>(history_[newest_history_position]));
+  } else {
+    first_ = false;
+  }
+
   // Update position in the model.
   Y_ << current_position;
 
@@ -34,6 +50,8 @@
       std::pair<double, ::aos::monotonic_clock::time_point>(current_position,
                                                             position_timestamp);
   history_position_ = (history_position_ + 1) % kHistoryLength;
+
+  loop_->Correct(Y_);
 }
 
 double FlywheelController::voltage() const { return loop_->U(0, 0); }
@@ -45,22 +63,22 @@
     disabled = true;
   }
 
-  loop_->Correct(Y_);
-  loop_->Update(disabled);
+  loop_->UpdateController(disabled);
 }
 
 flatbuffers::Offset<FlywheelControllerStatus> FlywheelController::SetStatus(
     flatbuffers::FlatBufferBuilder *fbb) {
   // Compute the oldest point in the history.
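+  // history_position_ indexes the next slot to be overwritten, so it holds
+  // the oldest sample; the slot just before it holds the newest.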
-  const int oldest_history_position =
+  const int oldest_history_position = history_position_;
+  const int newest_history_position =
       ((history_position_ == 0) ? kHistoryLength : history_position_) - 1;
 
   const double total_loop_time = ::aos::time::DurationInSeconds(
-      std::get<1>(history_[history_position_]) -
+      std::get<1>(history_[newest_history_position]) -
       std::get<1>(history_[oldest_history_position]));
 
   const double distance_traveled =
-      std::get<0>(history_[history_position_]) -
+      std::get<0>(history_[newest_history_position]) -
       std::get<0>(history_[oldest_history_position]);
 
   // Compute the distance moved over that time period.
@@ -70,6 +88,7 @@
 
   builder.add_avg_angular_velocity(avg_angular_velocity_);
   builder.add_angular_velocity(loop_->X_hat(1, 0));
+  builder.add_voltage_error(loop_->X_hat(2, 0));
   builder.add_angular_velocity_goal(last_goal_);
   return builder.Finish();
 }
diff --git a/y2020/control_loops/superstructure/shooter/flywheel_controller.h b/y2020/control_loops/superstructure/shooter/flywheel_controller.h
index a3e9bdb..e130389 100644
--- a/y2020/control_loops/superstructure/shooter/flywheel_controller.h
+++ b/y2020/control_loops/superstructure/shooter/flywheel_controller.h
@@ -18,7 +18,9 @@
 // Handles the velocity control of each flywheel.
 class FlywheelController {
  public:
-  FlywheelController(StateFeedbackLoop<3, 1, 1> &&loop);
+  FlywheelController(
+      StateFeedbackLoop<3, 1, 1, double, StateFeedbackHybridPlant<3, 1, 1>,
+                        HybridKalman<3, 1, 1>> &&loop);
 
   // Sets the velocity goal in radians/sec
   void set_goal(double angular_velocity_goal);
@@ -45,7 +47,10 @@
   // The current sensor measurement.
   Eigen::Matrix<double, 1, 1> Y_;
   // The control loop.
-  ::std::unique_ptr<StateFeedbackLoop<3, 1, 1>> loop_;
+  ::std::unique_ptr<
+      StateFeedbackLoop<3, 1, 1, double, StateFeedbackHybridPlant<3, 1, 1>,
+                        HybridKalman<3, 1, 1>>>
+      loop_;
 
   // History array for calculating a filtered angular velocity.
   static constexpr int kHistoryLength = 10;
@@ -59,6 +64,8 @@
 
   double last_goal_ = 0;
 
+  bool first_ = true;
+
   DISALLOW_COPY_AND_ASSIGN(FlywheelController);
 };
 
diff --git a/y2020/control_loops/superstructure/shooter/shooter.cc b/y2020/control_loops/superstructure/shooter/shooter.cc
index 25f6a6a..a9f1c4c 100644
--- a/y2020/control_loops/superstructure/shooter/shooter.cc
+++ b/y2020/control_loops/superstructure/shooter/shooter.cc
@@ -12,7 +12,7 @@
 namespace shooter {
 
 namespace {
-const double kVelocityTolerance = 0.01;
+const double kVelocityTolerance = 20.0;
 }  // namespace
 
 Shooter::Shooter()
@@ -46,16 +46,18 @@
   accelerator_right_.set_position(position->theta_accelerator_right(),
                                   position_timestamp);
 
-  finisher_.Update(output == nullptr);
-  accelerator_left_.Update(output == nullptr);
-  accelerator_right_.Update(output == nullptr);
-
   // Update goal.
   if (goal) {
     finisher_.set_goal(goal->velocity_finisher());
     accelerator_left_.set_goal(goal->velocity_accelerator());
     accelerator_right_.set_goal(goal->velocity_accelerator());
+  }
 
+  finisher_.Update(output == nullptr);
+  accelerator_left_.Update(output == nullptr);
+  accelerator_right_.Update(output == nullptr);
+
+  if (goal) {
     if (UpToSpeed(goal) && goal->velocity_finisher() > kVelocityTolerance &&
         goal->velocity_accelerator() > kVelocityTolerance) {
       ready_ = true;
diff --git a/y2020/control_loops/superstructure/shooter_plot.pb b/y2020/control_loops/superstructure/shooter_plot.pb
index 01a1e20..16ab5f4 100644
--- a/y2020/control_loops/superstructure/shooter_plot.pb
+++ b/y2020/control_loops/superstructure/shooter_plot.pb
@@ -24,19 +24,87 @@
     line {
       y_signal {
         channel: "Status"
-        field: "hood.position"
+        field: "shooter.accelerator_left.avg_angular_velocity"
+      }
+    }
+    line {
+      y_signal {
+        channel: "Status"
+        field: "shooter.accelerator_right.avg_angular_velocity"
+      }
+    }
+    line {
+      y_signal {
+        channel: "Status"
+        field: "shooter.accelerator_left.angular_velocity"
+      }
+    }
+    line {
+      y_signal {
+        channel: "Status"
+        field: "shooter.accelerator_right.angular_velocity"
       }
     }
     line {
       y_signal {
         channel: "Goal"
-        field: "hood.unsafe_goal"
+        field: "shooter.velocity_accelerator"
       }
     }
     line {
       y_signal {
-        channel: "Position"
-        field: "hood.encoder"
+        channel: "Status"
+        field: "shooter.finisher.avg_angular_velocity"
+      }
+    }
+    line {
+      y_signal {
+        channel: "Status"
+        field: "shooter.finisher.angular_velocity"
+      }
+    }
+    line {
+      y_signal {
+        channel: "Goal"
+        field: "shooter.velocity_finisher"
+      }
+    }
+  }
+  axes {
+    line {
+      y_signal {
+        channel: "Output"
+        field: "accelerator_left_voltage"
+      }
+    }
+    line {
+      y_signal {
+        channel: "Output"
+        field: "accelerator_right_voltage"
+      }
+    }
+    line {
+      y_signal {
+        channel: "Status"
+        field: "shooter.accelerator_left.voltage_error"
+      }
+    }
+    line {
+      y_signal {
+        channel: "Status"
+        field: "shooter.accelerator_right.voltage_error"
+      }
+    }
+    line {
+      y_signal {
+        channel: "Output"
+        field: "finisher_voltage"
+      }
+    }
+    line {
+      y_signal {
+        channel: "Status"
+        field: "shooter.finisher.voltage_error"
       }
     }
     ylabel: "hood position"
diff --git a/y2020/control_loops/superstructure/superstructure.cc b/y2020/control_loops/superstructure/superstructure.cc
index 39c913e..8947f85 100644
--- a/y2020/control_loops/superstructure/superstructure.cc
+++ b/y2020/control_loops/superstructure/superstructure.cc
@@ -16,7 +16,11 @@
       hood_(constants::GetValues().hood),
       intake_joint_(constants::GetValues().intake),
       turret_(constants::GetValues().turret.subsystem_params),
-      shooter_() {
+      drivetrain_status_fetcher_(
+          event_loop->MakeFetcher<frc971::control_loops::drivetrain::Status>(
+              "/drivetrain")),
+      joystick_state_fetcher_(
+          event_loop->MakeFetcher<aos::JoystickState>("/aos")) {
   event_loop->SetRuntimeRealtimePriority(30);
 }
 
@@ -34,6 +38,22 @@
   const aos::monotonic_clock::time_point position_timestamp =
       event_loop()->context().monotonic_event_time;
 
+  if (drivetrain_status_fetcher_.Fetch()) {
+    aos::Alliance alliance = aos::Alliance::kInvalid;
+    if (joystick_state_fetcher_.Fetch()) {
+      alliance = joystick_state_fetcher_->alliance();
+    }
+    const turret::Aimer::WrapMode mode =
+        (unsafe_goal != nullptr && unsafe_goal->shooting())
+            ? turret::Aimer::WrapMode::kAvoidWrapping
+            : turret::Aimer::WrapMode::kAvoidEdges;
+    aimer_.Update(drivetrain_status_fetcher_.get(), alliance, mode,
+                  turret::Aimer::ShotMode::kShootOnTheFly);
+  }
+
+  const flatbuffers::Offset<AimerStatus> aimer_status_offset =
+      aimer_.PopulateStatus(status->fbb());
+
   OutputT output_struct;
 
   flatbuffers::Offset<AbsoluteEncoderProfiledJointStatus> hood_status_offset =
@@ -42,6 +62,30 @@
                     output != nullptr ? &(output_struct.hood_voltage) : nullptr,
                     status->fbb());
 
+  if (unsafe_goal != nullptr) {
+    if (unsafe_goal->shooting() &&
+        shooting_start_time_ == aos::monotonic_clock::min_time) {
+      shooting_start_time_ = position_timestamp;
+    }
+
+    if (unsafe_goal->shooting()) {
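+      // While shooting, square-wave the intake's minimum position between
+      // -0.25 and -0.75 every 250 ms, presumably to shake balls loose and
+      // feed them through.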
+      constexpr std::chrono::milliseconds kPeriod =
+          std::chrono::milliseconds(250);
+      if ((position_timestamp - shooting_start_time_) % (kPeriod * 2) <
+          kPeriod) {
+        intake_joint_.set_min_position(-0.25);
+      } else {
+        intake_joint_.set_min_position(-0.75);
+      }
+    } else {
+      intake_joint_.clear_min_position();
+    }
+
+    if (!unsafe_goal->shooting()) {
+      shooting_start_time_ = aos::monotonic_clock::min_time;
+    }
+  }
+
   flatbuffers::Offset<AbsoluteEncoderProfiledJointStatus> intake_status_offset =
       intake_joint_.Iterate(
           unsafe_goal != nullptr ? unsafe_goal->intake() : nullptr,
@@ -49,10 +93,14 @@
           output != nullptr ? &(output_struct.intake_joint_voltage) : nullptr,
           status->fbb());
 
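+  // While turret_tracking is set, follow the auto-aimer's goal; otherwise
+  // fall back to the manually specified turret goal.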
+  const frc971::control_loops::StaticZeroingSingleDOFProfiledSubsystemGoal
+      *turret_goal = unsafe_goal != nullptr ? (unsafe_goal->turret_tracking()
+                                                   ? aimer_.TurretGoal()
+                                                   : unsafe_goal->turret())
+                                            : nullptr;
   flatbuffers::Offset<PotAndAbsoluteEncoderProfiledJointStatus>
       turret_status_offset = turret_.Iterate(
-          unsafe_goal != nullptr ? unsafe_goal->turret() : nullptr,
-          position->turret(),
+          turret_goal, position->turret(),
           output != nullptr ? &(output_struct.turret_voltage) : nullptr,
           status->fbb());
 
@@ -64,19 +112,40 @@
 
   climber_.Iterate(unsafe_goal, output != nullptr ? &(output_struct) : nullptr);
 
+  const AbsoluteEncoderProfiledJointStatus *const hood_status =
+      GetMutableTemporaryPointer(*status->fbb(), hood_status_offset);
+
+  const PotAndAbsoluteEncoderProfiledJointStatus *const turret_status =
+      GetMutableTemporaryPointer(*status->fbb(), turret_status_offset);
+
+  if (output != nullptr) {
+    // Friction is a pain and puts a really high burden on the integrator.
+    const double turret_velocity_sign =
+        turret_status->velocity() * kTurretFrictionGain;
+    output_struct.turret_voltage +=
+        std::clamp(turret_velocity_sign, -kTurretFrictionVoltageLimit,
+                   kTurretFrictionVoltageLimit);
+    output_struct.turret_voltage =
+        std::clamp(output_struct.turret_voltage, -turret_.operating_voltage(),
+                   turret_.operating_voltage());
+
+    // Friction is a pain and puts a really high burden on the integrator.
+    const double hood_velocity_sign =
+        hood_status->velocity() * kHoodFrictionGain;
+    output_struct.hood_voltage +=
+        std::clamp(hood_velocity_sign, -kHoodFrictionVoltageLimit,
+                   kHoodFrictionVoltageLimit);
+
+    // And dither the output with a small 30 Hz sine wave to help the hood
+    // break through static friction.
+    time_ += 0.00505;
+    output_struct.hood_voltage += 1.3 * std::sin(time_ * 2.0 * M_PI * 30.0);
+  }
+
   bool zeroed;
   bool estopped;
 
   {
-    const AbsoluteEncoderProfiledJointStatus *const hood_status =
-        GetMutableTemporaryPointer(*status->fbb(), hood_status_offset);
-
     const AbsoluteEncoderProfiledJointStatus *const intake_status =
         GetMutableTemporaryPointer(*status->fbb(), intake_status_offset);
 
-    const PotAndAbsoluteEncoderProfiledJointStatus *const turret_status =
-        GetMutableTemporaryPointer(*status->fbb(), turret_status_offset);
-
     zeroed = hood_status->zeroed() && intake_status->zeroed() &&
              turret_status->zeroed();
     estopped = hood_status->estopped() || intake_status->estopped() ||
@@ -92,12 +161,27 @@
   status_builder.add_intake(intake_status_offset);
   status_builder.add_turret(turret_status_offset);
   status_builder.add_shooter(shooter_status_offset);
+  status_builder.add_aimer(aimer_status_offset);
 
   status->Send(status_builder.Finish());
 
   if (output != nullptr) {
     if (unsafe_goal) {
-      output_struct.intake_roller_voltage = unsafe_goal->roller_voltage();
+      output_struct.washing_machine_spinner_voltage = 0.0;
+      if (unsafe_goal->shooting()) {
+        if (shooter_.ready() &&
+            unsafe_goal->shooter()->velocity_accelerator() > 10.0 &&
+            unsafe_goal->shooter()->velocity_finisher() > 10.0) {
+          output_struct.feeder_voltage = 12.0;
+        } else {
+          output_struct.feeder_voltage = 0.0;
+        }
+        output_struct.washing_machine_spinner_voltage = 5.0;
+        output_struct.intake_roller_voltage = 3.0;
+      } else {
+        output_struct.feeder_voltage = 0.0;
+        output_struct.intake_roller_voltage = unsafe_goal->roller_voltage();
+      }
     } else {
       output_struct.intake_roller_voltage = 0.0;
     }
diff --git a/y2020/control_loops/superstructure/superstructure.h b/y2020/control_loops/superstructure/superstructure.h
index d8ce917..1a9d401 100644
--- a/y2020/control_loops/superstructure/superstructure.h
+++ b/y2020/control_loops/superstructure/superstructure.h
@@ -3,6 +3,8 @@
 
 #include "aos/controls/control_loop.h"
 #include "aos/events/event_loop.h"
+#include "aos/robot_state/joystick_state_generated.h"
+#include "frc971/control_loops/drivetrain/drivetrain_status_generated.h"
 #include "y2020/constants.h"
 #include "y2020/control_loops/superstructure/shooter/shooter.h"
 #include "y2020/control_loops/superstructure/superstructure_goal_generated.h"
@@ -10,6 +12,7 @@
 #include "y2020/control_loops/superstructure/superstructure_position_generated.h"
 #include "y2020/control_loops/superstructure/superstructure_status_generated.h"
 #include "y2020/control_loops/superstructure/climber.h"
+#include "y2020/control_loops/superstructure/turret/aiming.h"
 
 namespace y2020 {
 namespace control_loops {
@@ -21,6 +24,14 @@
   explicit Superstructure(::aos::EventLoop *event_loop,
                           const ::std::string &name = "/superstructure");
 
+  // Velocity gains for the friction compensation on the turret and hood, and
+  // the corresponding voltage caps.
+  static constexpr double kTurretFrictionGain = 10.0;
+  static constexpr double kTurretFrictionVoltageLimit = 1.5;
+
+  static constexpr double kHoodFrictionGain = 40.0;
+  static constexpr double kHoodFrictionVoltageLimit = 1.5;
+
   using PotAndAbsoluteEncoderSubsystem =
       ::frc971::control_loops::StaticZeroingSingleDOFProfiledSubsystem<
           ::frc971::zeroing::PotAndAbsoluteEncoderZeroingEstimator,
@@ -45,8 +56,19 @@
   AbsoluteEncoderSubsystem intake_joint_;
   PotAndAbsoluteEncoderSubsystem turret_;
   shooter::Shooter shooter_;
+  turret::Aimer aimer_;
+
+  aos::Fetcher<frc971::control_loops::drivetrain::Status>
+      drivetrain_status_fetcher_;
+  aos::Fetcher<aos::JoystickState> joystick_state_fetcher_;
 
   Climber climber_;
+
+  aos::monotonic_clock::time_point shooting_start_time_ =
+      aos::monotonic_clock::min_time;
+
+  double time_ = 0.0;
+
   DISALLOW_COPY_AND_ASSIGN(Superstructure);
 };
 
diff --git a/y2020/control_loops/superstructure/superstructure_goal.fbs b/y2020/control_loops/superstructure/superstructure_goal.fbs
index f066ab0..c7fe643 100644
--- a/y2020/control_loops/superstructure/superstructure_goal.fbs
+++ b/y2020/control_loops/superstructure/superstructure_goal.fbs
@@ -27,9 +27,7 @@
   // Positive is rollers intaking to Washing Machine.
   roller_voltage:float;
 
-  // 0 = facing the front of the robot. Positive rotates counterclockwise.
-  // TODO(Kai): Define which wrap of the shooter is 0 if it can rotate past
-  // forward more than once.
+  // 0 = facing the back of the robot. Positive rotates counterclockwise.
   turret:frc971.control_loops.StaticZeroingSingleDOFProfiledSubsystemGoal;
 
   // Only applies if shooter_tracking = false.
diff --git a/y2020/control_loops/superstructure/superstructure_lib_test.cc b/y2020/control_loops/superstructure/superstructure_lib_test.cc
index f68db08..e8f1f8d 100644
--- a/y2020/control_loops/superstructure/superstructure_lib_test.cc
+++ b/y2020/control_loops/superstructure/superstructure_lib_test.cc
@@ -36,6 +36,7 @@
     CreateStaticZeroingSingleDOFProfiledSubsystemGoal;
 using ::frc971::control_loops::PositionSensorSimulator;
 using ::frc971::control_loops::StaticZeroingSingleDOFProfiledSubsystemGoal;
+typedef ::frc971::control_loops::drivetrain::Status DrivetrainStatus;
 typedef Superstructure::AbsoluteEncoderSubsystem AbsoluteEncoderSubsystem;
 typedef Superstructure::PotAndAbsoluteEncoderSubsystem
     PotAndAbsoluteEncoderSubsystem;
@@ -72,7 +73,6 @@
             event_loop_->MakeFetcher<Status>("/superstructure")),
         superstructure_output_fetcher_(
             event_loop_->MakeFetcher<Output>("/superstructure")),
-
         hood_plant_(new CappedTestPlant(hood::MakeHoodPlant())),
         hood_encoder_(constants::GetValues()
                           .hood.zeroing_constants.one_revolution_distance),
@@ -235,17 +235,29 @@
     EXPECT_NEAR(superstructure_output_fetcher_->turret_voltage(), 0.0,
                 voltage_check_turret);
 
+    // Invert the friction model.
+    const double hood_velocity_sign =
+        hood_plant_->X(1) * Superstructure::kHoodFrictionGain;
     ::Eigen::Matrix<double, 1, 1> hood_U;
     hood_U << superstructure_output_fetcher_->hood_voltage() +
-                  hood_plant_->voltage_offset();
+                  hood_plant_->voltage_offset() -
+                  std::clamp(hood_velocity_sign,
+                             -Superstructure::kHoodFrictionVoltageLimit,
+                             Superstructure::kHoodFrictionVoltageLimit);
 
     ::Eigen::Matrix<double, 1, 1> intake_U;
     intake_U << superstructure_output_fetcher_->intake_joint_voltage() +
                     intake_plant_->voltage_offset();
 
+    // Invert the friction model.
+    const double turret_velocity_sign =
+        turret_plant_->X(1) * Superstructure::kTurretFrictionGain;
     ::Eigen::Matrix<double, 1, 1> turret_U;
     turret_U << superstructure_output_fetcher_->turret_voltage() +
-                    turret_plant_->voltage_offset();
+                    turret_plant_->voltage_offset() -
+                    std::clamp(turret_velocity_sign,
+                               -Superstructure::kTurretFrictionVoltageLimit,
+                               Superstructure::kTurretFrictionVoltageLimit);
 
     ::Eigen::Matrix<double, 1, 1> accelerator_left_U;
     accelerator_left_U
@@ -386,6 +398,10 @@
             test_event_loop_->MakeFetcher<Output>("/superstructure")),
         superstructure_position_fetcher_(
             test_event_loop_->MakeFetcher<Position>("/superstructure")),
+        drivetrain_status_sender_(
+            test_event_loop_->MakeSender<DrivetrainStatus>("/drivetrain")),
+        joystick_state_sender_(
+            test_event_loop_->MakeSender<aos::JoystickState>("/aos")),
         superstructure_event_loop_(MakeEventLoop("superstructure", roborio_)),
         superstructure_(superstructure_event_loop_.get()),
         superstructure_plant_event_loop_(MakeEventLoop("plant", roborio_)),
@@ -494,6 +510,8 @@
   ::aos::Fetcher<Status> superstructure_status_fetcher_;
   ::aos::Fetcher<Output> superstructure_output_fetcher_;
   ::aos::Fetcher<Position> superstructure_position_fetcher_;
+  ::aos::Sender<DrivetrainStatus> drivetrain_status_sender_;
+  ::aos::Sender<aos::JoystickState> joystick_state_sender_;
 
   // Create a control loop and simulation.
   ::std::unique_ptr<::aos::EventLoop> superstructure_event_loop_;
@@ -667,7 +685,8 @@
     ASSERT_TRUE(builder.Send(goal_builder.Finish()));
   }
   superstructure_plant_.set_peak_hood_velocity(23.0);
-  superstructure_plant_.set_peak_hood_acceleration(0.2);
+  // The 30 Hz sine wave dithering the hood effectively causes the
+  // acceleration check to be ignored.
+  superstructure_plant_.set_peak_hood_acceleration(5.5);
 
   superstructure_plant_.set_peak_intake_velocity(23.0);
   superstructure_plant_.set_peak_intake_acceleration(0.2);
@@ -750,7 +769,7 @@
   }
 
   // Give it a lot of time to get there.
-  RunFor(chrono::seconds(9));
+  RunFor(chrono::seconds(15));
 
   VerifyNearGoal();
 }
@@ -814,6 +833,79 @@
   VerifyNearGoal();
 }
 
+class SuperstructureAllianceTest
+    : public SuperstructureTest,
+      public ::testing::WithParamInterface<aos::Alliance> {};
+
+// Tests that the turret switches to auto-aiming when we set turret_tracking to
+// true.
+TEST_P(SuperstructureAllianceTest, TurretAutoAim) {
+  SetEnabled(true);
+  // Set a reasonable goal.
+  const frc971::control_loops::Pose target = turret::OuterPortPose(GetParam());
+
+  WaitUntilZeroed();
+
+  constexpr double kShotAngle = 1.0;
+  {
+    auto builder = joystick_state_sender_.MakeBuilder();
+
+    aos::JoystickState::Builder joystick_builder =
+        builder.MakeBuilder<aos::JoystickState>();
+
+    joystick_builder.add_alliance(GetParam());
+
+    ASSERT_TRUE(builder.Send(joystick_builder.Finish()));
+  }
+  {
+    auto builder = superstructure_goal_sender_.MakeBuilder();
+
+    Goal::Builder goal_builder = builder.MakeBuilder<Goal>();
+
+    goal_builder.add_turret_tracking(true);
+
+    ASSERT_TRUE(builder.Send(goal_builder.Finish()));
+  }
+
+  {
+    auto builder = drivetrain_status_sender_.MakeBuilder();
+
+    frc971::control_loops::drivetrain::LocalizerState::Builder
+        localizer_builder = builder.MakeBuilder<
+            frc971::control_loops::drivetrain::LocalizerState>();
+    localizer_builder.add_left_velocity(0.0);
+    localizer_builder.add_right_velocity(0.0);
+    const auto localizer_offset = localizer_builder.Finish();
+
+    DrivetrainStatus::Builder status_builder =
+        builder.MakeBuilder<DrivetrainStatus>();
+
+    // Set the robot up at kShotAngle off from the target, 1m away.
+    status_builder.add_x(target.abs_pos().x() + std::cos(kShotAngle));
+    status_builder.add_y(target.abs_pos().y() + std::sin(kShotAngle));
+    status_builder.add_theta(0.0);
+    status_builder.add_localizer(localizer_offset);
+
+    ASSERT_TRUE(builder.Send(status_builder.Finish()));
+  }
+
+  // Give it time to stabilize.
+  RunFor(chrono::seconds(1));
+
+  superstructure_status_fetcher_.Fetch();
+  EXPECT_NEAR(kShotAngle, superstructure_status_fetcher_->turret()->position(),
+              1e-4);
+  EXPECT_FLOAT_EQ(kShotAngle,
+                  superstructure_status_fetcher_->aimer()->turret_position());
+  EXPECT_FLOAT_EQ(0,
+                  superstructure_status_fetcher_->aimer()->turret_velocity());
+}
+
+INSTANTIATE_TEST_CASE_P(ShootAnyAlliance, SuperstructureAllianceTest,
+                        ::testing::Values(aos::Alliance::kRed,
+                                          aos::Alliance::kBlue,
+                                          aos::Alliance::kInvalid));
+
 }  // namespace testing
 }  // namespace superstructure
 }  // namespace control_loops
diff --git a/y2020/control_loops/superstructure/superstructure_status.fbs b/y2020/control_loops/superstructure/superstructure_status.fbs
index e8f7637..81293d3 100644
--- a/y2020/control_loops/superstructure/superstructure_status.fbs
+++ b/y2020/control_loops/superstructure/superstructure_status.fbs
@@ -14,6 +14,9 @@
   // The target speed selected by the lookup table or from manual override
   // Can be compared to velocity to determine if ready.
   angular_velocity_goal:double;
+
+  // Voltage error.
+  voltage_error:double;
 }
 
 table ShooterStatus {
@@ -26,6 +29,15 @@
   accelerator_right:FlywheelControllerStatus;
 }
 
+table AimerStatus {
+  // The current goal angle for the turret auto-tracking, in radians.
+  turret_position:double;
+  // The current goal velocity for the turret, in radians / sec.
+  turret_velocity:double;
+  // Whether we are currently aiming for the inner port.
+  aiming_for_inner_port:bool;
+}
+
 table Status {
   // All subsystems know their location.
   zeroed:bool;
@@ -43,6 +55,9 @@
 
   // Status of the control_panel
   control_panel:frc971.control_loops.RelativeEncoderProfiledJointStatus;
+
+  // Status of the vision auto-tracking.
+  aimer:AimerStatus;
 }
 
 root_type Status;
diff --git a/y2020/control_loops/superstructure/turret/BUILD b/y2020/control_loops/superstructure/turret/BUILD
index 894d418..010c6fd 100644
--- a/y2020/control_loops/superstructure/turret/BUILD
+++ b/y2020/control_loops/superstructure/turret/BUILD
@@ -30,3 +30,28 @@
         "//frc971/control_loops:state_feedback_loop",
     ],
 )
+
+cc_library(
+    name = "aiming",
+    srcs = ["aiming.cc"],
+    hdrs = ["aiming.h"],
+    deps = [
+        "//aos:flatbuffers",
+        "//frc971/control_loops:control_loops_fbs",
+        "//frc971/control_loops:pose",
+        "//frc971/control_loops:profiled_subsystem_fbs",
+        "//frc971/control_loops/drivetrain:drivetrain_status_fbs",
+        "//y2020:constants",
+        "//y2020/control_loops/drivetrain:drivetrain_base",
+        "//y2020/control_loops/superstructure:superstructure_status_fbs",
+    ],
+)
+
+cc_test(
+    name = "aiming_test",
+    srcs = ["aiming_test.cc"],
+    deps = [
+        ":aiming",
+        "//aos/testing:googletest",
+    ],
+)
diff --git a/y2020/control_loops/superstructure/turret/aiming.cc b/y2020/control_loops/superstructure/turret/aiming.cc
new file mode 100644
index 0000000..360fd1a
--- /dev/null
+++ b/y2020/control_loops/superstructure/turret/aiming.cc
@@ -0,0 +1,252 @@
+#include "y2020/control_loops/superstructure/turret/aiming.h"
+
+#include "y2020/constants.h"
+#include "y2020/control_loops/drivetrain/drivetrain_base.h"
+
+namespace y2020 {
+namespace control_loops {
+namespace superstructure {
+namespace turret {
+
+using frc971::control_loops::Pose;
+
+// Shooting-on-the-fly concept:
+// The current way that we manage shooting-on-the-fly endeavors to be reasonably
+// simple, until we get a chance to see how the actual dynamics play out.
+// Essentially, we assume that the robot's velocity will represent a constant
+// offset to the ball's velocity over the entire trajectory to the goal and
+// then offset the target that we are pointing at based on that.
+// Let us assume that, if the robot shoots while not moving, regardless of shot
+// distance, the ball's average speed-over-ground to the target will be a
+// constant s_shot (this implies that if the robot is driving straight towards
+// the target, the actual ball speed-over-ground will be greater than s_shot).
+// We will define things in the robot's coordinate frame. We will be shooting
+// at a target that is at position (target_x, target_y) in the robot frame. The
+// robot is travelling at (v_robot_x, v_robot_y). In order to shoot the ball,
+// we need to generate some virtual target (virtual_x, virtual_y) that we will
+// shoot at as if we were standing still. The total time-of-flight to that
+// target will be t_shot = norm2(virtual_x, virtual_y) / s_shot.
+// We will have virtual_x + v_robot_x * t_shot = target_x, and the same
+// for y. This gives us three equations and three unknowns (virtual_x,
+// virtual_y, and t_shot) which, given appropriate assumptions, can be solved
+// analytically. However, doing so is obnoxious and, given appropriate functions
+// for t_shot, may not be feasible. As such, instead of actually solving the
+// equation analytically, we will use an iterative solution where we maintain
+// a current virtual target estimate. We start with this estimate as if the
+// robot is stationary. We then use this estimate to calculate t_shot, and
+// calculate the next value for the virtual target.
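+//
+// As an illustrative example (made-up numbers): with the target 5 m straight
+// ahead and the robot sliding sideways at 2 m/s, iteration 0 gives
+// t_shot = 5 / s_shot; iteration 1 shifts the virtual target sideways by
+// 2 * t_shot meters, which slightly increases the distance and thus t_shot;
+// subsequent iterations refine this. The corrections shrink each time as long
+// as the robot speed is small relative to s_shot, so a few iterations suffice.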
+
+namespace {
+// The overall length and width of the field, in meters.
+constexpr double kFieldLength = 15.983;
+constexpr double kFieldWidth = 8.212;
+// Height of the center of the port(s) above the ground, in meters.
+constexpr double kPortHeight = 2.494;
+
+// Maximum shot angle at which we will attempt to make the shot into the inner
+// port, in radians. Zero would imply that we could only shoot if we were
+// exactly perpendicular to the target. Larger numbers allow us to aim at the
+// inner port more aggressively, at the risk of being more likely to miss the
+// outer port entirely.
+constexpr double kMaxInnerPortAngle = 20.0 * M_PI / 180.0;
+
+// Distance (in meters) from the edge of the field to the port.
+constexpr double kEdgeOfFieldToPort = 2.404;
+
+// The amount (in meters) that the inner port is set back from the outer port.
+constexpr double kInnerPortBackset = 0.743;
+
+// Average speed-over-ground of the ball on its way to the target. Our current
+// model assumes constant ball velocity regardless of shot distance.
+// TODO(james): Is this an appropriate model? For the outer port it should be
+// good enough that it doesn't really matter, but for the inner port it may be
+// more appropriate to do something more dynamic--however, it is not yet clear
+// how we would best estimate speed-over-ground given a hood angle + shooter
+// speed. Assuming a constant average speed over the course of the trajectory
+// should be reasonable, since all we are trying to do here is calculate an
+// overall time-of-flight (we don't actually care about the ball speed itself).
+constexpr double kBallSpeedOverGround = 15.0;  // m/s
+
+// Minimum distance that we must be from the inner port in order to attempt the
+// shot--this is to account for the fact that if we are too close to the target,
+// then we won't have a clear shot on the inner port.
+constexpr double kMinimumInnerPortShotDistance = 4.0;
+
+// Amount of buffer, in radians, to leave to help avoid wrapping. I.e., any time
+// that we are in kAvoidEdges mode, we will keep ourselves at least
+// kAntiWrapBuffer radians away from the hardstops.
+constexpr double kAntiWrapBuffer = 0.2;
+
+// If the turret is at zero, then it will be at this angle relative to pointed
+// straight forwards on the robot.
+constexpr double kTurretZeroOffset = M_PI;
+
+constexpr double kTurretRange = constants::Values::kTurretRange().range();
+static_assert((kTurretRange - 2.0 * kAntiWrapBuffer) > 2.0 * M_PI,
+              "kAntiWrap buffer should be small enough that we still have 360 "
+              "degrees of range.");
+
+Pose ReverseSideOfField(Pose target) {
+  *target.mutable_pos() *= -1;
+  target.set_theta(aos::math::NormalizeAngle(target.rel_theta() + M_PI));
+  return target;
+}
+
+flatbuffers::DetachedBuffer MakePrefilledGoal() {
+  flatbuffers::FlatBufferBuilder fbb;
+  fbb.ForceDefaults(true);
+  Aimer::Goal::Builder builder(fbb);
+  builder.add_unsafe_goal(0);
+  builder.add_goal_velocity(0);
+  builder.add_ignore_profile(true);
+  fbb.Finish(builder.Finish());
+  return fbb.Release();
+}
+
+// This implements the iteration in the described shooting-on-the-fly algorithm.
+// robot_pose: Current robot pose.
+// robot_velocity: Current robot velocity, in the absolute field frame.
+// target_pose: Absolute goal Pose.
+// current_virtual_pose: Current estimate of where we want to shoot at.
+Pose IterateVirtualGoal(const Pose &robot_pose,
+                        const Eigen::Vector3d &robot_velocity,
+                        const Pose &target_pose,
+                        const Pose &current_virtual_pose) {
+  const double air_time =
+      current_virtual_pose.Rebase(&robot_pose).xy_norm() / kBallSpeedOverGround;
+  const Eigen::Vector3d virtual_target =
+      target_pose.abs_pos() - air_time * robot_velocity;
+  return Pose(virtual_target, target_pose.abs_theta());
+}
+}  // namespace
+
+Pose InnerPortPose(aos::Alliance alliance) {
+  const Pose target({kFieldLength / 2 + kInnerPortBackset,
+                     -kFieldWidth / 2.0 + kEdgeOfFieldToPort, kPortHeight},
+                    0.0);
+  if (alliance == aos::Alliance::kRed) {
+    return ReverseSideOfField(target);
+  }
+  return target;
+}
+
+Pose OuterPortPose(aos::Alliance alliance) {
+  Pose target(
+      {kFieldLength / 2, -kFieldWidth / 2.0 + kEdgeOfFieldToPort, kPortHeight},
+      0.0);
+  if (alliance == aos::Alliance::kRed) {
+    return ReverseSideOfField(target);
+  }
+  return target;
+}
+
+Aimer::Aimer() : goal_(MakePrefilledGoal()) {}
+
+void Aimer::Update(const Status *status, aos::Alliance alliance,
+                   WrapMode wrap_mode, ShotMode shot_mode) {
+  const Pose robot_pose({status->x(), status->y(), 0}, status->theta());
+  const Pose inner_port = InnerPortPose(alliance);
+  const Pose outer_port = OuterPortPose(alliance);
+  const Pose robot_pose_from_inner_port = robot_pose.Rebase(&inner_port);
+
+  // TODO(james): This code should probably just be in the localizer and have
+  // xdot/ydot get populated in the status message directly... that way we don't
+  // keep duplicating this math.
+  // Also, this doesn't currently take into account the lateral velocity of the
+  // robot. All of this would be helped by just doing this work in the Localizer
+  // itself.
+  const Eigen::Vector2d linear_angular =
+      drivetrain::GetDrivetrainConfig().Tlr_to_la() *
+      Eigen::Vector2d(status->localizer()->left_velocity(),
+                      status->localizer()->right_velocity());
+  const double xdot = linear_angular(0) * std::cos(status->theta());
+  const double ydot = linear_angular(0) * std::sin(status->theta());
+
+  const double inner_port_angle = robot_pose_from_inner_port.heading();
+  const double inner_port_distance = robot_pose_from_inner_port.xy_norm();
+  aiming_for_inner_port_ =
+      (std::abs(inner_port_angle) < kMaxInnerPortAngle) &&
+      (inner_port_distance > kMinimumInnerPortShotDistance);
+
+  // This code manages compensating the goal turret heading for the robot's
+  // current velocity, to allow for shooting on-the-fly.
+  // This works by solving for the correct turret angle numerically, since while
+  // we technically could do it analytically, doing so would both make it hard
+  // to make small changes (since it would force us to redo the math) and be
+  // error-prone since it'd be easy to make typos or other minor math errors.
+  Pose virtual_goal;
+  {
+    const Pose goal = aiming_for_inner_port_ ? inner_port : outer_port;
+    virtual_goal = goal;
+    if (shot_mode == ShotMode::kShootOnTheFly) {
+      for (int ii = 0; ii < 3; ++ii) {
+        virtual_goal =
+            IterateVirtualGoal(robot_pose, {xdot, ydot, 0}, goal, virtual_goal);
+      }
+      VLOG(1) << "Shooting-on-the-fly target position: "
+              << virtual_goal.abs_pos().transpose();
+    }
+    virtual_goal = virtual_goal.Rebase(&robot_pose);
+  }
+
+  const double heading_to_goal = virtual_goal.heading();
+  CHECK(status->has_localizer());
+  distance_ = virtual_goal.xy_norm();
+
+  // The following code all works to calculate what the rate of turn of the
+  // turret should be. The code only accounts for the rate of turn if we are
+  // aiming at a static target, which should be close enough to correct that it
+  // doesn't matter that it fails to account for the
+  // shooting-on-the-fly compensation.
+  const double rel_x = virtual_goal.rel_pos().x();
+  const double rel_y = virtual_goal.rel_pos().y();
+  const double squared_norm = rel_x * rel_x + rel_y * rel_y;
+
+  // If squared_norm gets to be too close to zero, just zero out the relevant
+  // term to prevent NaNs. Note that this doesn't address the chattering that
+  // would likely occur if we were to get excessively close to the target.
+  // Note that x and y terms are swapped relative to what you would normally see
+  // in the derivative of atan because xdot and ydot are the derivatives of
+  // robot_pos and we are working with the atan of (target_pos - robot_pos).
+  const double atan_diff = (squared_norm < 1e-3)
+                               ? 0.0
+                               : (rel_y * xdot - rel_x * ydot) / squared_norm;
+  // heading = atan2(relative_y, relative_x) - robot_theta
+  // dheading / dt = (rel_x * rel_y' - rel_y * rel_x') / (rel_x^2 + rel_y^2) - dtheta / dt
+  const double dheading_dt = atan_diff - linear_angular(1);
+
+  double range = kTurretRange;
+  if (wrap_mode == WrapMode::kAvoidEdges) {
+    range -= 2.0 * kAntiWrapBuffer;
+  }
+  // Calculate a goal turret heading such that it is within +/- pi of the
+  // current position (i.e., a goal that would minimize the amount the turret
+  // would have to travel).
+  // We then check if this goal would bring us out of range of the valid angles,
+  // and if it would, we reset to be within +/- pi of zero.
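+  // For example (illustrative numbers): if the current goal is 3.0 rad and
+  // the desired heading normalizes to -3.0 rad, the nearest equivalent goal
+  // is 3.0 + NormalizeAngle(-3.0 - 3.0) ~= 3.28 rad, so the turret travels
+  // ~0.28 rad instead of unwinding ~6 rad.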
+  double turret_heading =
+      goal_.message().unsafe_goal() +
+      aos::math::NormalizeAngle(heading_to_goal - kTurretZeroOffset -
+                                goal_.message().unsafe_goal());
+  if (std::abs(turret_heading - constants::Values::kTurretRange().middle()) >
+      range / 2.0) {
+    turret_heading = aos::math::NormalizeAngle(turret_heading);
+  }
+
+  goal_.mutable_message()->mutate_unsafe_goal(turret_heading);
+  goal_.mutable_message()->mutate_goal_velocity(dheading_dt);
+}
+
+flatbuffers::Offset<AimerStatus> Aimer::PopulateStatus(
+    flatbuffers::FlatBufferBuilder *fbb) const {
+  AimerStatus::Builder builder(*fbb);
+  builder.add_turret_position(goal_.message().unsafe_goal());
+  builder.add_turret_velocity(goal_.message().goal_velocity());
+  builder.add_aiming_for_inner_port(aiming_for_inner_port_);
+  return builder.Finish();
+}
+
+}  // namespace turret
+}  // namespace superstructure
+}  // namespace control_loops
+}  // namespace y2020
diff --git a/y2020/control_loops/superstructure/turret/aiming.h b/y2020/control_loops/superstructure/turret/aiming.h
new file mode 100644
index 0000000..3b3071e
--- /dev/null
+++ b/y2020/control_loops/superstructure/turret/aiming.h
@@ -0,0 +1,74 @@
+#ifndef y2020_CONTROL_LOOPS_SUPERSTRUCTURE_TURRET_AIMING_H_
+#define y2020_CONTROL_LOOPS_SUPERSTRUCTURE_TURRET_AIMING_H_
+
+#include "aos/flatbuffers.h"
+#include "aos/robot_state/joystick_state_generated.h"
+#include "frc971/control_loops/drivetrain/drivetrain_status_generated.h"
+#include "frc971/control_loops/pose.h"
+#include "frc971/control_loops/profiled_subsystem_generated.h"
+#include "y2020/control_loops/superstructure/superstructure_status_generated.h"
+
+namespace y2020 {
+namespace control_loops {
+namespace superstructure {
+namespace turret {
+
+// Returns the port that we want to score on given our current alliance. The yaw
+// of the port will be such that the positive x axis points out the back of the
+// target.
+frc971::control_loops::Pose InnerPortPose(aos::Alliance alliance);
+frc971::control_loops::Pose OuterPortPose(aos::Alliance alliance);
+
+// This class manages taking in drivetrain status messages and generating
+// turret goals so that the turret stays aimed at the target.
+class Aimer {
+ public:
+  typedef frc971::control_loops::StaticZeroingSingleDOFProfiledSubsystemGoal
+      Goal;
+  typedef frc971::control_loops::drivetrain::Status Status;
+  // Mode to run the aimer in, to control how we manage wrapping the turret
+  // angle.
+  enum class WrapMode {
+    // Keep the turret as far away from the edges of the range of motion as
+    // reasonable, to minimize the odds that we will hit the hardstops once we
+    // start shooting.
+    kAvoidEdges,
+    // Do everything reasonable to avoid having to wrap the turret--set this
+    // while shooting so that we don't randomly spin the turret a full 360
+    // degrees mid-shot.
+    kAvoidWrapping,
+  };
+
+  // Control modes for managing how we manage shooting on the fly.
+  enum class ShotMode {
+    // Don't do any shooting-on-the-fly compensation--just point straight at the
+    // target. Primarily used in tests.
+    kStatic,
+    // Do the shooting-on-the-fly compensation.
+    kShootOnTheFly,
+  };
+
+  Aimer();
+
+  void Update(const Status *status, aos::Alliance alliance, WrapMode wrap_mode,
+              ShotMode shot_mode);
+
+  const Goal *TurretGoal() const { return &goal_.message(); }
+
+  // Returns the distance to the goal, in meters.
+  double DistanceToGoal() const { return distance_; }
+
+  flatbuffers::Offset<AimerStatus> PopulateStatus(
+      flatbuffers::FlatBufferBuilder *fbb) const;
+
+ private:
+  aos::FlatbufferDetachedBuffer<Goal> goal_;
+  bool aiming_for_inner_port_ = false;
+  double distance_ = 0.0;
+};
+
+}  // namespace turret
+}  // namespace superstructure
+}  // namespace control_loops
+}  // namespace y2020
+#endif  // y2020_CONTROL_LOOPS_SUPERSTRUCTURE_TURRET_AIMING_H_
diff --git a/y2020/control_loops/superstructure/turret/aiming_test.cc b/y2020/control_loops/superstructure/turret/aiming_test.cc
new file mode 100644
index 0000000..cb95b56
--- /dev/null
+++ b/y2020/control_loops/superstructure/turret/aiming_test.cc
@@ -0,0 +1,249 @@
+#include "y2020/control_loops/superstructure/turret/aiming.h"
+
+#include "frc971/control_loops/pose.h"
+#include "gtest/gtest.h"
+#include "y2020/constants.h"
+#include "y2020/control_loops/drivetrain/drivetrain_base.h"
+
+namespace y2020 {
+namespace control_loops {
+namespace superstructure {
+namespace turret {
+namespace testing {
+
+using frc971::control_loops::Pose;
+
+class AimerTest : public ::testing::Test {
+ public:
+  typedef Aimer::Goal Goal;
+  typedef Aimer::Status Status;
+  struct StatusData {
+    double x;
+    double y;
+    double theta;
+    double linear;
+    double angular;
+  };
+  aos::FlatbufferDetachedBuffer<Status> MakeStatus(const StatusData &data) {
+    flatbuffers::FlatBufferBuilder fbb;
+    frc971::control_loops::drivetrain::LocalizerState::Builder state_builder(
+        fbb);
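+    // Convert the (linear, angular) velocity pair into left/right wheel
+    // velocities: v_left = v - omega * robot_radius, v_right = v + omega *
+    // robot_radius.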
+    state_builder.add_left_velocity(
+        data.linear -
+        data.angular * drivetrain::GetDrivetrainConfig().robot_radius);
+    state_builder.add_right_velocity(
+        data.linear +
+        data.angular * drivetrain::GetDrivetrainConfig().robot_radius);
+    const auto state_offset = state_builder.Finish();
+    Status::Builder builder(fbb);
+    builder.add_x(data.x);
+    builder.add_y(data.y);
+    builder.add_theta(data.theta);
+    builder.add_localizer(state_offset);
+    fbb.Finish(builder.Finish());
+    return fbb.Release();
+  }
+
+  const Goal *Update(
+      const StatusData &data, aos::Alliance alliance = aos::Alliance::kBlue,
+      Aimer::WrapMode wrap_mode = Aimer::WrapMode::kAvoidEdges,
+      Aimer::ShotMode shot_mode = Aimer::ShotMode::kShootOnTheFly) {
+    const auto buffer = MakeStatus(data);
+    aimer_.Update(&buffer.message(), alliance, wrap_mode, shot_mode);
+    const Goal *goal = aimer_.TurretGoal();
+    EXPECT_TRUE(goal->ignore_profile());
+    return goal;
+  }
+
+ protected:
+  Aimer aimer_;
+};
+
+TEST_F(AimerTest, StandingStill) {
+  const Pose target = OuterPortPose(aos::Alliance::kBlue);
+  const Goal *goal = Update({.x = target.abs_pos().x() + 1.0,
+                             .y = target.abs_pos().y() + 0.0,
+                             .theta = 0.0,
+                             .linear = 0.0,
+                             .angular = 0.0});
+  EXPECT_EQ(0.0, goal->unsafe_goal());
+  EXPECT_EQ(0.0, goal->goal_velocity());
+  EXPECT_EQ(1.0, aimer_.DistanceToGoal());
+  goal = Update({.x = target.abs_pos().x() + 1.0,
+                 .y = target.abs_pos().y() + 0.0,
+                 .theta = 1.0,
+                 .linear = 0.0,
+                 .angular = 0.0});
+  EXPECT_EQ(-1.0, goal->unsafe_goal());
+  EXPECT_EQ(0.0, goal->goal_velocity());
+  goal = Update({.x = target.abs_pos().x() + 1.0,
+                 .y = target.abs_pos().y() + 0.0,
+                 .theta = -1.0,
+                 .linear = 0.0,
+                 .angular = 0.0});
+  EXPECT_EQ(1.0, aos::math::NormalizeAngle(goal->unsafe_goal()));
+  EXPECT_EQ(0.0, goal->goal_velocity());
+  EXPECT_EQ(1.0, aimer_.DistanceToGoal());
+  // Test that we handle the case where we are right on top of the target.
+  goal = Update({.x = target.abs_pos().x() + 0.0,
+                 .y = target.abs_pos().y() + 0.0,
+                 .theta = 0.0,
+                 .linear = 0.0,
+                 .angular = 0.0});
+  EXPECT_EQ(M_PI, goal->unsafe_goal());
+  EXPECT_EQ(0.0, goal->goal_velocity());
+  EXPECT_EQ(0.0, aimer_.DistanceToGoal());
+}
+
+TEST_F(AimerTest, SpinningRobot) {
+  const Pose target = OuterPortPose(aos::Alliance::kBlue);
+  const Goal *goal = Update({.x = target.abs_pos().x() + 1.0,
+                             .y = target.abs_pos().y() + 0.0,
+                             .theta = 0.0,
+                             .linear = 0.0,
+                             .angular = 1.0});
+  EXPECT_EQ(0.0, goal->unsafe_goal());
+  EXPECT_FLOAT_EQ(-1.0, goal->goal_velocity());
+}
+
+// Tests that when we drive straight away from the target we don't have to spin
+// the turret.
+TEST_F(AimerTest, DrivingAwayFromTarget) {
+  const Pose target = OuterPortPose(aos::Alliance::kBlue);
+  // To keep the test simple, disable shooting on the fly so that the
+  // goal distance comes out to an easy-to-calculate number.
+  const Goal *goal = Update({.x = target.abs_pos().x() + 1.0,
+                             .y = target.abs_pos().y() + 0.0,
+                             .theta = 0.0,
+                             .linear = 1.0,
+                             .angular = 0.0},
+                            aos::Alliance::kBlue, Aimer::WrapMode::kAvoidEdges,
+                            Aimer::ShotMode::kStatic);
+  EXPECT_EQ(0.0, goal->unsafe_goal());
+  EXPECT_FLOAT_EQ(0.0, goal->goal_velocity());
+  EXPECT_EQ(1.0, aimer_.DistanceToGoal());
+  // Next, try with shooting-on-the-fly enabled--because we are driving straight
+  // towards the target, only the goal distance should be impacted.
+  goal = Update({.x = target.abs_pos().x() + 1.0,
+                 .y = target.abs_pos().y() + 0.0,
+                 .theta = 0.0,
+                 .linear = 1.0,
+                 .angular = 0.0},
+                aos::Alliance::kBlue, Aimer::WrapMode::kAvoidEdges,
+                Aimer::ShotMode::kShootOnTheFly);
+  EXPECT_EQ(0.0, goal->unsafe_goal());
+  EXPECT_FLOAT_EQ(0.0, goal->goal_velocity());
+  EXPECT_LT(1.0001, aimer_.DistanceToGoal());
+  EXPECT_GT(1.1, aimer_.DistanceToGoal());
+}
+
+// Tests that when we drive perpendicular to the target, we do have to spin.
+TEST_F(AimerTest, DrivingLateralToTarget) {
+  const Pose target = OuterPortPose(aos::Alliance::kBlue);
+  // To keep the test simple, disable shooting on the fly so that the
+  // goal_velocity comes out to an easy-to-calculate number.
+  const Goal *goal = Update({.x = target.abs_pos().x() + 0.0,
+                             .y = target.abs_pos().y() + 1.0,
+                             .theta = 0.0,
+                             .linear = 1.0,
+                             .angular = 0.0},
+                            aos::Alliance::kBlue, Aimer::WrapMode::kAvoidEdges,
+                            Aimer::ShotMode::kStatic);
+  EXPECT_EQ(M_PI_2, goal->unsafe_goal());
+  EXPECT_FLOAT_EQ(-1.0, goal->goal_velocity());
+  EXPECT_EQ(1.0, aimer_.DistanceToGoal());
+  // Next, test with shooting-on-the-fly enabled. The goal numbers should all
+  // be slightly offset due to the robot's velocity.
+  goal = Update({.x = target.abs_pos().x() + 0.0,
+                 .y = target.abs_pos().y() + 1.0,
+                 .theta = 0.0,
+                 .linear = 1.0,
+                 .angular = 0.0},
+                aos::Alliance::kBlue, Aimer::WrapMode::kAvoidEdges,
+                Aimer::ShotMode::kShootOnTheFly);
+  // Confirm that the turret heading goal is less than pi / 2, but not by too
+  // much.
+  EXPECT_GT(M_PI_2 - 0.001, goal->unsafe_goal());
+  EXPECT_LT(M_PI_2 - 0.1, goal->unsafe_goal());
+  // Similarly, the turret velocity goal should be a bit greater than -1.0,
+  // since the turret is no longer at exactly a right angle.
+  EXPECT_LT(-1.0, goal->goal_velocity());
+  EXPECT_GT(-0.95, goal->goal_velocity());
+  // And the distance to the goal should be a bit greater than 1.0.
+  EXPECT_LT(1.0001, aimer_.DistanceToGoal());
+  EXPECT_GT(1.1, aimer_.DistanceToGoal());
+}
+
+// Confirms that we will indeed shoot at the inner port when we have a good shot
+// angle on it.
+TEST_F(AimerTest, InnerPort) {
+  const Pose target = InnerPortPose(aos::Alliance::kRed);
+  const Goal *goal = Update({.x = target.abs_pos().x() + 1.0,
+                             .y = target.abs_pos().y() + 0.0,
+                             .theta = 0.0,
+                             .linear = 0.0,
+                             .angular = 0.0},
+                            aos::Alliance::kRed);
+  EXPECT_EQ(0.0, goal->unsafe_goal());
+  EXPECT_EQ(0.0, goal->goal_velocity());
+}
+
+// Confirms that when the turret heading goal would be entirely out of the
+// normal range of motion, we still send a valid (in-range) goal.
+TEST_F(AimerTest, WrapWhenOutOfRange) {
+  // Start ourselves needing a turret angle of zero.
+  const Pose target = OuterPortPose(aos::Alliance::kBlue);
+  StatusData status{.x = target.abs_pos().x() + 1.0,
+                    .y = target.abs_pos().y() + 0.0,
+                    .theta = 0.0,
+                    .linear = 0.0,
+                    .angular = 0.0};
+  const Goal *goal = Update(status);
+  EXPECT_EQ(0.0, goal->unsafe_goal());
+  EXPECT_EQ(0.0, goal->goal_velocity());
+  // Move the robot a small amount--we should go past pi and not wrap yet.
+  status.theta = -0.1;
+  goal = Update(status);
+  EXPECT_FLOAT_EQ(0.1, goal->unsafe_goal());
+  EXPECT_EQ(0.0, goal->goal_velocity());
+  // Move the robot so that, if we had no hard-stops, we would go past it.
+  status.theta = -2.0;
+  goal = Update(status);
+  EXPECT_FLOAT_EQ(2.0, goal->unsafe_goal());
+  EXPECT_EQ(0.0, goal->goal_velocity());
+}
+
+// Confirms that the avoid edges turret mode doesn't let us get all the way to
+// the turret hard-stops but that the avoid wrapping mode does.
+TEST_F(AimerTest, WrappingModes) {
+  // Start ourselves needing a turret angle of M_PI.
+  const Pose target = OuterPortPose(aos::Alliance::kBlue);
+  StatusData status{.x = target.abs_pos().x() - 1.0,
+                    .y = target.abs_pos().y() + 0.0,
+                    .theta = 0.0,
+                    .linear = 0.0,
+                    .angular = 0.0};
+  const Goal *goal =
+      Update(status, aos::Alliance::kBlue, Aimer::WrapMode::kAvoidWrapping);
+  EXPECT_EQ(M_PI, goal->unsafe_goal());
+  EXPECT_EQ(0.0, goal->goal_velocity());
+  constexpr double kUpperLimit = constants::Values::kTurretRange().upper;
+  // Move the robot to the upper limit with AvoidWrapping set--we should be at
+  // the upper limit and not wrapped.
+  status.theta = goal->unsafe_goal() - kUpperLimit;
+  goal = Update(status, aos::Alliance::kBlue, Aimer::WrapMode::kAvoidWrapping);
+  EXPECT_FLOAT_EQ(kUpperLimit, goal->unsafe_goal());
+  EXPECT_EQ(0.0, goal->goal_velocity());
+  // Enter kAvoidEdges mode--we should wrap around.
+  goal = Update(status, aos::Alliance::kBlue, Aimer::WrapMode::kAvoidEdges);
+  // Confirm that this test is actually testing something...
+  ASSERT_NE(aos::math::NormalizeAngle(kUpperLimit), kUpperLimit);
+  EXPECT_FLOAT_EQ(aos::math::NormalizeAngle(kUpperLimit), goal->unsafe_goal());
+  EXPECT_EQ(0.0, goal->goal_velocity());
+}
+
+}  // namespace testing
+}  // namespace turret
+}  // namespace superstructure
+}  // namespace control_loops
+}  // namespace y2020
diff --git a/y2020/joystick_reader.cc b/y2020/joystick_reader.cc
index 7910859..2049e83 100644
--- a/y2020/joystick_reader.cc
+++ b/y2020/joystick_reader.cc
@@ -13,10 +13,11 @@
 #include "aos/network/team_number.h"
 #include "aos/util/log_interval.h"
 #include "frc971/autonomous/base_autonomous_actor.h"
-#include "y2020/control_loops/drivetrain/drivetrain_base.h"
 #include "y2020/constants.h"
+#include "y2020/control_loops/drivetrain/drivetrain_base.h"
 #include "y2020/control_loops/superstructure/superstructure_goal_generated.h"
 #include "y2020/control_loops/superstructure/superstructure_status_generated.h"
+#include "y2020/setpoint_generated.h"
 
 using aos::input::driver_station::ButtonLocation;
 using aos::input::driver_station::ControlBit;
@@ -40,9 +41,12 @@
 const ButtonLocation kTurret(3, 15);
 const ButtonLocation kHood(3, 3);
 const ButtonLocation kShootSlow(4, 2);
+const ButtonLocation kFeed(4, 1);
 const ButtonLocation kIntakeExtend(3, 9);
 const ButtonLocation kIntakeIn(4, 4);
-const ButtonLocation kSpit(4, 5);
+const ButtonLocation kSpit(4, 3);
+
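+// Runs the climber winch while held.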
+const ButtonLocation kWinch(3, 14);
 
 class Reader : public ::aos::input::ActionJoystickInput {
  public:
@@ -54,8 +58,9 @@
         superstructure_goal_sender_(
             event_loop->MakeSender<superstructure::Goal>("/superstructure")),
         superstructure_status_fetcher_(
-            event_loop->MakeFetcher<superstructure::Status>(
-                "/superstructure")) {}
+            event_loop->MakeFetcher<superstructure::Status>("/superstructure")),
+        setpoint_fetcher_(event_loop->MakeFetcher<y2020::joysticks::Setpoint>(
+            "/superstructure")) {}
 
   void AutoEnded() override {
     AOS_LOG(INFO, "Auto ended, assuming disc and have piece\n");
@@ -68,31 +73,54 @@
       return;
     }
 
+    setpoint_fetcher_.Fetch();
+
     double hood_pos = constants::Values::kHoodRange().middle();
     double intake_pos = -0.89;
     double turret_pos = 0.0;
     float roller_speed = 0.0f;
     double accelerator_speed = 0.0;
     double finisher_speed = 0.0;
+    double climber_speed = 0.0;
 
     if (data.IsPressed(kTurret)) {
       turret_pos = 3.5;
     }
 
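+    // Hood: the button forces a fixed position; otherwise use the published
+    // tuning setpoint when available, falling back to a default.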
     if (data.IsPressed(kHood)) {
-      hood_pos = 0.05;
+      hood_pos = 0.45;
+    } else {
+      if (setpoint_fetcher_.get()) {
+        hood_pos = setpoint_fetcher_->hood();
+      } else {
+        hood_pos = 0.58;
+      }
     }
 
     if (data.IsPressed(kShootFast)) {
-      accelerator_speed = 600.0;
-      finisher_speed = 200.0;
+      if (setpoint_fetcher_.get()) {
+        accelerator_speed = setpoint_fetcher_->accelerator();
+        finisher_speed = setpoint_fetcher_->finisher();
+      } else {
+        accelerator_speed = 250.0;
+        finisher_speed = 500.0;
+      }
     } else if (data.IsPressed(kShootSlow)) {
-      accelerator_speed = 30.0;
-      finisher_speed = 30.0;
+      accelerator_speed = 180.0;
+      finisher_speed = 375.0;
     }
 
     if (data.IsPressed(kIntakeExtend)) {
-      intake_pos = 1.0;
+      intake_pos = 1.2;
+      roller_speed = 9.0f;
+    }
+
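+    // Keep the rollers turning whenever the intake is still out so that balls
+    // keep feeding after the extend button is released.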
+    if (superstructure_status_fetcher_.get() &&
+        superstructure_status_fetcher_->intake()->position() > -0.5) {
+      roller_speed = std::max(roller_speed, 6.0f);
+    }
+
+    if (data.IsPressed(kFeed)) {
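+      // Feeding is handled via the shooting flag on the superstructure goal
+      // below.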
     }
 
     if (data.IsPressed(kIntakeIn)) {
@@ -101,6 +129,10 @@
       roller_speed = -6.0f;
     }
 
+    if (data.IsPressed(kWinch)) {
+      climber_speed = 12.0f;
+    }
+
     auto builder = superstructure_goal_sender_.MakeBuilder();
 
     flatbuffers::Offset<superstructure::Goal> superstructure_goal_offset;
@@ -108,7 +140,7 @@
       flatbuffers::Offset<StaticZeroingSingleDOFProfiledSubsystemGoal>
           hood_offset = CreateStaticZeroingSingleDOFProfiledSubsystemGoal(
               *builder.fbb(), hood_pos,
-              CreateProfileParameters(*builder.fbb(), 0.5, 1.0));
+              CreateProfileParameters(*builder.fbb(), 0.7, 3.0));
 
       flatbuffers::Offset<StaticZeroingSingleDOFProfiledSubsystemGoal>
           intake_offset = CreateStaticZeroingSingleDOFProfiledSubsystemGoal(
@@ -132,6 +164,8 @@
       superstructure_goal_builder.add_turret(turret_offset);
       superstructure_goal_builder.add_roller_voltage(roller_speed);
       superstructure_goal_builder.add_shooter(shooter_offset);
+      superstructure_goal_builder.add_shooting(data.IsPressed(kFeed));
+      superstructure_goal_builder.add_climber_voltage(climber_speed);
 
       if (!builder.Send(superstructure_goal_builder.Finish())) {
         AOS_LOG(ERROR, "Sending superstructure goal failed.\n");
@@ -143,6 +177,8 @@
   ::aos::Sender<superstructure::Goal> superstructure_goal_sender_;
 
   ::aos::Fetcher<superstructure::Status> superstructure_status_fetcher_;
+
+  ::aos::Fetcher<y2020::joysticks::Setpoint> setpoint_fetcher_;
 };
 
 }  // namespace joysticks
diff --git a/y2020/setpoint.fbs b/y2020/setpoint.fbs
new file mode 100644
index 0000000..5f1af72
--- /dev/null
+++ b/y2020/setpoint.fbs
@@ -0,0 +1,11 @@
+namespace y2020.joysticks;
+
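+// Tuning setpoints, published by setpoint_setter and consumed by the joystick
+// reader.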
+table Setpoint {
+  accelerator:double;
+
+  finisher:double;
+
+  hood:double;
+}
+
+root_type Setpoint;
diff --git a/y2020/setpoint_setter.cc b/y2020/setpoint_setter.cc
new file mode 100644
index 0000000..1cbc518
--- /dev/null
+++ b/y2020/setpoint_setter.cc
@@ -0,0 +1,34 @@
+#include "aos/events/shm_event_loop.h"
+#include "aos/init.h"
+#include "gflags/gflags.h"
+#include "y2020/setpoint_generated.h"
+
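+// Stand-alone tool that publishes a single Setpoint message so the shooter
+// speeds and hood position can be tuned at runtime via the flags below.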
+DEFINE_double(accelerator, 250.0, "Accelerator speed");
+DEFINE_double(finisher, 500.0, "Finisher speed");
+DEFINE_double(hood, 0.45, "Hood setpoint");
+
+int main(int argc, char **argv) {
+  aos::InitGoogle(&argc, &argv);
+  aos::InitNRT();
+
+  aos::FlatbufferDetachedBuffer<aos::Configuration> config =
+      aos::configuration::ReadConfig("config.json");
+
+  aos::ShmEventLoop event_loop(&config.message());
+
+  ::aos::Sender<y2020::joysticks::Setpoint> setpoint_sender =
+      event_loop.MakeSender<y2020::joysticks::Setpoint>("/superstructure");
+
+  aos::Sender<y2020::joysticks::Setpoint>::Builder builder =
+      setpoint_sender.MakeBuilder();
+
+  y2020::joysticks::Setpoint::Builder setpoint_builder =
+      builder.MakeBuilder<y2020::joysticks::Setpoint>();
+
+  setpoint_builder.add_accelerator(FLAGS_accelerator);
+  setpoint_builder.add_finisher(FLAGS_finisher);
+  setpoint_builder.add_hood(FLAGS_hood);
+  builder.Send(setpoint_builder.Finish());
+
+  aos::Cleanup();
+}
diff --git a/y2020/vision/BUILD b/y2020/vision/BUILD
index fb7b113..3baa61b 100644
--- a/y2020/vision/BUILD
+++ b/y2020/vision/BUILD
@@ -16,6 +16,7 @@
     hdrs = [
         "v4l2_reader.h",
     ],
+    visibility = ["//y2020:__subpackages__"],
     deps = [
         ":vision_fbs",
         "//aos/events:event_loop",
@@ -30,10 +31,14 @@
     srcs = [
         "camera_reader.cc",
     ],
+    data = [
+        "//y2020:config.json",
+    ],
     restricted_to = [
         "//tools:k8",
         "//tools:armhf-debian",
     ],
+    visibility = ["//y2020:__subpackages__"],
     deps = [
         ":v4l2_reader",
         ":vision_fbs",
@@ -42,10 +47,10 @@
         "//aos/events:shm_event_loop",
         "//aos/network:team_number",
         "//third_party:opencv",
-        "//y2020/vision/sift:demo_sift",
         "//y2020/vision/sift:sift971",
         "//y2020/vision/sift:sift_fbs",
         "//y2020/vision/sift:sift_training_fbs",
+        "//y2020/vision/tools/python_code:sift_training_data",
     ],
 )
 
@@ -54,3 +59,46 @@
     srcs = ["vision.fbs"],
     visibility = ["//y2020:__subpackages__"],
 )
+
+cc_binary(
+    name = "viewer",
+    srcs = [
+        "viewer.cc",
+    ],
+    data = [
+        "//y2020:config.json",
+    ],
+    restricted_to = [
+        "//tools:k8",
+        "//tools:armhf-debian",
+    ],
+    visibility = ["//y2020:__subpackages__"],
+    deps = [
+        ":vision_fbs",
+        "//aos:init",
+        "//aos/events:shm_event_loop",
+        "//third_party:opencv",
+    ],
+)
+
+cc_binary(
+    name = "viewer_replay",
+    srcs = [
+        "viewer_replay.cc",
+    ],
+    data = [
+        "//y2020:config.json",
+    ],
+    restricted_to = [
+        "//tools:k8",
+        "//tools:armhf-debian",
+    ],
+    visibility = ["//y2020:__subpackages__"],
+    deps = [
+        ":vision_fbs",
+        "//aos:init",
+        "//aos/events:simulated_event_loop",
+        "//aos/events/logging:logger",
+        "//third_party:opencv",
+    ],
+)
diff --git a/y2020/vision/camera_reader.cc b/y2020/vision/camera_reader.cc
index 06e4077..f38a40e 100644
--- a/y2020/vision/camera_reader.cc
+++ b/y2020/vision/camera_reader.cc
@@ -7,13 +7,18 @@
 #include "aos/init.h"
 #include "aos/network/team_number.h"
 
-#include "y2020/vision/sift/demo_sift.h"
 #include "y2020/vision/sift/sift971.h"
 #include "y2020/vision/sift/sift_generated.h"
 #include "y2020/vision/sift/sift_training_generated.h"
+#include "y2020/vision/tools/python_code/sift_training_data.h"
 #include "y2020/vision/v4l2_reader.h"
 #include "y2020/vision/vision_generated.h"
 
+// Config flag to allow running camera_reader independently, e.g.:
+// bazel run //y2020/vision:camera_reader -- --config y2020/config.json
+//   --override_hostname pi-7971-1  --ignore_timestamps true
+DEFINE_string(config, "config.json", "Path to the config file to use.");
+
 namespace frc971 {
 namespace vision {
 namespace {
@@ -31,6 +36,8 @@
         image_sender_(event_loop->MakeSender<CameraImage>("/camera")),
         result_sender_(
             event_loop->MakeSender<sift::ImageMatchResult>("/camera")),
+        detailed_result_sender_(
+            event_loop->MakeSender<sift::ImageMatchResult>("/camera/detailed")),
         read_image_timer_(event_loop->AddTimer([this]() {
           ReadImage();
           read_image_timer_->Setup(event_loop_->monotonic_now());
@@ -63,6 +70,33 @@
                const std::vector<cv::KeyPoint> &keypoints,
                const cv::Mat &descriptors);
 
+  void SendImageMatchResult(const CameraImage &image,
+                            const std::vector<cv::KeyPoint> &keypoints,
+                            const cv::Mat &descriptors,
+                            const std::vector<std::vector<cv::DMatch>> &matches,
+                            const std::vector<cv::Mat> &camera_target_list,
+                            const std::vector<cv::Mat> &field_camera_list,
+                            const std::vector<cv::Point2f> &target_point_vector,
+                            const std::vector<float> &target_radius_vector,
+                            aos::Sender<sift::ImageMatchResult> *result_sender,
+                            bool send_details);
+
+  // Returns the 2D (image) location for the specified training feature.
+  cv::Point2f Training2dPoint(int training_image_index,
+                              int feature_index) const {
+    const float x = training_data_->images()
+                        ->Get(training_image_index)
+                        ->features()
+                        ->Get(feature_index)
+                        ->x();
+    const float y = training_data_->images()
+                        ->Get(training_image_index)
+                        ->features()
+                        ->Get(feature_index)
+                        ->y();
+    return cv::Point2f(x, y);
+  }
+
   // Returns the 3D location for the specified training feature.
   cv::Point3f Training3dPoint(int training_image_index,
                               int feature_index) const {
@@ -81,6 +115,17 @@
         ->field_to_target();
   }
 
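+  // Looks up the target point and radius (in training-image pixel
+  // coordinates) for the specified training image.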
+  void TargetLocation(int training_image_index, cv::Point2f &target_location,
+                      float &target_radius) {
+    target_location.x =
+        training_data_->images()->Get(training_image_index)->target_point_x();
+    target_location.y =
+        training_data_->images()->Get(training_image_index)->target_point_y();
+    target_radius = training_data_->images()
+                        ->Get(training_image_index)
+                        ->target_point_radius();
+  }
+
   int number_training_images() const {
     return training_data_->images()->size();
   }
@@ -93,6 +138,14 @@
     return result;
   }
 
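+  // Returns the camera's 5 distortion coefficients as a 5x1 CV_32F matrix
+  // backed by the calibration flatbuffer.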
+  cv::Mat CameraDistCoeffs() const {
+    const cv::Mat result(5, 1, CV_32F,
+                         const_cast<void *>(static_cast<const void *>(
+                             camera_calibration_->dist_coeffs()->data())));
+    CHECK_EQ(result.total(), camera_calibration_->dist_coeffs()->size());
+    return result;
+  }
+
   aos::EventLoop *const event_loop_;
   const sift::TrainingData *const training_data_;
   const sift::CameraCalibration *const camera_calibration_;
@@ -100,6 +153,7 @@
   cv::FlannBasedMatcher *const matcher_;
   aos::Sender<CameraImage> image_sender_;
   aos::Sender<sift::ImageMatchResult> result_sender_;
+  aos::Sender<sift::ImageMatchResult> detailed_result_sender_;
   // We schedule this immediately to read an image. Having it on a timer means
   // other things can run on the event loop in between.
   aos::TimerHandler *const read_image_timer_;
@@ -128,37 +182,103 @@
 void CameraReader::CopyTrainingFeatures() {
   for (const sift::TrainingImage *training_image : *training_data_->images()) {
     cv::Mat features(training_image->features()->size(), 128, CV_32F);
-    for (size_t i = 0; i <  training_image->features()->size(); ++i) {
+    for (size_t i = 0; i < training_image->features()->size(); ++i) {
       const sift::Feature *feature_table = training_image->features()->Get(i);
 
       // We don't need this information right now, but make sure it's here to
       // avoid crashes that only occur when specific features are matched.
       CHECK(feature_table->has_field_location());
 
-      const flatbuffers::Vector<float> *const descriptor =
+      const flatbuffers::Vector<uint8_t> *const descriptor =
           feature_table->descriptor();
       CHECK_EQ(descriptor->size(), 128u) << ": Unsupported feature size";
-      cv::Mat(1, descriptor->size(), CV_32F,
-              const_cast<void *>(static_cast<const void *>(descriptor->data())))
-          .copyTo(features(cv::Range(i, i + 1), cv::Range(0, 128)));
+      const auto in_mat = cv::Mat(
+          1, descriptor->size(), CV_8U,
+          const_cast<void *>(static_cast<const void *>(descriptor->data())));
+      const auto out_mat = features(cv::Range(i, i + 1), cv::Range(0, 128));
+      in_mat.convertTo(out_mat, CV_32F);
     }
     matcher_->add(features);
   }
 }
 
-void CameraReader::ProcessImage(const CameraImage &image) {
-  // Be ready to pack the results up and send them out. We can pack things into
-  // this memory as we go to allow reusing temporaries better.
-  auto builder = result_sender_.MakeBuilder();
+void CameraReader::SendImageMatchResult(
+    const CameraImage &image, const std::vector<cv::KeyPoint> &keypoints,
+    const cv::Mat &descriptors,
+    const std::vector<std::vector<cv::DMatch>> &matches,
+    const std::vector<cv::Mat> &camera_target_list,
+    const std::vector<cv::Mat> &field_camera_list,
+    const std::vector<cv::Point2f> &target_point_vector,
+    const std::vector<float> &target_radius_vector,
+    aos::Sender<sift::ImageMatchResult> *result_sender, bool send_details) {
+  auto builder = result_sender->MakeBuilder();
   const auto camera_calibration_offset =
       aos::CopyFlatBuffer(camera_calibration_, builder.fbb());
 
+  flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<sift::Feature>>>
+      features_offset;
+  flatbuffers::Offset<
+      flatbuffers::Vector<flatbuffers::Offset<sift::ImageMatch>>>
+      image_matches_offset;
+  if (send_details) {
+    features_offset = PackFeatures(builder.fbb(), keypoints, descriptors);
+    image_matches_offset = PackImageMatches(builder.fbb(), matches);
+  }
+
+  std::vector<flatbuffers::Offset<sift::CameraPose>> camera_poses;
+
+  CHECK_EQ(camera_target_list.size(), field_camera_list.size());
+  for (size_t i = 0; i < camera_target_list.size(); ++i) {
+    cv::Mat camera_target = camera_target_list[i];
+    CHECK(camera_target.isContinuous());
+    const auto data_offset = builder.fbb()->CreateVector<float>(
+        reinterpret_cast<float *>(camera_target.data), camera_target.total());
+    const flatbuffers::Offset<sift::TransformationMatrix> transform_offset =
+        sift::CreateTransformationMatrix(*builder.fbb(), data_offset);
+
+    cv::Mat field_camera = field_camera_list[i];
+    CHECK(field_camera.isContinuous());
+    const auto fc_data_offset = builder.fbb()->CreateVector<float>(
+        reinterpret_cast<float *>(field_camera.data), field_camera.total());
+    const flatbuffers::Offset<sift::TransformationMatrix> fc_transform_offset =
+        sift::CreateTransformationMatrix(*builder.fbb(), fc_data_offset);
+
+    const flatbuffers::Offset<sift::TransformationMatrix>
+        field_to_target_offset =
+            aos::CopyFlatBuffer(FieldToTarget(i), builder.fbb());
+
+    sift::CameraPose::Builder pose_builder(*builder.fbb());
+    pose_builder.add_camera_to_target(transform_offset);
+    pose_builder.add_field_to_camera(fc_transform_offset);
+    pose_builder.add_field_to_target(field_to_target_offset);
+    pose_builder.add_query_target_point_x(target_point_vector[i].x);
+    pose_builder.add_query_target_point_y(target_point_vector[i].y);
+    pose_builder.add_query_target_point_radius(target_radius_vector[i]);
+    camera_poses.emplace_back(pose_builder.Finish());
+  }
+  const auto camera_poses_offset = builder.fbb()->CreateVector(camera_poses);
+
+  sift::ImageMatchResult::Builder result_builder(*builder.fbb());
+  result_builder.add_camera_poses(camera_poses_offset);
+  if (send_details) {
+    result_builder.add_image_matches(image_matches_offset);
+    result_builder.add_features(features_offset);
+  }
+  result_builder.add_image_monotonic_timestamp_ns(
+      image.monotonic_timestamp_ns());
+  result_builder.add_camera_calibration(camera_calibration_offset);
+
+  // TODO(Jim): Need to add the target point computed from matches and
+  // mapped by homography.
+  builder.Send(result_builder.Finish());
+}
+
+void CameraReader::ProcessImage(const CameraImage &image) {
   // First, we need to extract the brightness information. This can't really be
   // fused into the beginning of the SIFT algorithm because the algorithm needs
   // to look at the base image directly. It also only takes 2ms on our images.
   // This is converting from YUYV to a grayscale image.
-  cv::Mat image_mat(
-      image.rows(), image.cols(), CV_8U);
+  cv::Mat image_mat(image.rows(), image.cols(), CV_8U);
   CHECK(image_mat.isContinuous());
   const int number_pixels = image.rows() * image.cols();
   for (int i = 0; i < number_pixels; ++i) {
@@ -170,18 +290,17 @@
   std::vector<cv::KeyPoint> keypoints;
   cv::Mat descriptors;
   sift_->detectAndCompute(image_mat, cv::noArray(), keypoints, descriptors);
-  const auto features_offset =
-      PackFeatures(builder.fbb(), keypoints, descriptors);
 
   // Then, match those features against our training data.
   std::vector<std::vector<cv::DMatch>> matches;
   matcher_->knnMatch(/* queryDescriptors */ descriptors, matches, /* k */ 2);
-  const auto image_matches_offset = PackImageMatches(builder.fbb(), matches);
 
   struct PerImageMatches {
     std::vector<const std::vector<cv::DMatch> *> matches;
     std::vector<cv::Point3f> training_points_3d;
     std::vector<cv::Point2f> query_points;
+    std::vector<cv::Point2f> training_points;
+    cv::Mat homography;
   };
   std::vector<PerImageMatches> per_image_matches(number_training_images());
 
@@ -204,6 +323,8 @@
     CHECK_LT(training_image, static_cast<int>(per_image_matches.size()));
     PerImageMatches *const per_image = &per_image_matches[training_image];
     per_image->matches.push_back(&match);
+    per_image->training_points.push_back(
+        Training2dPoint(training_image, match[0].trainIdx));
     per_image->training_points_3d.push_back(
         Training3dPoint(training_image, match[0].trainIdx));
 
@@ -213,47 +334,170 @@
 
   // The minimum number of matches in a training image for us to use it.
   static constexpr int kMinimumMatchCount = 10;
+  std::vector<cv::Mat> camera_target_list;
+  std::vector<cv::Mat> field_camera_list;
 
-  std::vector<flatbuffers::Offset<sift::CameraPose>> camera_poses;
+  // Rebuild the matches and store them here
+  std::vector<std::vector<cv::DMatch>> all_good_matches;
+  // Build lists of the target point and radius for each good match.
+  std::vector<cv::Point2f> target_point_vector;
+  std::vector<float> target_radius_vector;
+
+  // Iterate through matches for each training image
   for (size_t i = 0; i < per_image_matches.size(); ++i) {
     const PerImageMatches &per_image = per_image_matches[i];
+
+    VLOG(2) << "Number of matches to start: " << per_image.matches.size()
+            << "\n";
+    // If we don't have enough matches to start, skip this set of matches.
     if (per_image.matches.size() < kMinimumMatchCount) {
       continue;
     }
 
-    cv::Mat R_camera_target, T_camera_target;
-    cv::solvePnPRansac(per_image.training_points_3d, per_image.query_points,
-                       CameraIntrinsics(), cv::noArray(), R_camera_target,
-                       T_camera_target);
+    // Use homography to determine which matches make sense physically
+    cv::Mat mask;
+    cv::Mat homography =
+        cv::findHomography(per_image.training_points, per_image.query_points,
+                           CV_RANSAC, 3.0, mask);
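+    // mask comes back as an Nx1 CV_8U matrix with a 1 for each inlier match.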
 
-    sift::CameraPose::Builder pose_builder(*builder.fbb());
-    {
-    CHECK_EQ(cv::Size(3, 3), R_camera_target.size());
-    CHECK_EQ(cv::Size(3, 1), T_camera_target.size());
-    cv::Mat camera_target = cv::Mat::zeros(4, 4, CV_32F);
-    R_camera_target.copyTo(camera_target(cv::Range(0, 3), cv::Range(0, 3)));
-    T_camera_target.copyTo(camera_target(cv::Range(3, 4), cv::Range(0, 3)));
-    camera_target.at<float>(3, 3) = 1;
-    CHECK(camera_target.isContinuous());
-    const auto data_offset = builder.fbb()->CreateVector<float>(
-        reinterpret_cast<float *>(camera_target.data), camera_target.total());
-    pose_builder.add_camera_to_target(
-        sift::CreateTransformationMatrix(*builder.fbb(), data_offset));
+    // If mask doesn't have enough leftover matches, skip these matches
+    if (cv::countNonZero(mask) < kMinimumMatchCount) {
+      continue;
     }
-    pose_builder.add_field_to_target(
-        aos::CopyFlatBuffer(FieldToTarget(i), builder.fbb()));
-    camera_poses.emplace_back(pose_builder.Finish());
-  }
-  const auto camera_poses_offset = builder.fbb()->CreateVector(camera_poses);
 
-  sift::ImageMatchResult::Builder result_builder(*builder.fbb());
-  result_builder.add_image_matches(image_matches_offset);
-  result_builder.add_camera_poses(camera_poses_offset);
-  result_builder.add_features(features_offset);
-  result_builder.add_image_monotonic_timestamp_ns(
-      image.monotonic_timestamp_ns());
-  result_builder.add_camera_calibration(camera_calibration_offset);
-  builder.Send(result_builder.Finish());
+    VLOG(2) << "Number of matches after homography: " << cv::countNonZero(mask)
+            << "\n";
+
+    // Fill our match info for each good match based on homography result
+    PerImageMatches per_image_good_match;
+    CHECK_EQ(per_image.training_points.size(),
+             static_cast<size_t>(mask.size().height));
+    for (size_t j = 0; j < per_image.matches.size(); j++) {
+      // Skip matches that the homography RANSAC masked out.
+      if (mask.at<uchar>(j, 0) != 1) {
+        continue;
+      }
+
+      // Add this to our collection of all matches that passed our criteria
+      all_good_matches.push_back(
+          static_cast<std::vector<cv::DMatch>>(*per_image.matches[j]));
+
+      // Fill out the data for matches per image that made it past
+      // homography check, for later use
+      per_image_good_match.matches.push_back(per_image.matches[j]);
+      per_image_good_match.training_points.push_back(
+          per_image.training_points[j]);
+      per_image_good_match.training_points_3d.push_back(
+          per_image.training_points_3d[j]);
+      per_image_good_match.query_points.push_back(per_image.query_points[j]);
+    }
+
+    // Returns from opencv are doubles (CV_64F), which don't play well
+    // with our floats
+    homography.convertTo(homography, CV_32F);
+    per_image_good_match.homography = homography;
+
+    CHECK_GT(per_image_good_match.matches.size(), 0u);
+
+    // Collect training target location, so we can map it to matched image
+    cv::Point2f target_point;
+    float target_radius;
+    TargetLocation((*(per_image_good_match.matches[0]))[0].imgIdx, target_point,
+                   target_radius);
+
+    // Store target_point in vector for use by perspectiveTransform
+    std::vector<cv::Point2f> src_target_pt;
+    src_target_pt.push_back(target_point);
+    std::vector<cv::Point2f> query_target_pt;
+
+    cv::perspectiveTransform(src_target_pt, query_target_pt, homography);
+
+    float query_target_radius =
+        target_radius *
+        std::abs(homography.at<float>(0, 0) + homography.at<float>(1, 1)) / 2.;
+
+    CHECK_EQ(query_target_pt.size(), 1u);
+    target_point_vector.push_back(query_target_pt[0]);
+    target_radius_vector.push_back(query_target_radius);
+
+    // Pose transformations (rotations and translations) for various
+    // coordinate frames.  R_X_Y_vec is the Rodrigues (angle-axis)
+    // representation of the 3x3 rotation R_X_Y from frame X to frame Y.
+
+    // Transform from the camera to the target frame.
+    cv::Mat R_camera_target_vec, R_camera_target, T_camera_target;
+    // Transform from the camera to the field-origin (global) frame.
+    cv::Mat R_camera_field_vec, R_camera_field, T_camera_field;
+    // Inverse of camera-to-field; defines the location of the camera in the
+    // global (field) reference frame.
+    cv::Mat R_field_camera_vec, R_field_camera, T_field_camera;
+
+    // Compute the pose of the camera (global origin relative to camera)
+    cv::solvePnPRansac(per_image_good_match.training_points_3d,
+                       per_image_good_match.query_points, CameraIntrinsics(),
+                       CameraDistCoeffs(), R_camera_field_vec, T_camera_field);
+    CHECK_EQ(cv::Size(1, 3), T_camera_field.size());
+
+    // Convert to float32's (from float64) to be compatible with the rest
+    R_camera_field_vec.convertTo(R_camera_field_vec, CV_32F);
+    T_camera_field.convertTo(T_camera_field, CV_32F);
+
+    // Get matrix version of R_camera_field
+    cv::Rodrigues(R_camera_field_vec, R_camera_field);
+    CHECK_EQ(cv::Size(3, 3), R_camera_field.size());
+
+    // Compute H_field_camera = H_camera_field^-1
+    R_field_camera = R_camera_field.t();
+    T_field_camera = -R_field_camera * (T_camera_field);
+
+    // Extract the field_target transformation
+    const cv::Mat H_field_target(4, 4, CV_32F,
+                                 const_cast<void *>(static_cast<const void *>(
+                                     FieldToTarget(i)->data()->data())));
+
+    const cv::Mat R_field_target =
+        H_field_target(cv::Range(0, 3), cv::Range(0, 3));
+    const cv::Mat T_field_target =
+        H_field_target(cv::Range(0, 3), cv::Range(3, 4));
+
+    // Use it to get the relative pose from camera to target
+    R_camera_target = R_camera_field * (R_field_target);
+    T_camera_target = R_camera_field * (T_field_target) + T_camera_field;
+
+    // Set H_camera_target
+    {
+      CHECK_EQ(cv::Size(3, 3), R_camera_target.size());
+      CHECK_EQ(cv::Size(1, 3), T_camera_target.size());
+      cv::Mat H_camera_target = cv::Mat::zeros(4, 4, CV_32F);
+      R_camera_target.copyTo(H_camera_target(cv::Range(0, 3), cv::Range(0, 3)));
+      T_camera_target.copyTo(H_camera_target(cv::Range(0, 3), cv::Range(3, 4)));
+      H_camera_target.at<float>(3, 3) = 1;
+      CHECK(H_camera_target.isContinuous());
+      camera_target_list.push_back(H_camera_target.clone());
+    }
+
+    // Set H_field_camera
+    {
+      CHECK_EQ(cv::Size(3, 3), R_field_camera.size());
+      CHECK_EQ(cv::Size(1, 3), T_field_camera.size());
+      cv::Mat H_field_camera = cv::Mat::zeros(4, 4, CV_32F);
+      R_field_camera.copyTo(H_field_camera(cv::Range(0, 3), cv::Range(0, 3)));
+      T_field_camera.copyTo(H_field_camera(cv::Range(0, 3), cv::Range(3, 4)));
+      H_field_camera.at<float>(3, 3) = 1;
+      CHECK(H_field_camera.isContinuous());
+      field_camera_list.push_back(H_field_camera.clone());
+    }
+  }
+  // Now, send our two messages: one large, with details (features) for
+  // remote debugging, and one smaller one without them.
+  SendImageMatchResult(image, keypoints, descriptors, all_good_matches,
+                       camera_target_list, field_camera_list,
+                       target_point_vector, target_radius_vector,
+                       &detailed_result_sender_, true);
+  SendImageMatchResult(image, keypoints, descriptors, all_good_matches,
+                       camera_target_list, field_camera_list,
+                       target_point_vector, target_radius_vector,
+                       &result_sender_, false);
 }
 
 void CameraReader::ReadImage() {
@@ -310,13 +554,21 @@
                            const cv::Mat &descriptors) {
   const int number_features = keypoints.size();
   CHECK_EQ(descriptors.rows, number_features);
+  CHECK_EQ(descriptors.cols, 128);
   std::vector<flatbuffers::Offset<sift::Feature>> features_vector(
       number_features);
   for (int i = 0; i < number_features; ++i) {
-    const auto submat = descriptors(cv::Range(i, i + 1), cv::Range(0, 128));
+    const auto submat =
+        descriptors(cv::Range(i, i + 1), cv::Range(0, descriptors.cols));
     CHECK(submat.isContinuous());
-    const auto descriptor_offset =
-        fbb->CreateVector(reinterpret_cast<float *>(submat.data), 128);
+    flatbuffers::Offset<flatbuffers::Vector<uint8_t>> descriptor_offset;
+    {
+      uint8_t *data;
+      descriptor_offset = fbb->CreateUninitializedVector(128, &data);
+      submat.convertTo(
+          cv::Mat(1, descriptors.cols, CV_8U, static_cast<void *>(data)),
+          CV_8U);
+    }
     sift::Feature::Builder feature_builder(*fbb);
     feature_builder.add_descriptor(descriptor_offset);
     feature_builder.add_x(keypoints[i].pt.x);
@@ -334,9 +586,9 @@
 
 void CameraReaderMain() {
   aos::FlatbufferDetachedBuffer<aos::Configuration> config =
-      aos::configuration::ReadConfig("config.json");
+      aos::configuration::ReadConfig(FLAGS_config);
 
-  const auto training_data_bfbs = DemoSiftData();
+  const auto training_data_bfbs = SiftTrainingData();
   const sift::TrainingData *const training_data =
       flatbuffers::GetRoot<sift::TrainingData>(training_data_bfbs.data());
   {
@@ -354,8 +606,18 @@
   cv::FlannBasedMatcher matcher(index_params, search_params);
 
   aos::ShmEventLoop event_loop(&config.message());
+
+  // First, log the data for future reference.
+  {
+    aos::Sender<sift::TrainingData> training_data_sender =
+        event_loop.MakeSender<sift::TrainingData>("/camera");
+    training_data_sender.Send(
+        aos::FlatbufferString<sift::TrainingData>(training_data_bfbs));
+  }
+
   V4L2Reader v4l2_reader(&event_loop, "/dev/video0");
-  CameraReader camera_reader(&event_loop, training_data, &v4l2_reader, &matcher);
+  CameraReader camera_reader(&event_loop, training_data, &v4l2_reader,
+                             &matcher);
 
   event_loop.Run();
 }
@@ -364,7 +626,6 @@
 }  // namespace vision
 }  // namespace frc971
 
-
 int main(int argc, char **argv) {
   aos::InitGoogle(&argc, &argv);
   frc971::vision::CameraReaderMain();
diff --git a/y2020/vision/rootfs/change_hostname.sh b/y2020/vision/rootfs/change_hostname.sh
index 1842504..712aba9 100755
--- a/y2020/vision/rootfs/change_hostname.sh
+++ b/y2020/vision/rootfs/change_hostname.sh
@@ -22,7 +22,7 @@
 
 echo "${HOSTNAME}" > /etc/hostname
 
-if grep /etc/hosts '^10\.[0-9]*\.[0-9]*\.[0-9]*\s*pi-[0-9]*-[0-9] pi[0-9]$' /etc/hosts >/dev/null ;
+if grep '^10\.[0-9]*\.[0-9]*\.[0-9]*\s*pi-[0-9]*-[0-9] pi[0-9]$' /etc/hosts >/dev/null ;
 then
   sed -i "s/^10\.[0-9]*\.[0-9]*\(\.[0-9]*\s*pi-\)[0-9]*\(-[0-9] pi[0-9]\)$/${IP_BASE}\1${TEAM_NUMBER}\2/" /etc/hosts
 else
diff --git a/y2020/vision/rootfs/frc971.service b/y2020/vision/rootfs/frc971.service
new file mode 100644
index 0000000..bf42652
--- /dev/null
+++ b/y2020/vision/rootfs/frc971.service
@@ -0,0 +1,9 @@
+[Unit]
+Description=Start up 971 robot code
+
+[Service]
+Type=oneshot
+ExecStart=su pi -c /home/pi/robot_code/starter.sh
+
+[Install]
+WantedBy=multi-user.target
diff --git a/y2020/vision/rootfs/modify_rootfs.sh b/y2020/vision/rootfs/modify_rootfs.sh
index c472caa..219590f 100755
--- a/y2020/vision/rootfs/modify_rootfs.sh
+++ b/y2020/vision/rootfs/modify_rootfs.sh
@@ -3,6 +3,7 @@
 set -xe
 
 IMAGE="2020-02-13-raspbian-buster-lite.img"
+BOOT_PARTITION="2020-02-13-raspbian-buster-lite.img.boot_partition"
 PARTITION="2020-02-13-raspbian-buster-lite.img.partition"
 HOSTNAME="pi-8971-1"
 
@@ -14,13 +15,60 @@
   USER=root sudo proot -0 -q qemu-arm-static -w / -r "${PARTITION}" sudo -h 127.0.0.1 -u pi "$@"
 }
 
-OFFSET="$(fdisk -lu "${IMAGE}" | grep "${IMAGE}2" | awk '{print 512*$2}')"
+
 mkdir -p "${PARTITION}"
+mkdir -p "${BOOT_PARTITION}"
+
+if mount | grep "${BOOT_PARTITION}" >/dev/null ;
+then
+  echo "Already mounted"
+else
+  OFFSET="$(fdisk -lu "${IMAGE}" | grep "${IMAGE}1" | awk '{print 512*$2}')"
+  sudo mount -o loop,offset=${OFFSET} "${IMAGE}" "${BOOT_PARTITION}"
+fi
+
+# Enable the camera on boot.
+if ! grep "start_x=1" "${BOOT_PARTITION}/config.txt"; then
+  echo "start_x=1" | sudo tee -a "${BOOT_PARTITION}/config.txt"
+fi
+if ! grep "gpu_mem=128" "${BOOT_PARTITION}/config.txt"; then
+  echo "gpu_mem=128" | sudo tee -a "${BOOT_PARTITION}/config.txt"
+fi
+
+sudo umount "${BOOT_PARTITION}"
+rmdir "${BOOT_PARTITION}"
 
 if mount | grep "${PARTITION}" >/dev/null ;
 then
   echo "Already mounted"
 else
+  OFFSET="$(fdisk -lu "${IMAGE}" | grep "${IMAGE}2" | awk '{print 512*$2}')"
+
+  if [[ "$(stat -c %s "${IMAGE}")" == 1849688064 ]]; then
+    echo "Growing image"
+    dd if=/dev/zero bs=1M count=1024 >> "${IMAGE}"
+    START="$(fdisk -lu "${IMAGE}" | grep "${IMAGE}2" | awk '{print $2}')"
+
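+    # Recreate partition 2 so it spans the grown image; the sed below strips
+    # the inline comments before feeding the commands to fdisk.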
+    sed -e 's/\s*\([\+0-9a-zA-Z]*\).*/\1/' << EOF | fdisk "${IMAGE}"
+  d # remove old partition
+  2
+  n # new partition
+  p # primary partition
+  2 # partition number 2
+  532480 # start where the old one starts
+    # To the end
+  p # print the in-memory partition table
+  w # Flush
+  q # and we're done
+EOF
+
+    sudo losetup -o "${OFFSET}" -f "${IMAGE}"
+    LOOPBACK="$(sudo losetup --list | grep "${IMAGE}" | awk '{print $1}')"
+    sudo e2fsck -f "${LOOPBACK}"
+    sudo resize2fs "${LOOPBACK}"
+    sudo losetup -d "${LOOPBACK}"
+  fi
+
   echo "Mounting"
   sudo mount -o loop,offset=${OFFSET} "${IMAGE}" "${PARTITION}"
 fi
@@ -28,6 +76,7 @@
 sudo cp target_configure.sh "${PARTITION}/tmp/"
 sudo cp dhcpcd.conf "${PARTITION}/tmp/dhcpcd.conf"
 sudo cp change_hostname.sh "${PARTITION}/tmp/change_hostname.sh"
+sudo cp frc971.service "${PARTITION}/etc/systemd/system/frc971.service"
 
 target /bin/mkdir -p /home/pi/.ssh/
 cat ~/.ssh/id_rsa.pub | target tee /home/pi/.ssh/authorized_keys
diff --git a/y2020/vision/rootfs/target_configure.sh b/y2020/vision/rootfs/target_configure.sh
index 2a072e5..193f8b5 100755
--- a/y2020/vision/rootfs/target_configure.sh
+++ b/y2020/vision/rootfs/target_configure.sh
@@ -13,9 +13,44 @@
 
 chown -R pi.pi /home/pi/.ssh
 
+apt-get update
+
 apt-get install -y vim-nox \
   git \
-  cpufrequtils
+  python3-pip \
+  cpufrequtils \
+  libopencv-calib3d3.2 \
+  libopencv-contrib3.2 \
+  libopencv-core3.2 \
+  libopencv-features2d3.2 \
+  libopencv-flann3.2 \
+  libopencv-highgui3.2 \
+  libopencv-imgcodecs3.2 \
+  libopencv-imgproc3.2 \
+  libopencv-ml3.2 \
+  libopencv-objdetect3.2 \
+  libopencv-photo3.2 \
+  libopencv-shape3.2 \
+  libopencv-stitching3.2 \
+  libopencv-superres3.2 \
+  libopencv-video3.2 \
+  libopencv-videoio3.2 \
+  libopencv-videostab3.2 \
+  libopencv-viz3.2 \
+  python3-opencv \
+  gstreamer1.0-plugins-bad \
+  gstreamer1.0-plugins-base \
+  gstreamer1.0-plugins-good \
+  gstreamer1.0-plugins-ugly \
+  gstreamer1.0-x \
+  gstreamer1.0-nice \
+  gstreamer1.0-gl \
+  libgstreamer-plugins-bad1.0-0 \
+  libgstreamer-plugins-base1.0-0 \
+  libgstreamer1.0-0 \
+  libgstreamer-gl1.0-0 \
+  libnice10 \
+  libnice-dev
 
 echo 'GOVERNOR="performance"' > /etc/default/cpufrequtils
 
@@ -46,6 +81,7 @@
 rm -f /etc/profile.d/wifi-check.sh
 
 systemctl enable ssh.service
+systemctl enable frc971.service
 
 # Default us to pi-8971-1
 /root/bin/change_hostname.sh pi-8971-1
diff --git a/y2020/vision/sift/BUILD b/y2020/vision/sift/BUILD
index 02507a9..a7827b6 100644
--- a/y2020/vision/sift/BUILD
+++ b/y2020/vision/sift/BUILD
@@ -28,6 +28,7 @@
         # TODO(Brian): Replace this with something more fine-grained from the
         # configuration fragment or something.
         "//tools/cpp:toolchain",
+        "@amd64_debian_sysroot//:sysroot_files",
     ],
     default_python_version = "PY3",
     main = "fast_gaussian_runner.py",
@@ -195,10 +196,12 @@
     ],
     namespace = "frc971.vision.sift",
     tables = [
+        "KeypointFieldLocation",
         "Feature",
         "Match",
         "ImageMatch",
         "TransformationMatrix",
+        "CameraCalibration",
         "CameraPose",
         "ImageMatchResult",
         "TrainingImage",
diff --git a/y2020/vision/sift/demo_sift_training.py b/y2020/vision/sift/demo_sift_training.py
index a6650fd..c78a44a 100644
--- a/y2020/vision/sift/demo_sift_training.py
+++ b/y2020/vision/sift/demo_sift_training.py
@@ -26,7 +26,8 @@
   for keypoint, descriptor in zip(keypoints, descriptors):
     Feature.FeatureStartDescriptorVector(fbb, len(descriptor))
     for n in reversed(descriptor):
-      fbb.PrependFloat32(n)
+      assert n == round(n)
+      fbb.PrependUint8(int(round(n)))
     descriptor_vector = fbb.EndVector(len(descriptor))
 
     Feature.FeatureStart(fbb)
@@ -49,14 +50,14 @@
   TrainingImage.TrainingImageStart(fbb)
   TrainingImage.TrainingImageAddFeatures(fbb, features_vector_table)
   # TODO(Brian): Fill out the transformation matrices.
-  training_image = TrainingImage.TrainingImageEnd(fbb)
+  training_image_offset = TrainingImage.TrainingImageEnd(fbb)
 
   TrainingData.TrainingDataStartImagesVector(fbb, 1)
-  fbb.PrependUOffsetTRelative(training_image)
-  images = fbb.EndVector(1)
+  fbb.PrependUOffsetTRelative(training_image_offset)
+  images_offset = fbb.EndVector(1)
 
   TrainingData.TrainingDataStart(fbb)
-  TrainingData.TrainingDataAddImages(fbb, images)
+  TrainingData.TrainingDataAddImages(fbb, images_offset)
   fbb.Finish(TrainingData.TrainingDataEnd(fbb))
 
   bfbs = fbb.Output()
diff --git a/y2020/vision/sift/fast_gaussian.bzl b/y2020/vision/sift/fast_gaussian.bzl
index a1c3173..0905423 100644
--- a/y2020/vision/sift/fast_gaussian.bzl
+++ b/y2020/vision/sift/fast_gaussian.bzl
@@ -1,55 +1,55 @@
 def fast_gaussian(sigmas, sizes):
-  files = []
-  for _, sigma_name, _ in sigmas:
+    files = []
+    for _, sigma_name, _ in sigmas:
+        for cols, rows in sizes:
+            files.append("fast_gaussian_%dx%d_%s" % (cols, rows, sigma_name))
+    for _, sigma_name, _ in sigmas:
+        for cols, rows in sizes:
+            files.append("fast_gaussian_subtract_%dx%d_%s" % (cols, rows, sigma_name))
     for cols, rows in sizes:
-      files.append("fast_gaussian_%dx%d_%s" % (cols, rows, sigma_name))
-  for _, sigma_name, _ in sigmas:
-    for cols, rows in sizes:
-      files.append("fast_gaussian_subtract_%dx%d_%s" % (cols, rows, sigma_name))
-  for cols, rows in sizes:
-    files.append('fast_subtract_%dx%d' % (cols, rows))
+        files.append("fast_subtract_%dx%d" % (cols, rows))
 
-  params = struct(
-    sigmas = sigmas,
-    sizes = sizes,
-  )
+    params = struct(
+        sigmas = sigmas,
+        sizes = sizes,
+    )
 
-  headers = [f + '.h' for f in files] + [
-    'fast_gaussian_all.h',
-  ]
-  objects = [f + '.o' for f in files] + [
-    'fast_gaussian_runtime.o',
-  ]
-  htmls = [f + '.html' for f in files]
+    headers = [f + ".h" for f in files] + [
+        "fast_gaussian_all.h",
+    ]
+    objects = [f + ".o" for f in files] + [
+        "fast_gaussian_runtime.o",
+    ]
+    htmls = [f + ".html" for f in files]
 
-  native.genrule(
-    name = "generate_fast_gaussian",
-    tools = [
-        ":fast_gaussian_runner",
-    ],
-    cmd = ' '.join([
-      '$(location fast_gaussian_runner)',
-      "'" + params.to_json() + "'",
-      # TODO(Brian): This should be RULEDIR once we have support for that.
-      '$(@D)',
-      '$(TARGET_CPU)',
-    ]),
-    outs = headers + objects + htmls,
-    restricted_to = [
-      "//tools:k8",
-      "//tools:armhf-debian",
-    ],
-  )
+    native.genrule(
+        name = "generate_fast_gaussian",
+        tools = [
+            ":fast_gaussian_runner",
+        ],
+        cmd = " ".join([
+            "$(location fast_gaussian_runner)",
+            "'" + params.to_json() + "'",
+            # TODO(Brian): This should be RULEDIR once we have support for that.
+            "$(@D)",
+            "$(TARGET_CPU)",
+        ]),
+        outs = headers + objects + htmls,
+        restricted_to = [
+            "//tools:k8",
+            "//tools:armhf-debian",
+        ],
+    )
 
-  native.cc_library(
-    name = 'fast_gaussian_all',
-    hdrs = ['fast_gaussian_all.h'],
-    srcs = headers + objects,
-    deps = [
-      '//third_party:halide_runtime',
-    ],
-    restricted_to = [
-      "//tools:k8",
-      "//tools:armhf-debian",
-    ],
-  )
+    native.cc_library(
+        name = "fast_gaussian_all",
+        hdrs = ["fast_gaussian_all.h"],
+        srcs = headers + objects,
+        deps = [
+            "//third_party:halide_runtime",
+        ],
+        restricted_to = [
+            "//tools:k8",
+            "//tools:armhf-debian",
+        ],
+    )
diff --git a/y2020/vision/sift/fast_gaussian_runner.py b/y2020/vision/sift/fast_gaussian_runner.py
index 9699fef..6faa9fc 100755
--- a/y2020/vision/sift/fast_gaussian_runner.py
+++ b/y2020/vision/sift/fast_gaussian_runner.py
@@ -22,12 +22,14 @@
 
   commands = []
 
+  amd64_debian_sysroot = r.Rlocation('amd64_debian_sysroot/usr/lib/x86_64-linux-gnu/libc.so.6').rsplit('/', 4)[0]
   env = os.environ.copy()
   env['LD_LIBRARY_PATH'] = ':'.join([
-      'debian_amd64_sysroot/lib/x86_64-linux-gnu',
-      'debian_amd64_sysroot/lib',
-      'debian_amd64_sysroot/usr/lib/x86_64-linux-gnu',
-      'debian_amd64_sysroot/usr/lib',
+      # TODO(brian): Figure this out again.  It is a bit aggressive.
+      #amd64_debian_sysroot + '/lib/x86_64-linux-gnu',
+      #amd64_debian_sysroot + '/lib',
+      #amd64_debian_sysroot + '/usr/lib/x86_64-linux-gnu',
+      #amd64_debian_sysroot + '/usr/lib',
   ])
 
   all_header = [
diff --git a/y2020/vision/sift/sift.fbs b/y2020/vision/sift/sift.fbs
index 97c2b0a..24fd64d 100644
--- a/y2020/vision/sift/sift.fbs
+++ b/y2020/vision/sift/sift.fbs
@@ -9,7 +9,8 @@
 
 // Represents a single feature extracted from an image.
 table Feature {
-  // Contains the descriptor data.
+  // Contains the descriptor data. OpenCV likes to represent them as floats, but
+  // they're really ubytes.
   //
   // TODO(Brian): These are scaled to be convertible to chars. Should we do
   // that to minimize storage space? Or maybe int16?
@@ -17,7 +18,7 @@
   // The size of this depends on the parameters. It is width*width*hist_bins.
   // Currently we have width=4 and hist_bins=8, which results in a size of
   // 4*4*8=128.
-  descriptor:[float];
+  descriptor:[ubyte];
 
   // Location of the keypoint.
   x:float;
@@ -60,8 +61,8 @@
 }
 
 table TransformationMatrix {
-  // The matrix data. This is a row-major 4x4 matrix.
-  // In other words, the bottom row is (0, 0, 0, 1).
+  // The matrix data for a row-major 4x4 homogeneous transformation matrix.
+  // This implies the bottom row is (0, 0, 0, 1).
   data:[float];
 }
 
@@ -97,21 +98,29 @@
   //   rotation around the Z axis by the turret angle
   //   turret_extrinsics
   turret_extrinsics:TransformationMatrix;
+
+  // The standard OpenCV 5-parameter distortion coefficients, in the order
+  // (k1, k2, p1, p2, k3).
+  dist_coeffs:[float];
 }
 
 // Contains the information the EKF wants from an image matched against a single
 // training image.
 //
-// This is represented as a transformation to a target in field coordinates.
+// This is represented as a transformation from the camera to the target
+// (camera_to_target) and a transformation from the field to the target
+// (field_to_target).
+//
+// We also send the map from the field to the camera, which can be computed
+// with the first two, to make it easier to display.
 table CameraPose {
-  // Transformation matrix from the target to the camera's origin.
+  // Transformation matrix from the camera to the target.
   // (0, 0, 0) is the aperture of the camera (we pretend it's an ideal pinhole
   // camera). Positive Z is out of the camera. Positive X and Y are right
   // handed, but which way they face depends on the camera extrinsics.
   camera_to_target:TransformationMatrix;
 
   // Field coordinates of the target, represented as a transformation matrix
-  // from the target to the field.
+  // from the field to the target.
   // (0, 0, 0) is the center of the field, on the level of most of the field
   // (not the region under the truss). Positive X is towards the red alliance's
   // PLAYER STATION. Positive Z is up. The coordinate system is right-handed.
@@ -122,12 +131,22 @@
   // The value here will be selected from a small, static set of targets we
   // train images on.
   field_to_target:TransformationMatrix;
+
+  // The pose of the camera in the field coordinate frame
+  field_to_camera:TransformationMatrix;
+
+  // 2D image coordinate representing target location on the matched image
+  query_target_point_x:float;
+  query_target_point_y:float;
+  // Perceived radius of target circle
+  query_target_point_radius:float;
 }
 
 table ImageMatchResult {
   // The matches from this image to each of the training images which matched.
   // Each member is against the same captured image.
   image_matches:[ImageMatch];
+
   // The transformations for this image for each of the training images which
   // matched.
   // TODO(Brian): Include some kind of covariance information for these.
diff --git a/y2020/vision/sift/sift_training.fbs b/y2020/vision/sift/sift_training.fbs
index 7391e76..12ad9b4 100644
--- a/y2020/vision/sift/sift_training.fbs
+++ b/y2020/vision/sift/sift_training.fbs
@@ -10,6 +10,12 @@
   // from the target to the field. See CameraPose in :sift_fbs for details of
   // the conventions of this.
   field_to_target:TransformationMatrix;
+
+  // 2D image coordinate representing target location on the training image
+  target_point_x:float;
+  target_point_y:float;
+  // Radius of target circle
+  target_point_radius:float;
 }
 
 // Represents the information used to match incoming images against.
diff --git a/y2020/vision/tools/python_code/BUILD b/y2020/vision/tools/python_code/BUILD
index 670b664..1892a88 100644
--- a/y2020/vision/tools/python_code/BUILD
+++ b/y2020/vision/tools/python_code/BUILD
@@ -2,27 +2,26 @@
 
 py_binary(
     name = "load_sift_training",
-    data = [
-        ":test_images/train_power_port_red.png",
-        ":test_images/train_power_port_red_webcam.png",
-        ":test_images/train_power_port_blue.png",
-        ":test_images/train_loading_bay_red.png",
-        ":test_images/train_loading_bay_blue.png",
-    ],
-    srcs = ["load_sift_training.py",
+    srcs = [
         "camera_definition.py",
         "define_training_data.py",
+        "load_sift_training.py",
         "target_definition.py",
         "train_and_match.py",
     ],
-    args = ["sift_training_data.h",
+    args = [
+        "sift_training_data.h",
     ],
+    data = glob(["calib_files/*.txt"]) + glob([
+        "test_images/*.png",
+    ]),
     default_python_version = "PY3",
     srcs_version = "PY2AND3",
     deps = [
+        "//external:python-glog",
         "//y2020/vision/sift:sift_fbs_python",
-        "@opencv_contrib_nonfree_amd64//:python_opencv",
         "@bazel_tools//tools/python/runfiles",
+        "@opencv_contrib_nonfree_amd64//:python_opencv",
     ],
 )
 
@@ -41,9 +40,77 @@
 )
 
 cc_library(
-    name = "sift_training",
+    name = "sift_training_data",
     hdrs = [
         "sift_training_data.h",
     ],
     visibility = ["//visibility:public"],
 )
+
+py_binary(
+    name = "load_sift_training_test",
+    srcs = [
+        "camera_definition_test.py",
+        "define_training_data.py",
+        "load_sift_training.py",
+        "target_definition_test.py",
+        "train_and_match.py",
+    ],
+    args = [
+        "sift_training_data_test.h",
+        "test",
+    ],
+    data = glob(["calib_files/*.txt"]) + glob([
+        "test_images/*.png",
+    ]),
+    default_python_version = "PY3",
+    main = "load_sift_training.py",
+    srcs_version = "PY2AND3",
+    deps = [
+        "//external:python-glog",
+        "//y2020/vision/sift:sift_fbs_python",
+        "@bazel_tools//tools/python/runfiles",
+        "@opencv_contrib_nonfree_amd64//:python_opencv",
+    ],
+)
+
+genrule(
+    name = "run_load_sift_training_test",
+    outs = [
+        "sift_training_data_test.h",
+    ],
+    cmd = " ".join([
+        "$(location :load_sift_training_test)",
+        "$(location sift_training_data_test.h) test",
+    ]),
+    tools = [
+        ":load_sift_training_test",
+    ],
+)
+
+cc_library(
+    name = "sift_training_data_test",
+    hdrs = [
+        "sift_training_data_test.h",
+    ],
+    visibility = ["//visibility:public"],
+)
+
+cc_test(
+    name = "camera_param_test",
+    srcs = [
+        "camera_param_test.cc",
+    ],
+    restricted_to = [
+        "//tools:k8",
+        "//tools:armhf-debian",
+    ],
+    deps = [
+        ":sift_training_data_test",
+        "//aos/testing:googletest",
+        "//third_party:opencv",
+        "//y2020/vision:vision_fbs",
+        "//y2020/vision/sift:sift_fbs",
+        "//y2020/vision/sift:sift_training_fbs",
+    ],
+)
diff --git a/y2020/vision/tools/python_code/calib_files/cam-calib-int_pi-7971-3_Feb-13-2020-00-00-00.txt b/y2020/vision/tools/python_code/calib_files/cam-calib-int_pi-7971-3_Feb-13-2020-00-00-00.txt
new file mode 100644
index 0000000..8c1efbb
--- /dev/null
+++ b/y2020/vision/tools/python_code/calib_files/cam-calib-int_pi-7971-3_Feb-13-2020-00-00-00.txt
@@ -0,0 +1 @@
+{"hostname": "pi-7971-3", "node_name": "pi3", "team_number": 7971, "timestamp": "Feb-13-2020-00-00-00", "camera_matrix": [[388.79947319784713, 0.0, 345.0976031055917], [0.0, 388.539148344188, 265.2780372766764], [0.0, 0.0, 1.0]], "dist_coeffs": [[0.13939139612079282, -0.24345067782097646, -0.0004228219772016648, -0.0004552350162154737, 0.08966339831250879]]}
diff --git a/y2020/vision/tools/python_code/calibrate_intrinsics.py b/y2020/vision/tools/python_code/calibrate_intrinsics.py
new file mode 100644
index 0000000..828a9a9
--- /dev/null
+++ b/y2020/vision/tools/python_code/calibrate_intrinsics.py
@@ -0,0 +1,133 @@
+import cv2
+import cv2.aruco
+import datetime
+import glog
+import json
+from json import JSONEncoder
+import numpy as np
+import os
+import time
+
+
+# From: https://pynative.com/python-serialize-numpy-ndarray-into-json/
+class NumpyArrayEncoder(json.JSONEncoder):
+    def default(self, obj):
+        if isinstance(obj, np.integer):
+            return int(obj)
+        elif isinstance(obj, np.floating):
+            return float(obj)
+        elif isinstance(obj, np.ndarray):
+            return obj.tolist()
+        else:
+            return super(NumpyArrayEncoder, self).default(obj)
+
+
+def get_robot_info(hostname):
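+    # Hostnames are expected to look like pi-TEAM-NODE, e.g. pi-7971-3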
+    hostname_split = hostname.split("-")
+    if hostname_split[0] != "pi":
+        glog.error("Expected hostname to start with pi!")
+        quit()
+
+    team_number = int(hostname_split[1])
+    node_name = hostname_split[0] + hostname_split[2]
+    return node_name, team_number
+
+
+USE_LARGE_BOARD = False
+
+if USE_LARGE_BOARD:
+    dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_5X5_100)
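+    # CharucoBoard_create args: squaresX, squaresY, squareLength (m), markerLength (m), dictionary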
+    # Need to measure what the second size parameter is (markerLength)
+    board = cv2.aruco.CharucoBoard_create(12, 9, .06, .045, dictionary)
+    img = board.draw((200 * 12, 200 * 9))
+else:
+    dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_50)
+    board = cv2.aruco.CharucoBoard_create(11, 8, .015, .011, dictionary)
+    img = board.draw((200 * 11, 200 * 8))
+
+# Dump the calibration board to a file
+cv2.imwrite('charuco.png', img)
+
+# Start capturing images for calibration
+CAMERA_INDEX = 0  # Capture from /dev/videoX, where X=CAMERA_INDEX
+cap = cv2.VideoCapture(CAMERA_INDEX)
+
+allCorners = []
+allIds = []
+capture_count = 0
+MIN_IMAGES = 50
+
+while (capture_count < MIN_IMAGES):
+
+    ret, frame = cap.read()
+    assert ret, "Unable to get image from the camera at /dev/video%d" % CAMERA_INDEX
+    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+    res = cv2.aruco.detectMarkers(gray, dictionary)
+    aruco_detect_image = frame.copy()
+
+    if len(res[0]) > 0 and len(res[1]) > 0:
+        cv2.aruco.drawDetectedMarkers(aruco_detect_image, res[0], res[1])
+
+    # Display every image to let user trigger capture
+    cv2.imshow('frame', aruco_detect_image)
+    keystroke = cv2.waitKey(1)
+
+    if keystroke & 0xFF == ord('q'):
+        break
+    elif keystroke & 0xFF == ord('c'):
+        glog.info("Asked to capture image")
+        if len(res[0]) == 0 or len(res[1]) == 0:
+            # Can't use this image
+            continue
+
+        res2 = cv2.aruco.interpolateCornersCharuco(res[0], res[1], gray, board)
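+        # res2 = (retval, charuco_corners, charuco_ids)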
+        if res2[1] is not None and res2[2] is not None and len(res2[1]) > 3:
+            capture_count += 1
+            charuco_detect_image = frame.copy()
+            allCorners.append(res2[1])
+            allIds.append(res2[2])
+            glog.info("Capturing image #%d" % capture_count)
+            cv2.aruco.drawDetectedCornersCharuco(charuco_detect_image, res2[1],
+                                                 res2[2])
+
+            cv2.imshow('frame', charuco_detect_image)
+            cv2.waitKey(1000)
+            # TODO<Jim>: Should log image to disk
+
+# Calibration can fail for lots of reasons; release the video capture if it does
+try:
+    imsize = gray.shape
+    cal = cv2.aruco.calibrateCameraCharuco(allCorners, allIds, board, imsize,
+                                           None, None)
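+    # cal = (reprojection_error, camera_matrix, dist_coeffs, rvecs, tvecs)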
+    glog.info("Calibration is:\n", cal)
+    glog.info("Reproduction error:", cal[0])
+    if (cal[0] > 1.0):
+        glog.error("REPRODUCTION ERROR NOT GOOD")
+    glog.info("Calibration matrix:\n", cal[1])
+    glog.info("Distortion Coefficients:\n", cal[2])
+except:
+    glog.error("Calibration failed")
+    cap.release()
+    quit()
+
+hostname = os.uname()[1]
+date_str = datetime.datetime.today().strftime("%Y-%m-%d-%H-%M-%S")
+node_name, team_number = get_robot_info(hostname)
+numpyData = {
+    "hostname": hostname,
+    "node_name": node_name,
+    "team_number": team_number,
+    "timestamp": date_str,
+    "camera_matrix": cal[1],
+    "dist_coeffs": cal[2]
+}
+encodedNumpyData = json.dumps(
+    numpyData, cls=NumpyArrayEncoder)  # use dump() to write array into file
+
+# Write out the data
+calib_file = open("cam_calib_%s_%s.txt" % (hostname, date_str), "w")
+calib_file.write(encodedNumpyData)
+calib_file.close()
+
+cap.release()
+cv2.destroyAllWindows()
diff --git a/y2020/vision/tools/python_code/camera_definition.py b/y2020/vision/tools/python_code/camera_definition.py
index 59f008f..4d8929a 100644
--- a/y2020/vision/tools/python_code/camera_definition.py
+++ b/y2020/vision/tools/python_code/camera_definition.py
@@ -1,31 +1,42 @@
-import argparse
-import cv2
+import copy
+import glog
 import json
 import math
 import numpy as np
-import time
+import os
+
+glog.setLevel("WARN")
+USE_BAZEL = True
+
+
+def bazel_name_fix(filename):
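+    # Under bazel, data files are resolved through runfiles under the workspace name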
+    ret_name = filename
+    if USE_BAZEL:
+        ret_name = 'org_frc971/y2020/vision/tools/python_code/' + filename
+
+    return ret_name
 
 
 class CameraIntrinsics:
     def __init__(self):
         self.camera_matrix = []
-        self.distortion_coeffs = []
+        self.dist_coeffs = []
 
     pass
 
+
 class CameraExtrinsics:
     def __init__(self):
         self.R = []
         self.T = []
 
-    pass
 
 class CameraParameters:
     def __init__(self):
         self.camera_int = CameraIntrinsics()
         self.camera_ext = CameraExtrinsics()
-
-    pass
+        self.node_name = ""
+        self.team_number = -1
 
 
 ### CAMERA DEFINITIONS
@@ -43,15 +54,54 @@
 # Define a web_cam
 web_cam_int = CameraIntrinsics()
 web_cam_int.camera_matrix = np.asarray([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
-web_cam_int.distortion_coeffs = np.zeros((5,1))
+web_cam_int.dist_coeffs = np.zeros((5, 1))
 
 web_cam_ext = CameraExtrinsics()
 # Camera rotation from robot x,y,z to opencv (z, -x, -y)
-web_cam_ext.R = np.array([[0., 0., 1.],
-                          [-1, 0,  0],
-                          [0, -1., 0]])
-web_cam_ext.T = np.array([[0., 0., 0.]]).T
+web_cam_ext.R = np.array([[0., 0., 1.], [-1, 0, 0], [0, -1., 0]])
+web_cam_ext.T = np.array([0., 0., 0.])
 
 web_cam_params = CameraParameters()
 web_cam_params.camera_int = web_cam_int
 web_cam_params.camera_ext = web_cam_ext
+
+camera_list = []
+
+# TODO<Jim>: Should probably make this a dict to make replacing easier
+for team_number in (971, 7971, 8971, 9971):
+    for node_name in ("pi0", "pi1", "pi2", "pi3", "pi4", "pi5"):
+        camera_base = copy.deepcopy(web_cam_params)
+        camera_base.node_name = node_name
+        camera_base.team_number = team_number
+        camera_list.append(camera_base)
+
+dir_name = 'calib_files'
+
+if USE_BAZEL:
+    from bazel_tools.tools.python.runfiles import runfiles
+    r = runfiles.Create()
+    dir_name = r.Rlocation(bazel_name_fix('calib_files'))
+
+for filename in os.listdir(dir_name):
+    if "cam-calib-int" in filename and filename.endswith(".txt"):
+        # Extract intrinsics from file
+        fn_split = filename.split("_")
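+        # Filenames look like cam-calib-int_pi-TEAM-NODE_DATE.txt, so fn_split[1] is the hostname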
+        hostname_split = fn_split[1].split("-")
+        if hostname_split[0] == "pi":
+            team_number = int(hostname_split[1])
+            node_name = hostname_split[0] + hostname_split[2]
+
+        calib_file = open(dir_name + "/" + filename, 'r')
+        calib_dict = json.loads(calib_file.read())
+        hostname = np.asarray(calib_dict["hostname"])
+        camera_matrix = np.asarray(calib_dict["camera_matrix"])
+        dist_coeffs = np.asarray(calib_dict["dist_coeffs"])
+
+        # Look for match, and replace camera_intrinsics
+        for camera_calib in camera_list:
+            if camera_calib.node_name == node_name and camera_calib.team_number == team_number:
+                glog.info("Found calib for %s, team #%d" % (node_name,
+                                                            team_number))
+                camera_calib.camera_int.camera_matrix = copy.copy(
+                    camera_matrix)
+                camera_calib.camera_int.dist_coeffs = copy.copy(dist_coeffs)
diff --git a/y2020/vision/tools/python_code/camera_definition_test.py b/y2020/vision/tools/python_code/camera_definition_test.py
new file mode 100644
index 0000000..732dbb8
--- /dev/null
+++ b/y2020/vision/tools/python_code/camera_definition_test.py
@@ -0,0 +1,61 @@
+import copy
+import math
+import numpy as np
+
+
+class CameraIntrinsics:
+    def __init__(self):
+        self.camera_matrix = []
+        self.dist_coeffs = []
+
+
+class CameraExtrinsics:
+    def __init__(self):
+        self.R = []
+        self.T = []
+
+
+class CameraParameters:
+    def __init__(self):
+        self.camera_int = CameraIntrinsics()
+        self.camera_ext = CameraExtrinsics()
+        self.node_name = ""
+        self.team_number = -1
+
+
+### CAMERA DEFINITIONS
+
+# Robot camera has:
+# FOV_H = 93.*math.pi()/180.
+# FOV_V = 70.*math.pi()/180.
+
+# Create fake camera (based on USB webcam params)
+fx = 810.
+fy = 810.
+cx = 320.
+cy = 240.
+
+# Define a web_cam
+web_cam_int = CameraIntrinsics()
+web_cam_int.camera_matrix = np.asarray([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
+web_cam_int.dist_coeffs = np.zeros((5, 1))
+
+web_cam_ext = CameraExtrinsics()
+# Camera rotation from robot x,y,z to opencv (z, -x, -y)
+web_cam_ext.R = np.array([[0., 0., 1.], [-1, 0, 0], [0, -1., 0]])
+web_cam_ext.T = np.array([0., 0., 0.])
+
+web_cam_params = CameraParameters()
+web_cam_params.camera_int = web_cam_int
+web_cam_params.camera_ext = web_cam_ext
+
+camera_list = []
+
+for team_number in (971, 8971, 9971):
+    for (i, node_name) in enumerate(("pi-1", "pi-2", "pi-3", "pi-4", "pi-5")):
+        camera_base = copy.deepcopy(web_cam_params)
+        camera_base.node_name = node_name
+        camera_base.team_number = team_number
+        camera_base.camera_ext.T = np.asarray(
+            np.float32([i + 1, i + 1, i + 1]))
+        camera_list.append(camera_base)
diff --git a/y2020/vision/tools/python_code/camera_param_test.cc b/y2020/vision/tools/python_code/camera_param_test.cc
new file mode 100644
index 0000000..483fe75
--- /dev/null
+++ b/y2020/vision/tools/python_code/camera_param_test.cc
@@ -0,0 +1,279 @@
+#include <unistd.h>
+#include <opencv2/features2d.hpp>
+#include <opencv2/imgproc.hpp>
+
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+#if 1
+#include "y2020/vision/tools/python_code/sift_training_data_test.h"
+#else
+// Using this include (and changing the BUILD file) allows testing on real data
+#include "y2020/vision/tools/python_code/sift_training_data.h"
+#endif
+
+#include "y2020/vision/sift/sift_generated.h"
+#include "y2020/vision/sift/sift_training_generated.h"
+#include "y2020/vision/vision_generated.h"
+
+namespace frc971 {
+namespace vision {
+namespace {
+
+class Feature {
+ public:
+  std::vector<float> descriptor_;
+  /*	float x_;
+  float y_;
+  float size_;
+  float angle_;
+  float response_;
+  int octave_;
+  cv::Point3f keypoint_field_location_;*/
+};
+
+class CameraCalibration {
+ public:
+  cv::Mat intrinsics_;
+  cv::Mat fixed_extrinsics_;
+};
+
+class TrainingImage {
+ public:
+  cv::Mat features_;
+  float target_point_x_;
+  float target_point_y_;
+  cv::Mat field_to_target_;
+};
+
+class TrainingData {
+ public:
+  std::vector<TrainingImage> images_;
+  CameraCalibration camera_calibration_;
+};
+
+class CameraParamTest {
+ public:
+  CameraParamTest() {
+    const auto training_data_bfbs = SiftTrainingData();
+    sift_training_data_ =
+        flatbuffers::GetRoot<sift::TrainingData>(training_data_bfbs.data());
+    {
+      flatbuffers::Verifier verifier(
+          reinterpret_cast<const uint8_t *>(training_data_bfbs.data()),
+          training_data_bfbs.size());
+
+      CHECK(sift_training_data_->Verify(verifier));
+    }
+
+    CopyTrainingFeatures();
+    sift_camera_calibration_ = CameraParamTest::FindCameraCalibration();
+    camera_intrinsic_matrix_ = CameraIntrinsicMatrix();
+    camera_dist_coeffs_ = CameraDistCoeffs();
+    camera_extrinsics_ = CameraExtrinsics();
+  }
+
+  // Copies the information from sift_training_data_ into matcher_.
+  void CopyTrainingFeatures();
+
+  const sift::CameraCalibration *FindCameraCalibration() const;
+
+  // Returns the 2D image location for the specified training feature.
+  cv::Point2f Training2dPoint(int training_image_index, int feature_index);
+  // Returns the 3D location for the specified training feature.
+  cv::Point3f Training3dPoint(int training_image_index, int feature_index);
+
+  const sift::TransformationMatrix *FieldToTarget(int training_image_index) {
+    return sift_training_data_->images()
+        ->Get(training_image_index)
+        ->field_to_target();
+  }
+
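+  // Note: these cv::Mat accessors wrap the flatbuffer-owned data in place, without copying it.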
+  cv::Mat CameraIntrinsicMatrix() const {
+    const cv::Mat result(3, 3, CV_32F,
+                         const_cast<void *>(static_cast<const void *>(
+                             sift_camera_calibration_->intrinsics()->data())));
+    CHECK_EQ(result.total(), sift_camera_calibration_->intrinsics()->size());
+    return result;
+  }
+
+  cv::Mat CameraDistCoeffs() const {
+    const cv::Mat result(5, 1, CV_32F,
+                         const_cast<void *>(static_cast<const void *>(
+                             sift_camera_calibration_->dist_coeffs()->data())));
+    CHECK_EQ(result.total(), sift_camera_calibration_->dist_coeffs()->size());
+    return result;
+  }
+
+  cv::Mat CameraExtrinsics() const {
+    const cv::Mat result(
+        4, 4, CV_32F,
+        const_cast<void *>(static_cast<const void *>(
+            sift_camera_calibration_->fixed_extrinsics()->data()->data())));
+    return result;
+  }
+
+  int number_training_images() const {
+    return sift_training_data_->images()->size();
+  }
+
+  const std::string node_name_ = "pi-3";
+  const int team_number_ = 971;
+
+  // We'll just extract the one that matches our current module
+  const sift::CameraCalibration *sift_camera_calibration_;
+  cv::Mat camera_intrinsic_matrix_;
+  cv::Mat camera_dist_coeffs_;
+  cv::Mat camera_extrinsics_;
+
+  TrainingData training_data_;
+  const sift::TrainingData *sift_training_data_;
+  std::vector<sift::TransformationMatrix *> field_to_targets_;
+  std::vector<cv::Point2f> point_list_2d_;
+  std::vector<cv::Point3f> point_list_3d_;
+};
+
+// Copy in the keypoint descriptors, the 2d (image) location,
+// and the 3d (field) location
+void CameraParamTest::CopyTrainingFeatures() {
+  int train_image_index = 0;
+  for (const sift::TrainingImage *training_image :
+       *sift_training_data_->images()) {
+    TrainingImage tmp_training_image_data;
+    cv::Mat features(training_image->features()->size(), 128, CV_32F);
+    for (size_t i = 0; i < training_image->features()->size(); ++i) {
+      const sift::Feature *feature_table = training_image->features()->Get(i);
+      const flatbuffers::Vector<uint8_t> *const descriptor =
+          feature_table->descriptor();
+      CHECK_EQ(descriptor->size(), 128u) << ": Unsupported feature size";
+      const auto in_mat = cv::Mat(
+          1, descriptor->size(), CV_8U,
+          const_cast<void *>(static_cast<const void *>(descriptor->data())));
+      const auto out_mat = features(cv::Range(i, i + 1), cv::Range(0, 128));
+      in_mat.convertTo(out_mat, CV_32F);
+
+      cv::Point2f point_2d = Training2dPoint(train_image_index, i);
+      point_list_2d_.push_back(point_2d);
+      cv::Point3f point_3d = Training3dPoint(train_image_index, i);
+      point_list_3d_.push_back(point_3d);
+    }
+    const sift::TransformationMatrix *field_to_target_ =
+        FieldToTarget(train_image_index);
+    const cv::Mat field_to_target_mat(
+        4, 4, CV_32F,
+        const_cast<void *>(
+            static_cast<const void *>(field_to_target_->data()->data())));
+    tmp_training_image_data.features_ = features;
+    tmp_training_image_data.field_to_target_ = field_to_target_mat;
+    tmp_training_image_data.target_point_x_ =
+        sift_training_data_->images()->Get(train_image_index)->target_point_x();
+    tmp_training_image_data.target_point_y_ =
+        sift_training_data_->images()->Get(train_image_index)->target_point_y();
+
+    training_data_.images_.push_back(tmp_training_image_data);
+    train_image_index++;
+  }
+}
+
+const sift::CameraCalibration *CameraParamTest::FindCameraCalibration() const {
+  for (const sift::CameraCalibration *candidate :
+       *sift_training_data_->camera_calibrations()) {
+    if (candidate->node_name()->string_view() != node_name_) {
+      continue;
+    }
+    if (candidate->team_number() != team_number_) {
+      continue;
+    }
+    return candidate;
+  }
+  LOG(INFO) << ": Failed to find camera calibration for " << node_name_
+            << " on " << team_number_;
+  return NULL;
+}
+
+// Returns the 2D image location for the specified training feature.
+cv::Point2f CameraParamTest::Training2dPoint(int training_image_index,
+                                             int feature_index) {
+  const float x = sift_training_data_->images()
+                      ->Get(training_image_index)
+                      ->features()
+                      ->Get(feature_index)
+                      ->x();
+  const float y = sift_training_data_->images()
+                      ->Get(training_image_index)
+                      ->features()
+                      ->Get(feature_index)
+                      ->y();
+  return cv::Point2f(x, y);
+}
+
+// Returns the 3D location for the specified training feature.
+cv::Point3f CameraParamTest::Training3dPoint(int training_image_index,
+                                             int feature_index) {
+  const sift::KeypointFieldLocation *const location =
+      sift_training_data_->images()
+          ->Get(training_image_index)
+          ->features()
+          ->Get(feature_index)
+          ->field_location();
+  return cv::Point3f(location->x(), location->y(), location->z());
+}
+
+TEST(CameraParamTest, TargetDataTest) {
+  CameraParamTest camera_params;
+
+  ASSERT_EQ(camera_params.number_training_images(), 1);
+  ASSERT_EQ(camera_params.point_list_2d_.size(), 676);
+  ASSERT_EQ(camera_params.point_list_2d_[0].x, 0);
+  ASSERT_EQ(camera_params.point_list_2d_[0].y, 1);
+  ASSERT_EQ(camera_params.point_list_3d_.size(), 676);
+  ASSERT_EQ(camera_params.point_list_3d_[0].x, 0);
+  ASSERT_EQ(camera_params.point_list_3d_[1].y, 1);
+  ASSERT_EQ(camera_params.point_list_3d_[2].z, 2);
+
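+  // Expected field_to_target: identity rotation with translation (0, 1, 2).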
+  float ftt_mat[16] = {1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 2, 0, 0, 0, 1};
+  cv::Mat field_to_targets_0 = cv::Mat(4, 4, CV_32F, ftt_mat);
+  cv::Mat ftt_diff =
+      (camera_params.training_data_.images_[0].field_to_target_ !=
+       field_to_targets_0);
+  bool ftt_eq = (cv::countNonZero(ftt_diff) == 0);
+  ASSERT_TRUE(ftt_eq)
+      << "Mismatch on field_to_target: "
+      << camera_params.training_data_.images_[0].field_to_target_ << "\nvs.\n"
+      << field_to_targets_0;
+
+  ASSERT_EQ(camera_params.training_data_.images_[0].target_point_x_, 10.);
+  ASSERT_EQ(camera_params.training_data_.images_[0].target_point_y_, 20.);
+
+  float intrinsic_mat[9] = {810, 0, 320, 0, 810, 240, 0, 0, 1};
+  cv::Mat intrinsic = cv::Mat(3, 3, CV_32F, intrinsic_mat);
+  cv::Mat intrinsic_diff =
+      (intrinsic != camera_params.camera_intrinsic_matrix_);
+  bool intrinsic_eq = (cv::countNonZero(intrinsic_diff) == 0);
+  ASSERT_TRUE(intrinsic_eq)
+      << "Mismatch on camera intrinsic matrix: " << intrinsic << "\nvs.\n"
+      << camera_params.camera_intrinsic_matrix_;
+
+  float dist_coeffs_mat[5] = {0., 0., 0., 0., 0.};
+  cv::Mat dist_coeffs = cv::Mat(5, 1, CV_32F, dist_coeffs_mat);
+  cv::Mat dist_coeffs_diff = (dist_coeffs != camera_params.camera_dist_coeffs_);
+  bool dist_coeffs_eq = (cv::countNonZero(dist_coeffs_diff) == 0);
+  ASSERT_TRUE(dist_coeffs_eq)
+      << "Mismatch on camera distortion coefficients: " << dist_coeffs
+      << "\nvs.\n"
+      << camera_params.camera_dist_coeffs_;
+
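+  // camera_definition_test.py sets T = (i+1, i+1, i+1); "pi-3" is index 2, so expect 3.0.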
+  float i_f = 3.0;
+  float extrinsic_mat[16] = {0, 0,  1, i_f, -1, 0, 0, i_f,
+                             0, -1, 0, i_f, 0,  0, 0, 1};
+  cv::Mat extrinsic = cv::Mat(4, 4, CV_32F, extrinsic_mat);
+  cv::Mat extrinsic_diff = (extrinsic != camera_params.camera_extrinsics_);
+  bool extrinsic_eq = (cv::countNonZero(extrinsic_diff) == 0);
+  ASSERT_TRUE(extrinsic_eq)
+      << "Mismatch of extrinsic: " << extrinsic << "\nvs.\n"
+      << camera_params.camera_extrinsics_;
+}
+
+}  // namespace
+}  // namespace vision
+}  // namespace frc971
diff --git a/y2020/vision/tools/python_code/define_training_data.py b/y2020/vision/tools/python_code/define_training_data.py
index 22eb6ce..8116fb7 100644
--- a/y2020/vision/tools/python_code/define_training_data.py
+++ b/y2020/vision/tools/python_code/define_training_data.py
@@ -1,5 +1,6 @@
 import argparse
 import cv2
+import glog
 import json
 import math
 import numpy as np
@@ -17,7 +18,7 @@
     global current_mouse
     current_mouse = (x, y)
     if event == cv2.EVENT_LBUTTONUP:
-        #print("Adding point at %d, %d" % (x,y))
+        glog.debug("Adding point at %d, %d" % (x, y))
         point_list.append([x, y])
     pass
 
@@ -142,20 +143,22 @@
 
     return point_list
 
+
 # Determine whether a given point lies within (or on border of) a set of polygons
 # Return true if it does
 def point_in_polygons(point, polygons):
     for poly in polygons:
         np_poly = np.asarray(poly)
         dist = cv2.pointPolygonTest(np_poly, (point[0], point[1]), True)
-        if dist >=0:
+        if dist >= 0:
             return True
 
     return False
 
+
 ## Filter keypoints by polygons
 def filter_keypoints_by_polygons(keypoint_list, descriptor_list, polygons):
-    # TODO: Need to make sure we've got the right numpy array / list 
+    # TODO: Need to make sure we've got the right numpy array / list
     keep_keypoint_list = []
     keep_descriptor_list = []
     reject_keypoint_list = []
@@ -165,7 +168,8 @@
 
     # For now, pretend keypoints are just points
     for i in range(len(keypoint_list)):
-        if point_in_polygons((keypoint_list[i].pt[0], keypoint_list[i].pt[1]), polygons):
+        if point_in_polygons((keypoint_list[i].pt[0], keypoint_list[i].pt[1]),
+                             polygons):
             keep_list.append(i)
         else:
             reject_list.append(i)
@@ -174,51 +178,56 @@
     reject_keypoint_list = [keypoint_list[kp_ind] for kp_ind in reject_list]
     # Allow function to be called with no descriptors, and just return empty list
     if descriptor_list is not None:
-        keep_descriptor_list = descriptor_list[keep_list,:]
-        reject_descriptor_list = descriptor_list[reject_list,:]
+        keep_descriptor_list = descriptor_list[keep_list, :]
+        reject_descriptor_list = descriptor_list[reject_list, :]
 
     return keep_keypoint_list, keep_descriptor_list, reject_keypoint_list, reject_descriptor_list, keep_list
 
+
 # Helper function that appends a column of ones to a list (of 2d points)
 def append_ones(point_list):
-    return np.hstack([point_list, np.ones((len(point_list),1))])
+    return np.hstack([point_list, np.ones((len(point_list), 1))])
+
 
 # Given a list of 2d points and their corresponding 3d locations, compute the map
 # between them.
 # pt_3d = (pt_2d, 1) * reprojection_map
 # TODO: We should really look at residuals to see if things are messed up
 def compute_reprojection_map(polygon_2d, polygon_3d):
-    pts_2d = np.asarray(np.float32(polygon_2d)).reshape(-1,2)
+    pts_2d = np.asarray(np.float32(polygon_2d)).reshape(-1, 2)
     pts_2d_lstsq = append_ones(pts_2d)
-    pts_3d_lstsq = np.asarray(np.float32(polygon_3d)).reshape(-1,3)
+    pts_3d_lstsq = np.asarray(np.float32(polygon_3d)).reshape(-1, 3)
 
     reprojection_map = np.linalg.lstsq(pts_2d_lstsq, pts_3d_lstsq, rcond=-1)[0]
 
     return reprojection_map
 
+
 # Given set of keypoints (w/ 2d location), a reprojection map, and a polygon
 # that defines regions for where this is valid
 # Returns a numpy array of 3d locations the same size as the keypoint list
 def compute_3d_points(keypoint_2d_list, reprojection_map):
-    pt_2d_lstsq = append_ones(np.asarray(np.float32(keypoint_2d_list)).reshape(-1,2))
+    pt_2d_lstsq = append_ones(
+        np.asarray(np.float32(keypoint_2d_list)).reshape(-1, 2))
     pt_3d = pt_2d_lstsq.dot(reprojection_map)
 
     return pt_3d
 
+
 # Given 2d and 3d locations, and camera location and projection model,
 # display locations on an image
-def visualize_reprojections(img, pts_2d, pts_3d, cam_mat, distortion_coeffs):
+def visualize_reprojections(img, pts_2d, pts_3d, cam_mat, dist_coeffs):
     # Compute camera location
     # TODO: Warn on bad inliers
     # TODO: Change this to not have to recast to np
     pts_2d_np = np.asarray(np.float32(pts_2d)).reshape(-1, 1, 2)
     pts_3d_np = np.asarray(np.float32(pts_3d)).reshape(-1, 1, 3)
     retval, R, T, inliers = cv2.solvePnPRansac(pts_3d_np, pts_2d_np, cam_mat,
-                                               distortion_coeffs)
+                                               dist_coeffs)
     pts_3d_proj_2d, jac_2d = cv2.projectPoints(pts_3d_np, R, T, cam_mat,
-                                               distortion_coeffs)
+                                               dist_coeffs)
     if inliers is None:
-        print("WARNING: Didn't get any inliers when reprojecting polygons")
+        glog.warn("WARNING: Didn't get any inliers when reprojecting polygons")
         return img
     for i in range(len(pts_2d)):
         pt_2d = pts_2d_np[i][0]
@@ -239,17 +248,14 @@
     image = cv2.imread("test_images/train_power_port_red.png")
 
     polygon_list = define_polygon(image)
-    print(polygon_list)
+    glog.debug(polygon_list)
 
 
 def sample_define_points_by_list_usage():
     image = cv2.imread("test_images/train_power_port_red.png")
 
-    test_points = [(451, 679), (451, 304),
-                   (100, 302), (451, 74),
-                   (689, 74), (689, 302),
-                   (689, 679)]
+    test_points = [(451, 679), (451, 304), (100, 302), (451, 74), (689, 74),
+                   (689, 302), (689, 679)]
 
     polygon_list = define_points_by_list(image, test_points)
-    print(polygon_list)
-
+    glog.debug(polygon_list)
diff --git a/y2020/vision/tools/python_code/field_display.py b/y2020/vision/tools/python_code/field_display.py
index 93f645c..11d098d 100644
--- a/y2020/vision/tools/python_code/field_display.py
+++ b/y2020/vision/tools/python_code/field_display.py
@@ -17,17 +17,18 @@
 
 
 # Convert field coordinates (meters) to image coordinates (pixels).
-# Field origin is (x,y) at center of red alliance driver station wall,
-# with x pointing into the field.
+# Field origin is (x,y) at center of the field,
+# with x pointing towards the red alliance driver station
 # The default field image is 1598 x 821 pixels, so 1 cm per pixel on field
 # I.e., Field is 1598 x 821 pixels = 15.98 x 8.21 meters
-# Our origin in image coordinates is at (1598, 410) pixels, facing in the -x image direction
+# Our origin in image coordinates is at (799, 410) pixels, with +x direction
+# to the right
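+# e.g., field (0, 0) m maps to pixel (799, 410), and field (1, 0) m maps to (899, 410)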
 
 
 def field_coord_to_image_coord(robot_position):
     # Scale by 100 pixels / meter
-    robot_pos_img_coord = np.array([[1598, 410]]).T + np.int32(
-        100.0 * np.array([[-robot_position[0][0], robot_position[1][0]]]).T)
+    robot_pos_img_coord = np.array([[799, 410]]).T + np.int32(
+        100.0 * np.array([[robot_position[0][0], -robot_position[1][0]]]).T)
     return robot_pos_img_coord
 
 
diff --git a/y2020/vision/tools/python_code/image_capture.py b/y2020/vision/tools/python_code/image_capture.py
new file mode 100644
index 0000000..d6ce6d7
--- /dev/null
+++ b/y2020/vision/tools/python_code/image_capture.py
@@ -0,0 +1,34 @@
+import cv2
+import datetime
+# Open the camera device at index X, i.e. /dev/videoX
+CAMERA_INDEX = 0
+cap = cv2.VideoCapture(CAMERA_INDEX)
+
+# Check whether the user-selected camera opened successfully.
+if not (cap.isOpened()):
+    print("Could not open video device /dev/video%d" % CAMERA_INDEX)
+    quit()
+
+while True:
+    # Capture frame-by-frame
+    ret, frame = cap.read()
+
+    exp = cap.get(cv2.CAP_PROP_EXPOSURE)
+    #print("Exposure:", exp)
+    # Display the resulting frame
+    cv2.imshow('preview', frame)
+
+    # Wait for user input to capture an image or quit the application
+    keystroke = cv2.waitKey(1)
+
+    if keystroke & 0xFF == ord('q'):
+        break
+    elif keystroke & 0xFF == ord('c'):
+        image_name = datetime.datetime.today().strftime(
+            "capture-%Y-%m-%d-%H-%M-%S.png")
+        print("Capturing image as %s" % image_name)
+        cv2.imwrite(image_name, frame)
+
+# When everything's done, release the capture
+cap.release()
+cv2.destroyAllWindows()
diff --git a/y2020/vision/tools/python_code/image_match_test.py b/y2020/vision/tools/python_code/image_match_test.py
index d6b7deb..455bc30 100644
--- a/y2020/vision/tools/python_code/image_match_test.py
+++ b/y2020/vision/tools/python_code/image_match_test.py
@@ -1,36 +1,20 @@
 import cv2
 import math
-from matplotlib import pyplot as plt
 import numpy as np
 import time
 
 import field_display
 import train_and_match as tam
+import target_definition
+import camera_definition
 
 ### DEFINITIONS
+target_definition.USE_BAZEL = False
+target_list = target_definition.compute_target_definition()
+camera_list = camera_definition.camera_list
 
-# List of images to train on
-training_image_names = [
-    'test_images/train_power_port_blue.png',
-    # Using webcam captured image for testing-- best to use only one of the two
-    # training images for the power_port_red
-    'test_images/train_power_port_red_webcam.png',
-    #    'test_images/train_power_port_red.png',
-    'test_images/train_loading_bay_blue.png',
-    'test_images/train_loading_bay_red.png'
-]
-
-# Target points on the image -- needs to match training images-- one per image
-# These are manually captured by examining the images, and entering the pixel
-# values from the target center for each image.
-# These are currently only used for visualization of the target
-target_pt_list = [
-    np.float32([[393, 147]]).reshape(-1, 1, 2),  # train_power_port_blue.png
-    np.float32([[305, 97]]).reshape(-1, 1, 2),  #train_power_port_red_webcam.png
-    #    np.float32([[570,192]]).reshape(-1,1,2), # train_power_port_red.png
-    np.float32([[366, 236]]).reshape(-1, 1, 2),  # train_loading_bay_blue.png
-    np.float32([[366, 236]]).reshape(-1, 1, 2),  # train_loading_bay_red.png
-]
+# For now, just use the first one
+camera_params = camera_list[0]
 
 # Put list of all possible images we want to test, and then we'll select one
 # Note, if you query on the same image as training, only that image will match
@@ -45,8 +29,8 @@
     'test_images/test_raspi3_sample.jpg',  #7
     'test_images/test_VR_sample1.png',  #8
     'test_images/train_loading_bay_blue.png',  #9
-    'test_images/train_loading_bay_red.png'
-]  #10
+    'test_images/train_loading_bay_red.png'  #10
+]
 
 training_image_index = 0
 # TODO: Should add argParser here to select this
@@ -54,15 +38,25 @@
 
 ##### Let's get to work!
 
-# Load the training and query images
-training_images = tam.load_images(training_image_names)
+training_image_names = []
+training_images = []
+train_keypoint_lists = []
+train_descriptor_lists = []
+target_points_tmp = []
+target_pt_list = None
+# Populate the data structures used below
+for target in target_list:
+    # Image names
+    print("Target filename:", target.image_filename)
+    training_image_names.append(target.image_filename)
+    # The training images
+    training_images.append(target.image)
+    # Keypoints and descriptors
+    train_keypoint_lists.append(target.keypoint_list)
+    train_descriptor_lists.append(target.descriptor_list)
+    target_points_tmp.append(target.target_point_2d)
 
-# Create feature extractor
-feature_extractor = tam.load_feature_extractor()
-
-# Extract keypoints and descriptors for model
-train_keypoint_lists, train_descriptor_lists = tam.detect_and_compute(
-    feature_extractor, training_images)
+target_pt_list = np.asarray(target_points_tmp).reshape(-1, 1, 2)
 
 # # Create the matcher.  This only needs to be created once
 ts = time.monotonic()  # Do some basic timing
@@ -72,6 +66,11 @@
 for i in range(len(train_keypoint_lists)):
     print("Model ", i, " has ", len(train_keypoint_lists[i]), " features: ")
 
+# Create feature extractor
+# TODO<Jim>: Should probably make sure we're using the same extractor for
+# training and query
+feature_extractor = tam.load_feature_extractor()
+
 # Load the query image in.  Based on index in our list, or using camera if index is -1
 query_images = []
 
@@ -121,183 +120,160 @@
     for i in range(len(train_keypoint_lists)):
         print("Model ", i, " has # good matches: ", len(good_matches_list[i]))
 
-    # TEMP: Use first list for what follows
-    # TODO: Should look for best match after homography and display that one
-    # OR: show results on all good matches
-    good_matches = good_matches_list[0]
+    # If we're querying static images, show full results
+    if (query_image_index != -1):
+        print("Showing results for static query image")
+        homography_list, matches_mask_list = tam.show_results(
+            training_images, train_keypoint_lists, query_images,
+            query_keypoint_lists, target_pt_list, good_matches_list)
 
     # Next, find homography (map between training and query images)
     homography_list, matches_mask_list = tam.compute_homographies(
         train_keypoint_lists, query_keypoint_lists, good_matches_list)
 
-    if matches_mask_list[0].count(1) < tam.MIN_MATCH_COUNT:
-        print(
-            "Not continuing pose calc because not enough good matches after homography-- only had ",
-            matches_mask_list[0].count(1))
-        continue
+    # Image used to store field plot
+    img_ret = None
+    for i, good_matches in enumerate(good_matches_list):
+        # TODO: Should look for best match after homography and display that one
+        # OR: show results on all good matches
+        print("Processing match for model %d" % i)
 
-    # Next, let's go to compute pose
-    # I've chosen some coordinate frames to help track things.  Using indices:
-    #   w:    world coordinate frame (at center wall of driver station)
-    #   tarj: camera pose when jth target (e.g., power panel) was captured
-    #   ci:   ith camera
-    #   b:    body frame of robot (robot location)
-    #
-    # And, T for translation, R for rotation, "est" for estimated
-    # So, T_w_b_est is the estimated translation from world to body coords
-    #
-    # A few notes:
-    #   -- CV uses rotated frame, where z points out of camera, y points down
-    #   -- This causes a lot of mapping around of points, but overall OK
+        if matches_mask_list[i].count(1) < tam.MIN_MATCH_COUNT:
+            print(
+                "Not continuing pose calc because not enough good matches after homography-- only had ",
+                matches_mask_list[i].count(1))
+            continue
 
-    ### TODO: Still need to clean this rest up
+        # Next, let's go to compute pose
+        # I've chosen some coordinate frames to help track things.  Using indices:
+        #   w:    world coordinate frame (at center wall of driver station)
+        #   tarj: camera pose when jth target (e.g., power panel) was captured
+        #   ci:   ith camera
+        #   b:    body frame of robot (robot location)
+        #
+        # And, T for translation, R for rotation, "est" for estimated
+        # So, T_w_b_est is the estimated translation from world to body coords
+        #
+        # A few notes:
+        #   -- CV uses rotated frame, where z points out of camera, y points down
+        #   -- This causes a lot of mapping around of points, but overall OK
 
-    # TODO: Need to generalize this for each target-- need to have 3D global locations of each target's keypoints
-    field_length = 15.98
-    target_center_offset_y = 1.67
-    target_center_height = 2.49
-    if training_image_index is 0:
-        # webcam
-        fx = 810.
-        fy = 810.
-        cx = 320.
-        cy = 240.
-        cy_above_ground = 1.  # meters above ground
-        depth = 4.57  # meters
-        target_center_offset_y = 1.67
-    elif training_image_index is 1:
-        # Made up for screenshot from manual
-        cx = int(1165 / 2)
-        cy_above_ground = -0.5856  # meters above ground
-        cy = 776 / 2
-        fx = 1143  # pixels (or 0.00160m or 1143 pixels?)
-        depth = 4.57  # meters
-    elif training_image_index is 2:
-        cx = 732 / 2
-        cy_above_ground = 0.454  # meters above ground
-        cy = 436 / 2
-        target_width = 5 * 12 * 0.0254  # width in ft * in/ft * m/in
-        target_height = target_width * cy / cx  # width in ft * in/ft * m/in * cy / cx
-        FOV_x = 54 * math.pi / 180.  # camera FOV in radians (from 54 degree lens)
-        fx = cx / math.tan(FOV_x / 2)  # pixels
-        depth = (target_height / 2) / math.tan(FOV_x / 2)  # meters
+        src_pts_3d = []
+        for m in good_matches:
+            src_pts_3d.append(target_list[i].keypoint_list_3d[m.trainIdx])
+            pt = query_keypoint_lists[0][m.queryIdx].pt
+            print("Color at ", pt, " is ", query_images[0][int(pt[1])][int(
+                pt[0])])
+            query_images[0] = cv2.circle(
+                query_images[0], (int(pt[0]), int(pt[1])), 5, (0, 255, 0), 3)
 
-    src_pts_3d = []
-    for m in good_matches:
-        # Project the matches all back to 3D, assuming they're all at the same depth
-        # Add in the distance across the field and any lateral offset of the target
-        src_pts_3d.append([(
-            field_length,
-            -(train_keypoint_lists[training_image_index][m.trainIdx].pt[0] - cx
-              ) * depth / fx - target_center_offset_y,
-            -(train_keypoint_lists[training_image_index][m.trainIdx].pt[1] - cy
-              ) * depth / fx + cy_above_ground)])
+        cv2.imshow('DEBUG', query_images[0])
+        cv2.waitKey(0)
+        # Reshape 3d point list to work with computations to follow
+        src_pts_3d_array = np.asarray(np.float32(src_pts_3d)).reshape(-1, 3, 1)
 
-    # Reshape 3d point list to work with computations to follow
-    src_pts_3d_array = np.asarray(np.float32(src_pts_3d)).reshape(-1, 3, 1)
+        # Load from camera parameters
+        cam_mat = camera_params.camera_int.camera_matrix
+        dist_coeffs = camera_params.camera_int.dist_coeffs
 
-    cam_mat = np.asarray([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
-    distortion_coeffs = np.zeros((5, 1))
+        # Create list of matching query point locations
+        dst_pts = np.float32([
+            query_keypoint_lists[0][m.queryIdx].pt for m in good_matches
+        ]).reshape(-1, 1, 2)
 
-    # Create list of matching query point locations
-    dst_pts = np.float32([
-        query_keypoint_lists[0][m.queryIdx].pt for m in good_matches
-    ]).reshape(-1, 1, 2)
+        ts = time.monotonic()
+        # TODO: May want to supply it with estimated guess as starting point
+        # Find offset from camera to original location of camera relative to target
+        retval, R_ci_w_estj, T_ci_w_estj, inliers = cv2.solvePnPRansac(
+            src_pts_3d_array, dst_pts, cam_mat, dist_coeffs)
 
-    ts = time.monotonic()
-    # TODO: May want to supply it with estimated guess as starting point
-    # Find offset from camera to original location of camera relative to target
-    retval, R_ci_w_estj, T_ci_w_estj, inliers = cv2.solvePnPRansac(
-        src_pts_3d_array, dst_pts, cam_mat, distortion_coeffs)
+        tf = time.monotonic()
+        print("Solving Pose took ", tf - ts, " secs")
+        print("Found ", len(inliers), " inliers, out of ", len(dst_pts),
+              " matched points")
+        ### THIS is the estimate of the robot on the field!
+        R_w_ci_estj, T_w_ci_estj = field_display.invert_pose_Rodrigues(
+            R_ci_w_estj, T_ci_w_estj)
+        #print("Pose from PnP is:\n", R_ci_w_estj, "\n", T_ci_w_estj)
+        # The rotation estimate is for camera with z pointing forwards.
+        # Compute the rotation as if x is forward
+        R_w_ci_estj_robot = field_display.camera_rot_to_world_Rodrigues(
+            R_w_ci_estj)
+        #print("Pose in world coords is:\n", R_w_ci_estj, "\n", T_w_ci_estj,
+        #      "\nWith robot coord frame rotation as \n", R_w_ci_estj_robot)
 
-    tf = time.monotonic()
-    print("Solving Pose took ", tf - ts, " secs")
-    print("Found ", len(inliers), " inliers, out of ", len(dst_pts),
-          " matched points")
-    ### THIS is the estimate of the robot on the field!
-    R_w_ci_estj, T_w_ci_estj = field_display.invert_pose_Rodrigues(
-        R_ci_w_estj, T_ci_w_estj)
-    print("Pose from PnP is:\n", R_ci_w_estj, "\n", T_ci_w_estj)
-    # The rotation estimate is for camera with z pointing forwards.
-    # Compute the rotation as if x is forward
-    R_w_ci_estj_robot = field_display.camera_rot_to_world_Rodrigues(
-        R_w_ci_estj)
-    print("Pose in world coords is:\n", R_w_ci_estj, "\n", T_w_ci_estj,
-          "\nWith robot coord frame rotation as \n", R_w_ci_estj_robot)
+        # Use the rotational component about the z axis to define the heading
+        # TODO<jim>: Change this to use rotation of x-unit vector
+        heading_est = R_w_ci_estj_robot[2][0]
+        # Plot location of the robot, along with heading
+        color = (255, 0, 0)  # Blue
+        if i in (0, 1):
+            color = (0, 0, 255)  # Red
+        img_ret = field_display.plot_bot_on_field(img_ret, color, T_w_ci_estj,
+                                                  heading_est)
 
-    # Use the rotational component about the z axis to define the heading
-    heading_est = R_w_ci_estj_robot[2][0]
-    # Plot location of the robot, along with heading
-    img_ret = field_display.plot_bot_on_field(None, (0, 255, 255), T_w_ci_estj,
-                                              heading_est)
+        # Compute vector to target
+        T_w_target_pt = np.array(target_list[i].target_position).reshape(3, 1)
+        vector_to_target = T_w_target_pt - T_w_ci_estj
+        # Compute distance to target (full 3D)
+        distance_to_target = np.linalg.norm(vector_to_target)
+        # Compute distance to target (x,y)
+        distance_to_target_ground = np.linalg.norm(vector_to_target[0:2])
+        #print("Distance comparison: all: ", distance_to_target, " vs. ground: ", distance_to_target_ground)
 
-    # Compute vector to target
-    T_w_target_pt = np.array(
-        [[field_length, -target_center_offset_y, target_center_height]]).T
-    vector_to_target = T_w_target_pt - T_w_ci_estj
-    # Compute distance to target (full 3D)
-    distance_to_target = np.linalg.norm(vector_to_target)
-    # Compute distance to target (x,y)
-    distance_to_target_ground = np.linalg.norm(vector_to_target[0:2])
-    #print("Distance comparison: all: ", distance_to_target, " vs. ground: ", distance_to_target_ground)
+        angle_to_target_abs = math.atan2(vector_to_target[1][0],
+                                         vector_to_target[0][0])
+        angle_to_target_robot = angle_to_target_abs - heading_est
+        img_ret = field_display.plot_line_on_field(img_ret, (255, 255, 0),
+                                                   T_w_ci_estj, T_w_target_pt)
+        # THESE ARE OUR ESTIMATES OF HEADING, DISTANCE, AND SKEW TO TARGET
+        log_file.write('%lf, %lf, %lf\n' %
+                       (heading_est, distance_to_target_ground,
+                        angle_to_target_robot))
 
-    angle_to_target_abs = math.atan2(vector_to_target[1][0],
-                                     vector_to_target[0][0])
-    angle_to_target_robot = angle_to_target_abs - heading_est
-    img_ret = field_display.plot_line_on_field(img_ret, (255, 255, 0),
-                                               T_w_ci_estj, T_w_target_pt)
-    # THESE ARE OUR ESTIMATES OF HEADING, DISTANCE, AND SKEW TO TARGET
-    log_file.write('%lf, %lf, %lf\n' % (heading_est, distance_to_target_ground,
-                                        angle_to_target_robot))
+        # A bunch of code to visualize things...
+        #
+        # Draw target on the image
+        h, w, ch = query_images[0].shape
+        query_image_copy = query_images[0].copy()
+        # Map target_pt onto image, for display
+        target_point_2d_trans = cv2.perspectiveTransform(
+            target_pt_list[i].reshape(-1, 1, 2),
+            homography_list[i]).astype(int)
+        # Ballpark the size of the circle so it looks right on image
+        radius = int(
+            32 * abs(homography_list[i][0][0] + homography_list[i][1][1]) /
+            2)  # Average of scale factors
+        query_image_copy = cv2.circle(query_image_copy,
+                                      (target_point_2d_trans.flatten()[0],
+                                       target_point_2d_trans.flatten()[1]),
+                                      radius, (0, 255, 0), 3)
 
-    # A bunch of code to visualize things...
-    #
-    # Draw target on the image
-    h, w, ch = query_images[0].shape
-    query_image_copy = query_images[0].copy()
-    # Map target_pt onto image, for display
-    transformed_target = cv2.perspectiveTransform(target_pt_list[0],
-                                                  homography_list[0])
-    # Ballpark the size of the circle so it looks right on image
-    radius = int(32 * abs(homography_list[0][0][0] + homography_list[0][1][1])
-                 / 2)  # Average of scale factors
-    query_image_copy = cv2.circle(
-        query_image_copy,
-        (transformed_target.flatten()[0], transformed_target.flatten()[1]),
-        radius, (0, 255, 0), 3)
+        # Create empty image the size of the field
+        img_heading = field_display.load_field_image()
+        img_heading[:, :, :] = 0
+        f_h, f_w, f_ch = img_heading.shape
 
-    # If we're querying static images, show full results
-    if (query_image_index is not -1):
-        homography_list, matches_mask_list = tam.show_results(
-            training_images, train_keypoint_lists, query_images,
-            query_keypoint_lists, target_pt_list, good_matches_list)
+        # Create heading view, and paste it to bottom of field
+        img_heading = field_display.plot_camera_to_target(
+            img_heading, (0, 255, 0), heading_est, distance_to_target_ground,
+            angle_to_target_robot)
+        vis = np.concatenate((img_ret, img_heading), axis=0)
 
-    # Create empty image the size of the field
-    img_heading = field_display.load_field_image()
-    img_heading[:, :, :] = 0
-    f_h, f_w, f_ch = img_heading.shape
+        # Paste query image to right of other views (scale to keep aspect ratio)
+        img_query_scaled = cv2.resize(query_image_copy,
+                                      (int(2 * w * f_h / h), 2 * f_h))
+        vis = np.concatenate((vis, img_query_scaled), axis=1)
 
-    # Create heading view, and paste it to bottom of field
-    img_heading = field_display.plot_camera_to_target(
-        img_heading, (0, 255, 0), heading_est, distance_to_target_ground,
-        angle_to_target_robot)
-    vis = np.concatenate((img_ret, img_heading), axis=0)
+        # Scale down to make it fit on screen
+        vis = cv2.resize(vis, (int(vis.shape[1] / 4), int(vis.shape[0] / 4)))
+        cv2.imshow('field_display', vis)
 
-    # Paste query image to right of other views (scale to keep aspect ratio)
-    img_query_scaled = cv2.resize(query_image_copy,
-                                  (int(2 * w * f_h / h), 2 * f_h))
-    vis = np.concatenate((vis, img_query_scaled), axis=1)
-
-    # Scale down to make it fit on screen
-    vis = cv2.resize(vis, (int(vis.shape[1] / 4), int(vis.shape[0] / 4)))
-    cv2.imshow('field_display', vis)
-
-    #Waits for a user input to quit the application
-    pause_time = 0
-    if (query_image_index is -1):
-        pause_time = 1
-    if cv2.waitKey(pause_time) & 0xFF == ord('q'):
-        break
+        # Wait for user input to quit the application
+        pause_time = 0
+        if (query_image_index == -1):
+            pause_time = 1
+        if cv2.waitKey(pause_time) & 0xFF == ord('q'):
+            break
 
 # Done.  Clean things up
 if (query_image_index is -1):
@@ -305,4 +281,3 @@
     cap.release()
 
 cv2.destroyAllWindows()
-
diff --git a/y2020/vision/tools/python_code/load_sift_training.py b/y2020/vision/tools/python_code/load_sift_training.py
index 9fa4acf..0ce70b7 100644
--- a/y2020/vision/tools/python_code/load_sift_training.py
+++ b/y2020/vision/tools/python_code/load_sift_training.py
@@ -1,100 +1,232 @@
 #!/usr/bin/python3
 
 import cv2
+import glog
+import numpy as np
 import sys
 import flatbuffers
-import target_definition
 
-import frc971.vision.sift.TrainingImage as TrainingImage
-import frc971.vision.sift.TrainingData as TrainingData
+import frc971.vision.sift.CameraCalibration as CameraCalibration
 import frc971.vision.sift.Feature as Feature
+import frc971.vision.sift.KeypointFieldLocation as KeypointFieldLocation
+import frc971.vision.sift.TrainingData as TrainingData
+import frc971.vision.sift.TrainingImage as TrainingImage
+import frc971.vision.sift.TransformationMatrix as TransformationMatrix
+
+
+# Takes a 3x3 rotation matrix and a 3x1 translation vector, and outputs a
+# 16-element list (a row-major 4x4 homogeneous transform), suitable for
+# outputting to a flatbuffer
+def rot_and_trans_to_list(R, T):
+    output_list = []
+    for row in range(3):
+        for col in range(3):
+            output_list.append(R[row][col])
+        output_list.append(T[row])
+
+    output_list = output_list + [0., 0., 0., 1.]
+    return output_list
+
 
 def main():
 
-  output_path = sys.argv[1]
-  print("Writing file to ", output_path)
+    target_data_list = None
+    camera_calib_list = None
 
-  target_data_list = target_definition.compute_target_definition()
+    output_path = sys.argv[1]
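+    # argv[1] is the output header path; an optional argv[2] of "test" selects the test data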
 
-  fbb = flatbuffers.Builder(0)
+    if (len(sys.argv) > 2):
+        if sys.argv[2] == "test":
+            glog.info("Loading test data")
+            import camera_definition_test
+            import target_definition_test
+            target_data_list = target_definition_test.compute_target_definition(
+            )
+            camera_calib_list = camera_definition_test.camera_list
+        else:
+            glog.error("Unhandled arguments: '%s'" % sys.argv[2])
+            quit()
+    else:
+        glog.info("Loading target configuration data")
+        import camera_definition
+        import target_definition
+        target_data_list = target_definition.compute_target_definition()
+        camera_calib_list = camera_definition.camera_list
 
-  images_vector = []
+    glog.info("Writing file to ", output_path)
 
-  for target_data in target_data_list:
+    fbb = flatbuffers.Builder(0)
 
-    features_vector = []
+    images_vector = []
 
-    for keypoint, keypoint_3d, descriptor in zip(target_data.keypoint_list,
-                                                 target_data.keypoint_list_3d,
-                                                 target_data.descriptor_list):
+    # Iterate over all the training targets
+    for target_data in target_data_list:
 
-      Feature.FeatureStartDescriptorVector(fbb, len(descriptor))
-      for n in reversed(descriptor):
-        fbb.PrependFloat32(n)
-      descriptor_vector = fbb.EndVector(len(descriptor))
+        features_vector = []
 
-      Feature.FeatureStart(fbb)
+        # Iterate over all the keypoints
+        for keypoint, keypoint_3d, descriptor in zip(
+                target_data.keypoint_list, target_data.keypoint_list_3d,
+                target_data.descriptor_list):
 
-      Feature.FeatureAddDescriptor(fbb, descriptor_vector)
-      Feature.FeatureAddX(fbb, keypoint.pt[0])
-      Feature.FeatureAddY(fbb, keypoint.pt[1])
-      Feature.FeatureAddSize(fbb, keypoint.size)
-      Feature.FeatureAddAngle(fbb, keypoint.angle)
-      Feature.FeatureAddResponse(fbb, keypoint.response)
-      Feature.FeatureAddOctave(fbb, keypoint.octave)
+            # Build the Descriptor vector
+            Feature.FeatureStartDescriptorVector(fbb, len(descriptor))
+            for n in reversed(descriptor):
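+                # Descriptor values are stored as uint8 in the flatbuffer, so they must be integral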
+                assert n == round(n)
+                fbb.PrependUint8(int(round(n)))
+            descriptor_vector = fbb.EndVector(len(descriptor))
 
-      features_vector.append(Feature.FeatureEnd(fbb))
+            # Add all the components to each Feature
+            Feature.FeatureStart(fbb)
+            Feature.FeatureAddDescriptor(fbb, descriptor_vector)
+            Feature.FeatureAddX(fbb, keypoint.pt[0])
+            Feature.FeatureAddY(fbb, keypoint.pt[1])
+            Feature.FeatureAddSize(fbb, keypoint.size)
+            Feature.FeatureAddAngle(fbb, keypoint.angle)
+            Feature.FeatureAddResponse(fbb, keypoint.response)
+            Feature.FeatureAddOctave(fbb, keypoint.octave)
 
-      ## TODO: Write 3d vector here
+            keypoint_3d_location = KeypointFieldLocation.CreateKeypointFieldLocation(
+                fbb, keypoint_3d[0][0], keypoint_3d[0][1], keypoint_3d[0][2])
 
-    TrainingImage.TrainingImageStartFeaturesVector(fbb, len(features_vector))
-    for feature in reversed(features_vector):
-      fbb.PrependUOffsetTRelative(feature)
-    features_vector_table = fbb.EndVector(len(features_vector))
+            Feature.FeatureAddFieldLocation(fbb, keypoint_3d_location)
 
-    TrainingImage.TrainingImageStart(fbb)
-    TrainingImage.TrainingImageAddFeatures(fbb, features_vector_table)
-    # TODO(Brian): Fill out the transformation matrices.
-    images_vector.append(TrainingImage.TrainingImageEnd(fbb))
+            features_vector.append(Feature.FeatureEnd(fbb))
 
-  TrainingData.TrainingDataStartImagesVector(fbb, len(images_vector))
-  for training_image in reversed(images_vector):
-    fbb.PrependUOffsetTRelative(training_image)
-  images_vector_table = fbb.EndVector(len(images_vector))
+        # Create the field_to_target TransformationMatrix
+        field_to_target_list = rot_and_trans_to_list(
+            target_data.target_rotation, target_data.target_position)
+        TransformationMatrix.TransformationMatrixStartDataVector(
+            fbb, len(field_to_target_list))
+        for n in reversed(field_to_target_list):
+            fbb.PrependFloat32(n)
+        field_to_target_offset = fbb.EndVector(len(field_to_target_list))
 
-  TrainingData.TrainingDataStart(fbb)
-  TrainingData.TrainingDataAddImages(fbb, images_vector_table)
-  fbb.Finish(TrainingData.TrainingDataEnd(fbb))
+        TransformationMatrix.TransformationMatrixStart(fbb)
+        TransformationMatrix.TransformationMatrixAddData(
+            fbb, field_to_target_offset)
+        transformation_mat_offset = TransformationMatrix.TransformationMatrixEnd(
+            fbb)
 
-  bfbs = fbb.Output()
+        # Create the TrainingImage feature vector
+        TrainingImage.TrainingImageStartFeaturesVector(fbb,
+                                                       len(features_vector))
+        for feature in reversed(features_vector):
+            fbb.PrependUOffsetTRelative(feature)
+        features_vector_offset = fbb.EndVector(len(features_vector))
 
-  output_prefix = [
-      b'#ifndef Y2020_VISION_TOOLS_PYTHON_CODE_TRAINING_DATA_H_',
-      b'#define Y2020_VISION_TOOLS_PYTHON_CODE_TRAINING_DATA_H_',
-      b'#include <string_view>',
-      b'namespace frc971 {',
-      b'namespace vision {',
-      b'inline std::string_view SiftTrainingData() {',
-  ]
-  output_suffix = [
-      b'  return std::string_view(kData, sizeof(kData));',
-      b'}',
-      b'}  // namespace vision',
-      b'}  // namespace frc971',
-      b'#endif  // Y2020_VISION_TOOLS_PYTHON_CODE_TRAINING_DATA_H_',
-  ]
+        # Add the TrainingImage data
+        TrainingImage.TrainingImageStart(fbb)
+        TrainingImage.TrainingImageAddFeatures(fbb, features_vector_offset)
+        TrainingImage.TrainingImageAddFieldToTarget(fbb,
+                                                    transformation_mat_offset)
+        TrainingImage.TrainingImageAddTargetPointX(
+            fbb, target_data.target_point_2d[0][0][0])
+        TrainingImage.TrainingImageAddTargetPointY(
+            fbb, target_data.target_point_2d[0][0][1])
+        TrainingImage.TrainingImageAddTargetPointRadius(
+            fbb, target_data.target_radius)
+        images_vector.append(TrainingImage.TrainingImageEnd(fbb))
 
-  with open(output_path, 'wb') as output:
-    for line in output_prefix:
-      output.write(line)
-      output.write(b'\n')
-    output.write(b'alignas(64) static constexpr char kData[] = "')
-    for byte in fbb.Output():
-      output.write(b'\\x' + (b'%x' % byte).zfill(2))
-    output.write(b'";\n')
-    for line in output_suffix:
-      output.write(line)
-      output.write(b'\n')
+    # Create and add Training Data of all targets
+    TrainingData.TrainingDataStartImagesVector(fbb, len(images_vector))
+    for training_image in reversed(images_vector):
+        fbb.PrependUOffsetTRelative(training_image)
+    images_vector_table = fbb.EndVector(len(images_vector))
+
+    # Create camera calibration data
+    camera_calibration_vector = []
+    for camera_calib in camera_calib_list:
+        fixed_extrinsics_list = rot_and_trans_to_list(
+            camera_calib.camera_ext.R, camera_calib.camera_ext.T)
+        TransformationMatrix.TransformationMatrixStartDataVector(
+            fbb, len(fixed_extrinsics_list))
+        for n in reversed(fixed_extrinsics_list):
+            fbb.PrependFloat32(n)
+        fixed_extrinsics_data_offset = fbb.EndVector(
+            len(fixed_extrinsics_list))
+
+        TransformationMatrix.TransformationMatrixStart(fbb)
+        TransformationMatrix.TransformationMatrixAddData(
+            fbb, fixed_extrinsics_data_offset)
+        fixed_extrinsics_vector = TransformationMatrix.TransformationMatrixEnd(
+            fbb)
+
+        # TODO: Need to add in distortion coefficients here
+        # For now, just send camera parameter matrix (fx, fy, cx, cy)
+        camera_int_list = (
+            camera_calib.camera_int.camera_matrix.ravel().tolist())
+        CameraCalibration.CameraCalibrationStartIntrinsicsVector(
+            fbb, len(camera_int_list))
+        for n in reversed(camera_int_list):
+            fbb.PrependFloat32(n)
+        intrinsics_vector = fbb.EndVector(len(camera_int_list))
+
+        dist_coeffs_list = camera_calib.camera_int.dist_coeffs.ravel().tolist()
+        CameraCalibration.CameraCalibrationStartDistCoeffsVector(
+            fbb, len(dist_coeffs_list))
+        for n in reversed(dist_coeffs_list):
+            fbb.PrependFloat32(n)
+        dist_coeffs_vector = fbb.EndVector(len(dist_coeffs_list))
+
+        node_name_offset = fbb.CreateString(camera_calib.node_name)
+        CameraCalibration.CameraCalibrationStart(fbb)
+        CameraCalibration.CameraCalibrationAddNodeName(fbb, node_name_offset)
+        CameraCalibration.CameraCalibrationAddTeamNumber(
+            fbb, camera_calib.team_number)
+        CameraCalibration.CameraCalibrationAddIntrinsics(
+            fbb, intrinsics_vector)
+        CameraCalibration.CameraCalibrationAddDistCoeffs(
+            fbb, dist_coeffs_vector)
+        CameraCalibration.CameraCalibrationAddFixedExtrinsics(
+            fbb, fixed_extrinsics_vector)
+        camera_calibration_vector.append(
+            CameraCalibration.CameraCalibrationEnd(fbb))
+
+    TrainingData.TrainingDataStartCameraCalibrationsVector(
+        fbb, len(camera_calibration_vector))
+    for camera_calibration in reversed(camera_calibration_vector):
+        fbb.PrependUOffsetTRelative(camera_calibration)
+    camera_calibration_vector_table = fbb.EndVector(
+        len(camera_calibration_vector))
+
+    # Fill out TrainingData
+    TrainingData.TrainingDataStart(fbb)
+    TrainingData.TrainingDataAddImages(fbb, images_vector_table)
+    TrainingData.TrainingDataAddCameraCalibrations(
+        fbb, camera_calibration_vector_table)
+    fbb.Finish(TrainingData.TrainingDataEnd(fbb))
+
+    bfbs = fbb.Output()
+
+    output_prefix = [
+        b'#ifndef Y2020_VISION_TOOLS_PYTHON_CODE_TRAINING_DATA_H_',
+        b'#define Y2020_VISION_TOOLS_PYTHON_CODE_TRAINING_DATA_H_',
+        b'#include <string_view>',
+        b'namespace frc971 {',
+        b'namespace vision {',
+        b'inline std::string_view SiftTrainingData() {',
+    ]
+    output_suffix = [
+        b'  return std::string_view(kData, sizeof(kData));',
+        b'}',
+        b'}  // namespace vision',
+        b'}  // namespace frc971',
+        b'#endif  // Y2020_VISION_TOOLS_PYTHON_CODE_TRAINING_DATA_H_',
+    ]
+
+    # Write out the header file
+    with open(output_path, 'wb') as output:
+        for line in output_prefix:
+            output.write(line)
+            output.write(b'\n')
+        output.write(b'alignas(64) static constexpr char kData[] = "')
+        for byte in bfbs:
+            output.write(b'\\x' + (b'%x' % byte).zfill(2))
+        output.write(b'";\n')
+        for line in output_suffix:
+            output.write(line)
+            output.write(b'\n')
+
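
The loop above emits each serialized byte as a two-digit `\xNN` escape inside a C++ string literal. A minimal sketch of that escaping in plain Python (the `escape_bytes` helper is hypothetical, not part of the tool):

```python
def escape_bytes(data):
    # b'%x' % b yields lowercase hex; zfill(2) pads single digits to two,
    # so every byte becomes exactly one \xNN escape.
    return b''.join(b'\\x' + (b'%x' % b).zfill(2) for b in data)


assert escape_bytes(b'\x00\xff') == b'\\x00\\xff'
```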
 
 if __name__ == '__main__':
-  main()
+    main()
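
The generator above builds every FlatBuffers vector with the same start/prepend-in-reverse/end idiom; the generated `Start*Vector` helpers wrap a call like the one below. A stripped-down sketch against the raw Python `flatbuffers` builder (assuming the same library vintage as this code, where `EndVector` still takes the element count):

```python
import flatbuffers

fbb = flatbuffers.Builder(0)
data = [1.0, 2.0, 3.0]
# FlatBuffers vectors are written back-to-front, hence reversed().
fbb.StartVector(4, len(data), 4)  # element size, count, alignment
for x in reversed(data):
    fbb.PrependFloat32(x)
offset = fbb.EndVector(len(data))
```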
diff --git a/y2020/vision/tools/python_code/target_definition.py b/y2020/vision/tools/python_code/target_definition.py
index 779eb0b..430e83f 100644
--- a/y2020/vision/tools/python_code/target_definition.py
+++ b/y2020/vision/tools/python_code/target_definition.py
@@ -1,6 +1,7 @@
 import argparse
 import cv2
-import json
+# TODO<Jim>: Add gflags for handling command-line flags
+import glog
 import math
 import numpy as np
 
@@ -8,11 +9,17 @@
 import define_training_data as dtd
 import train_and_match as tam
 
+# TODO<Jim>: Allow command-line setting of logging level
+glog.setLevel("WARN")
 global VISUALIZE_KEYPOINTS
 global USE_BAZEL
 USE_BAZEL = True
 VISUALIZE_KEYPOINTS = False
 
+# For now, just have a 32 pixel radius, based on original training image
+target_radius_default = 32.
+
+
 def bazel_name_fix(filename):
     ret_name = filename
     if USE_BAZEL:
@@ -20,6 +27,7 @@
 
     return ret_name
 
+
 class TargetData:
     def __init__(self, filename):
         self.image_filename = filename
@@ -27,7 +35,8 @@
         if USE_BAZEL:
             from bazel_tools.tools.python.runfiles import runfiles
             r = runfiles.Create()
-            self.image_filename = r.Rlocation(bazel_name_fix(self.image_filename))
+            self.image_filename = r.Rlocation(
+                bazel_name_fix(self.image_filename))
         self.image = tam.load_images([self.image_filename])[0]
         self.polygon_list = []
         self.polygon_list_3d = []
@@ -35,8 +44,15 @@
         self.keypoint_list = []
         self.keypoint_list_3d = None  # numpy array of 3D points
         self.descriptor_list = []
+        self.target_rotation = None
+        self.target_position = None
+        self.target_point_2d = None
+        self.target_radius = target_radius_default
 
-    def extract_features(self, feature_extractor):
+    def extract_features(self, feature_extractor=None):
+        if feature_extractor is None:
+            feature_extractor = tam.load_feature_extractor()
+
         kp_lists, desc_lists = tam.detect_and_compute(feature_extractor,
                                                       [self.image])
         self.keypoint_list = kp_lists[0]
@@ -61,8 +77,8 @@
             # Filter and project points for each polygon in the list
             filtered_keypoints, _, _, _, keep_list = dtd.filter_keypoints_by_polygons(
                 keypoint_list, None, [self.polygon_list[poly_ind]])
-            print("Filtering kept %d of %d features" % (len(keep_list),
-                                                        len(keypoint_list)))
+            glog.info("Filtering kept %d of %d features" %
+                      (len(keep_list), len(keypoint_list)))
             filtered_point_array = np.asarray(
                 [(keypoint.pt[0], keypoint.pt[1])
                  for keypoint in filtered_keypoints]).reshape(-1, 2)
@@ -73,6 +89,7 @@
 
         return point_list_3d
 
+
 def compute_target_definition():
     ############################################################
     # TARGET DEFINITIONS
@@ -81,180 +98,261 @@
     ideal_target_list = []
     training_target_list = []
 
-    # Some general info about our field and targets
-    # Assume camera centered on target at 1 m above ground and distance of 4.85m
-    field_length = 15.98
+    # Using coordinate system as defined in sift.fbs:
+    # field origin (0, 0, 0) at floor height at center of field
+    # Robot orientation with x-axis pointing towards RED ALLIANCE player station
+    # y-axis to left and z-axis up (right-hand coordinate system)
+    # Thus, the red power port target location will be at (-15.98/2,1.67,0),
+    # with orientation (facing out of the field) of M_PI
+
+    # Field constants
+    field_length = 15.983
     power_port_total_height = 3.10
-    power_port_center_y = 1.67
-    power_port_width = 1.22
-    power_port_bottom_wing_height = 1.88
+    power_port_edge_y = 1.089
+    power_port_width = 1.225
+    power_port_bottom_wing_height = 1.828
     power_port_wing_width = 1.83
-    loading_bay_edge_y = 1.11
-    loading_bay_width = 1.52
-    loading_bay_height = 0.94
+    loading_bay_edge_y = 0.784
+    loading_bay_width = 1.524
+    loading_bay_height = 0.933
+    wing_angle = 20. * math.pi / 180.
 
     # Pick the target center location at halfway between top and bottom of the top panel
-    target_center_height = (power_port_total_height + power_port_bottom_wing_height) / 2.
-
-    # TODO: Still need to figure out what this angle actually is
-    wing_angle = 20. * math.pi / 180.
+    power_port_target_height = (
+        power_port_total_height + power_port_bottom_wing_height) / 2.
 
     ###
     ### Red Power Port
     ###
 
     # Create the reference "ideal" image
-    ideal_power_port_red = TargetData('test_images/train_power_port_red.png')
+    ideal_power_port_red = TargetData('test_images/ideal_power_port_red.png')
 
     # Start at lower left corner, and work around clockwise
     # These are taken by manually finding the points in gimp for this image
     power_port_red_main_panel_polygon_points_2d = [(451, 679), (451, 304),
-                                                   (100, 302), (451, 74),
-                                                   (689, 74), (689, 302),
-                                                   (689, 679)]
+                                                   (100, 302), (451, 74), (689,
+                                                                           74),
+                                                   (689, 302), (689, 679)]
 
     # These are "virtual" 3D points based on the expected geometry
     power_port_red_main_panel_polygon_points_3d = [
-        (field_length, -power_port_center_y + power_port_width / 2., 0.),
-        (field_length, -power_port_center_y + power_port_width / 2.,
-         power_port_bottom_wing_height),
-        (field_length, -power_port_center_y + power_port_width / 2.
-         + power_port_wing_width, power_port_bottom_wing_height),
-        (field_length, -power_port_center_y + power_port_width / 2.,
-         power_port_total_height),
-        (field_length, -power_port_center_y - power_port_width / 2.,
-         power_port_total_height),
-        (field_length, -power_port_center_y - power_port_width / 2.,
-         power_port_bottom_wing_height),
-        (field_length, -power_port_center_y - power_port_width / 2., 0.)
+        (-field_length / 2., power_port_edge_y,
+         0.), (-field_length / 2., power_port_edge_y,
+               power_port_bottom_wing_height),
+        (-field_length / 2., power_port_edge_y - power_port_wing_width,
+         power_port_bottom_wing_height), (-field_length / 2.,
+                                          power_port_edge_y,
+                                          power_port_total_height),
+        (-field_length / 2., power_port_edge_y + power_port_width,
+         power_port_total_height), (-field_length / 2.,
+                                    power_port_edge_y + power_port_width,
+                                    power_port_bottom_wing_height),
+        (-field_length / 2., power_port_edge_y + power_port_width, 0.)
     ]
 
     power_port_red_wing_panel_polygon_points_2d = [(689, 74), (1022, 302),
                                                    (689, 302)]
     # These are "virtual" 3D points based on the expected geometry
     power_port_red_wing_panel_polygon_points_3d = [
-        (field_length, -power_port_center_y - power_port_width / 2.,
+        (-field_length / 2., power_port_edge_y + power_port_width,
          power_port_total_height),
-        (field_length - power_port_wing_width * math.sin(wing_angle),
-         -power_port_center_y - power_port_width / 2.
-         - power_port_wing_width * math.cos(wing_angle),
-         power_port_bottom_wing_height),
-        (field_length, -power_port_center_y - power_port_width / 2.,
-         power_port_bottom_wing_height)
+        (-field_length / 2. + power_port_wing_width * math.sin(wing_angle),
+         power_port_edge_y + power_port_width +
+         power_port_wing_width * math.cos(wing_angle),
+         power_port_bottom_wing_height), (-field_length / 2.,
+                                          power_port_edge_y + power_port_width,
+                                          power_port_bottom_wing_height)
     ]
 
     # Populate the red power port
-    ideal_power_port_red.polygon_list.append(power_port_red_main_panel_polygon_points_2d)
-    ideal_power_port_red.polygon_list_3d.append(power_port_red_main_panel_polygon_points_3d)
+    ideal_power_port_red.polygon_list.append(
+        power_port_red_main_panel_polygon_points_2d)
+    ideal_power_port_red.polygon_list_3d.append(
+        power_port_red_main_panel_polygon_points_3d)
 
-    ideal_power_port_red.polygon_list.append(power_port_red_wing_panel_polygon_points_2d)
-    ideal_power_port_red.polygon_list_3d.append(power_port_red_wing_panel_polygon_points_3d)
+    # Define the pose of the target
+    # Location is on the ground, at the center of the target
+    # Orientation is with "x" pointing out of the field, and "z" up
+    # This way, if robot is facing target directly, the x-axes are aligned
+    # and the skew to target is zero
+    ideal_power_port_red.target_rotation = -np.identity(3, np.double)
+    ideal_power_port_red.target_rotation[2][2] = 1.
+    ideal_power_port_red.target_position = np.array([
+        -field_length / 2., power_port_edge_y + power_port_width / 2.,
+        power_port_target_height
+    ])
+
+    # Target point on the image -- needs to match training image
+    # These are manually captured by examining the images,
+    # and entering the pixel values from the target center for each image.
+    # These are currently only used for visualization of the target
+    ideal_power_port_red.target_point_2d = np.float32([[570, 192]]).reshape(
+        -1, 1, 2)  # ideal_power_port_red.png
+    # np.float32([[305, 97]]).reshape(-1, 1, 2),  #train_power_port_red_webcam.png
 
     # Add the ideal 3D target to our list
     ideal_target_list.append(ideal_power_port_red)
     # And add the training image we'll actually use to the training list
-    training_target_list.append(TargetData('test_images/train_power_port_red_webcam.png'))
+    training_target_power_port_red = TargetData(
+        'test_images/train_power_port_red_webcam.png')
+    #'test_images/train_power_port_red_pi-7971-3.png')
+    training_target_power_port_red.target_rotation = ideal_power_port_red.target_rotation
+    training_target_power_port_red.target_position = ideal_power_port_red.target_position
+    training_target_power_port_red.target_radius = target_radius_default
+
+    training_target_list.append(training_target_power_port_red)
 
     ###
     ### Red Loading Bay
     ###
 
-    ideal_loading_bay_red = TargetData('test_images/train_loading_bay_red.png')
+    ideal_loading_bay_red = TargetData('test_images/ideal_loading_bay_red.png')
 
     # Start at lower left corner, and work around clockwise
     # These are taken by manually finding the points in gimp for this image
-    loading_bay_red_polygon_points_2d = [(42, 406), (42, 35), (651, 34), (651, 406)]
+    loading_bay_red_polygon_points_2d = [(42, 406), (42, 35), (651, 34), (651,
+                                                                          406)]
 
     # These are "virtual" 3D points based on the expected geometry
     loading_bay_red_polygon_points_3d = [
-        (field_length, loading_bay_edge_y + loading_bay_width, 0.),
-        (field_length, loading_bay_edge_y + loading_bay_width, loading_bay_height),
-        (field_length, loading_bay_edge_y, loading_bay_height),
-        (field_length, loading_bay_edge_y, 0.)
+        (field_length / 2., loading_bay_edge_y + loading_bay_width, 0.),
+        (field_length / 2., loading_bay_edge_y + loading_bay_width,
+         loading_bay_height), (field_length / 2., loading_bay_edge_y,
+                               loading_bay_height), (field_length / 2.,
+                                                     loading_bay_edge_y, 0.)
     ]
 
-    ideal_loading_bay_red.polygon_list.append(loading_bay_red_polygon_points_2d)
-    ideal_loading_bay_red.polygon_list_3d.append(loading_bay_red_polygon_points_3d)
+    ideal_loading_bay_red.polygon_list.append(
+        loading_bay_red_polygon_points_2d)
+    ideal_loading_bay_red.polygon_list_3d.append(
+        loading_bay_red_polygon_points_3d)
+    # Location of target
+    ideal_loading_bay_red.target_rotation = np.identity(3, np.double)
+    ideal_loading_bay_red.target_position = np.array([
+        field_length / 2., loading_bay_edge_y + loading_bay_width / 2.,
+        loading_bay_height / 2.
+    ])
+    ideal_loading_bay_red.target_point_2d = np.float32([[366, 236]]).reshape(
+        -1, 1, 2)  # ideal_loading_bay_red.png
 
     ideal_target_list.append(ideal_loading_bay_red)
-    training_target_list.append(TargetData('test_images/train_loading_bay_red.png'))
+    training_target_loading_bay_red = TargetData(
+        'test_images/train_loading_bay_red.png')
+    training_target_loading_bay_red.target_rotation = ideal_loading_bay_red.target_rotation
+    training_target_loading_bay_red.target_position = ideal_loading_bay_red.target_position
+    training_target_loading_bay_red.target_radius = target_radius_default
+    training_target_list.append(training_target_loading_bay_red)
 
     ###
     ### Blue Power Port
     ###
 
-    ideal_power_port_blue = TargetData('test_images/train_power_port_blue.png')
+    ideal_power_port_blue = TargetData('test_images/ideal_power_port_blue.png')
 
     # Start at lower left corner, and work around clockwise
     # These are taken by manually finding the points in gimp for this image
     power_port_blue_main_panel_polygon_points_2d = [(438, 693), (438, 285),
-                                                    (93, 285), (440, 50),
-                                                    (692, 50), (692, 285),
-                                                    (692, 693)]
+                                                    (93, 285), (440, 50), (692,
+                                                                           50),
+                                                    (692, 285), (692, 693)]
 
     # These are "virtual" 3D points based on the expected geometry
     power_port_blue_main_panel_polygon_points_3d = [
-        (0., power_port_center_y - power_port_width / 2., 0.),
-        (0., power_port_center_y - power_port_width / 2.,
-         power_port_bottom_wing_height),
-        (0., power_port_center_y - power_port_width / 2. - power_port_wing_width,
-         power_port_bottom_wing_height),
-        (0., power_port_center_y - power_port_width / 2.,
-         power_port_total_height),
-        (0., power_port_center_y + power_port_width / 2.,
-         power_port_total_height),
-        (0., power_port_center_y + power_port_width / 2.,
-         power_port_bottom_wing_height),
-        (0., power_port_center_y + power_port_width / 2., 0.)
+        (field_length / 2., -power_port_edge_y,
+         0.), (field_length / 2., -power_port_edge_y,
+               power_port_bottom_wing_height),
+        (field_length / 2., -power_port_edge_y + power_port_wing_width,
+         power_port_bottom_wing_height), (field_length / 2.,
+                                          -power_port_edge_y,
+                                          power_port_total_height),
+        (field_length / 2., -power_port_edge_y - power_port_width,
+         power_port_total_height), (field_length / 2.,
+                                    -power_port_edge_y - power_port_width,
+                                    power_port_bottom_wing_height),
+        (field_length / 2., -power_port_edge_y - power_port_width, 0.)
     ]
 
     power_port_blue_wing_panel_polygon_points_2d = [(692, 50), (1047, 285),
                                                     (692, 285)]
     # These are "virtual" 3D points based on the expected geometry
     power_port_blue_wing_panel_polygon_points_3d = [
-        (0., power_port_center_y + power_port_width / 2.,
+        (field_length / 2., -power_port_edge_y - power_port_width,
          power_port_total_height),
-        (power_port_wing_width * math.sin(wing_angle),
-         power_port_center_y - power_port_width / 2. +
+        (field_length / 2. - power_port_wing_width * math.sin(wing_angle),
+         -power_port_edge_y - power_port_width -
          power_port_wing_width * math.cos(wing_angle),
          power_port_bottom_wing_height),
-        (0., power_port_center_y + power_port_width / 2.,
+        (field_length / 2., -power_port_edge_y - power_port_width,
          power_port_bottom_wing_height)
     ]
 
     # Populate the blue power port
-    ideal_power_port_blue.polygon_list.append(power_port_blue_main_panel_polygon_points_2d)
-    ideal_power_port_blue.polygon_list_3d.append(power_port_blue_main_panel_polygon_points_3d)
+    ideal_power_port_blue.polygon_list.append(
+        power_port_blue_main_panel_polygon_points_2d)
+    ideal_power_port_blue.polygon_list_3d.append(
+        power_port_blue_main_panel_polygon_points_3d)
 
-    ideal_power_port_blue.polygon_list.append(power_port_blue_wing_panel_polygon_points_2d)
-    ideal_power_port_blue.polygon_list_3d.append(power_port_blue_wing_panel_polygon_points_3d)
+    # Location of target.  Rotation is pointing in -x direction
+    ideal_power_port_blue.target_rotation = np.identity(3, np.double)
+    ideal_power_port_blue.target_position = np.array([
+        field_length / 2., -power_port_edge_y - power_port_width / 2.,
+        power_port_target_height
+    ])
+    ideal_power_port_blue.target_point_2d = np.float32([[567, 180]]).reshape(
+        -1, 1, 2)  # ideal_power_port_blue.png
 
     ideal_target_list.append(ideal_power_port_blue)
-    training_target_list.append(TargetData('test_images/train_power_port_blue.png'))
+    training_target_power_port_blue = TargetData(
+        'test_images/train_power_port_blue.png')
+    training_target_power_port_blue.target_rotation = ideal_power_port_blue.target_rotation
+    training_target_power_port_blue.target_position = ideal_power_port_blue.target_position
+    training_target_power_port_blue.target_radius = target_radius_default
+    training_target_list.append(training_target_power_port_blue)
 
     ###
     ### Blue Loading Bay
     ###
 
-    ideal_loading_bay_blue = TargetData('test_images/train_loading_bay_blue.png')
+    ideal_loading_bay_blue = TargetData(
+        'test_images/ideal_loading_bay_blue.png')
 
     # Start at lower left corner, and work around clockwise
     # These are taken by manually finding the points in gimp for this image
-    loading_bay_blue_polygon_points_2d = [(7, 434), (7, 1), (729, 1), (729, 434)]
+    loading_bay_blue_polygon_points_2d = [(7, 434), (7, 1), (729, 1), (729,
+                                                                       434)]
 
     # These are "virtual" 3D points based on the expected geometry
     loading_bay_blue_polygon_points_3d = [
-        (field_length, loading_bay_edge_y + loading_bay_width, 0.),
-        (field_length, loading_bay_edge_y + loading_bay_width, loading_bay_height),
-        (field_length, loading_bay_edge_y, loading_bay_height),
-        (field_length, loading_bay_edge_y, 0.)
+        (-field_length / 2., -loading_bay_edge_y - loading_bay_width, 0.),
+        (-field_length / 2., -loading_bay_edge_y - loading_bay_width,
+         loading_bay_height), (-field_length / 2., -loading_bay_edge_y,
+                               loading_bay_height), (-field_length / 2.,
+                                                     -loading_bay_edge_y, 0.)
     ]
 
-    ideal_loading_bay_blue.polygon_list.append(loading_bay_blue_polygon_points_2d)
-    ideal_loading_bay_blue.polygon_list_3d.append(loading_bay_blue_polygon_points_3d)
+    ideal_loading_bay_blue.polygon_list.append(
+        loading_bay_blue_polygon_points_2d)
+    ideal_loading_bay_blue.polygon_list_3d.append(
+        loading_bay_blue_polygon_points_3d)
+
+    # Location of target
+    ideal_loading_bay_blue.target_rotation = -np.identity(3, np.double)
+    ideal_loading_bay_blue.target_rotation[2][2] = 1.
+    ideal_loading_bay_blue.target_position = np.array([
+        -field_length / 2., -loading_bay_edge_y - loading_bay_width / 2.,
+        loading_bay_height / 2.
+    ])
+    ideal_loading_bay_blue.target_point_2d = np.float32([[366, 236]]).reshape(
+        -1, 1, 2)  # ideal_loading_bay_blue.png
 
     ideal_target_list.append(ideal_loading_bay_blue)
-    training_target_list.append(TargetData('test_images/train_loading_bay_blue.png'))
+    training_target_loading_bay_blue = TargetData(
+        'test_images/train_loading_bay_blue.png')
+    training_target_loading_bay_blue.target_rotation = ideal_loading_bay_blue.target_rotation
+    training_target_loading_bay_blue.target_position = ideal_loading_bay_blue.target_position
+    training_target_loading_bay_blue.target_radius = target_radius_default
+    training_target_list.append(training_target_loading_bay_blue)
 
     # Create feature extractor
     feature_extractor = tam.load_feature_extractor()
@@ -263,7 +361,8 @@
     camera_params = camera_definition.web_cam_params
 
     for ideal_target in ideal_target_list:
-        print("\nPreparing target for image %s" % ideal_target.image_filename)
+        glog.info(
+            "\nPreparing target for image %s" % ideal_target.image_filename)
         ideal_target.extract_features(feature_extractor)
         ideal_target.filter_keypoints_by_polygons()
         ideal_target.compute_reprojection_maps()
@@ -272,16 +371,21 @@
 
         if VISUALIZE_KEYPOINTS:
             for i in range(len(ideal_target.polygon_list)):
-                ideal_pts_tmp = np.asarray(ideal_target.polygon_list[i]).reshape(
-                    -1, 2)
+                ideal_pts_tmp = np.asarray(
+                    ideal_target.polygon_list[i]).reshape(-1, 2)
                 ideal_pts_3d_tmp = np.asarray(
                     ideal_target.polygon_list_3d[i]).reshape(-1, 3)
                 # We can only compute pose if we have at least 4 points
                 # Only matters for reprojection for visualization
                 # Keeping this code here, since it's helpful when testing
                 if (len(ideal_target.polygon_list[i]) >= 4):
-                    img_copy = dtd.draw_polygon(ideal_target.image.copy(), ideal_target.polygon_list[i], (0,255,0), True)
-                    dtd.visualize_reprojections(img_copy, ideal_pts_tmp, ideal_pts_3d_tmp, camera_params.camera_int.camera_matrix, camera_params.camera_int.distortion_coeffs)
+                    img_copy = dtd.draw_polygon(ideal_target.image.copy(),
+                                                ideal_target.polygon_list[i],
+                                                (0, 255, 0), True)
+                    dtd.visualize_reprojections(
+                        img_copy, ideal_pts_tmp, ideal_pts_3d_tmp,
+                        camera_params.camera_int.camera_matrix,
+                        camera_params.camera_int.dist_coeffs)
 
             for polygon in ideal_target.polygon_list:
                 img_copy = ideal_target.image.copy()
@@ -298,7 +402,7 @@
                     np.asarray(kp_in_poly2d).reshape(-1, 2),
                     np.asarray(kp_in_poly3d).reshape(
                         -1, 3), camera_params.camera_int.camera_matrix,
-                    camera_params.camera_int.distortion_coeffs)
+                    camera_params.camera_int.dist_coeffs)
 
     ###############
     ### Compute 3D points on actual training images
@@ -307,7 +411,9 @@
     AUTO_PROJECTION = True
 
     if AUTO_PROJECTION:
-        print("\n\nAuto projection of training keypoints to 3D using ideal images")
+        glog.info(
+            "\n\nAuto projection of training keypoints to 3D using ideal images"
+        )
         # Match the captured training image against the "ideal" training image
         # and use those matches to pin down the 3D locations of the keypoints
 
@@ -316,35 +422,49 @@
             training_target = training_target_list[target_ind]
             ideal_target = ideal_target_list[target_ind]
 
-            print("\nPreparing target for image %s" % training_target.image_filename)
+            glog.info("\nPreparing target for image %s" %
+                      training_target.image_filename)
             # Extract keypoints and descriptors for model
             training_target.extract_features(feature_extractor)
 
             # Create matcher that we'll use to match with ideal
             matcher = tam.train_matcher([training_target.descriptor_list])
 
-            matches_list = tam.compute_matches(matcher,
-                                               [training_target.descriptor_list],
-                                               [ideal_target.descriptor_list])
+            matches_list = tam.compute_matches(
+                matcher, [training_target.descriptor_list],
+                [ideal_target.descriptor_list])
 
             homography_list, matches_mask_list = tam.compute_homographies(
                 [training_target.keypoint_list], [ideal_target.keypoint_list],
                 matches_list)
 
+            # Compute H_inv, since this maps points in ideal target to
+            # points in the training target
+            H_inv = np.linalg.inv(homography_list[0])
+
             for polygon in ideal_target.polygon_list:
-                ideal_pts_2d = np.asarray(np.float32(polygon)).reshape(-1, 1, 2)
-                H_inv = np.linalg.inv(homography_list[0])
+                ideal_pts_2d = np.asarray(np.float32(polygon)).reshape(
+                    -1, 1, 2)
                 # We use the ideal target's polygons to define the polygons on
                 # the training target
-                transformed_polygon = cv2.perspectiveTransform(ideal_pts_2d, H_inv)
+                transformed_polygon = cv2.perspectiveTransform(
+                    ideal_pts_2d, H_inv)
                 training_target.polygon_list.append(transformed_polygon)
 
-            print("Started with %d keypoints" % len(training_target.keypoint_list))
+            # Also project the target point from the ideal to training image
+            training_target_point_2d = cv2.perspectiveTransform(
+                np.asarray(ideal_target.target_point_2d).reshape(-1, 1, 2),
+                H_inv)
+            training_target.target_point_2d = training_target_point_2d.reshape(
+                -1, 1, 2)
+
+            glog.info("Started with %d keypoints" % len(
+                training_target.keypoint_list))
 
             training_target.keypoint_list, training_target.descriptor_list, rejected_keypoint_list, rejected_descriptor_list, _ = dtd.filter_keypoints_by_polygons(
                 training_target.keypoint_list, training_target.descriptor_list,
                 training_target.polygon_list)
-            print("After filtering by polygons, had %d keypoints" % len(
+            glog.info("After filtering by polygons, had %d keypoints" % len(
                 training_target.keypoint_list))
             if VISUALIZE_KEYPOINTS:
                 tam.show_keypoints(training_target.image,
@@ -383,32 +503,41 @@
                 img_copy = training_target.image.copy()
                 for polygon in training_target.polygon_list:
                     pts = polygon.astype(int).reshape(-1, 2)
-                    img_copy = dtd.draw_polygon(img_copy, pts,
-                                             (255, 0, 0), True)
-                kp_tmp = np.asarray([
-                    (kp.pt[0], kp.pt[1]) for kp in training_target.keypoint_list
-                ]).reshape(-1, 2)
+                    img_copy = dtd.draw_polygon(img_copy, pts, (255, 0, 0),
+                                                True)
+                kp_tmp = np.asarray(
+                    [(kp.pt[0], kp.pt[1])
+                     for kp in training_target.keypoint_list]).reshape(-1, 2)
                 dtd.visualize_reprojections(
                     img_copy, kp_tmp, training_target.keypoint_list_3d,
                     camera_params.camera_int.camera_matrix,
-                    camera_params.camera_int.distortion_coeffs)
+                    camera_params.camera_int.dist_coeffs)
 
     y2020_target_list = training_target_list
     return y2020_target_list
 
+
 if __name__ == '__main__':
     ap = argparse.ArgumentParser()
-    ap.add_argument("--visualize", help="Whether to visualize the results", default=False, action='store_true')
-    ap.add_argument("--no_use_bazel", help="Don't run using Bazel", default=True, action='store_false')
+    ap.add_argument(
+        "--visualize",
+        help="Whether to visualize the results",
+        default=False,
+        action='store_true')
+    ap.add_argument(
+        "-n",
+        "--no_bazel",
+        help="Don't run using Bazel",
+        default=True,
+        action='store_false')
     args = vars(ap.parse_args())
 
     VISUALIZE_KEYPOINTS = args["visualize"]
     if args["visualize"]:
-        print("Visualizing results")
+        glog.info("Visualizing results")
 
-    USE_BAZEL = args["no_use_bazel"]
-    if args["no_use_bazel"]:
-        print("Running on command line (no Bazel)")
+    USE_BAZEL = args["no_bazel"]
+    if not USE_BAZEL:
+        glog.info("Running on command line (no Bazel)")
 
     compute_target_definition()
-    pass
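
The `target_rotation` values used above, a negated identity with the `[2][2]` entry reset to 1, are exactly a 180 degree rotation about the z-axis, i.e. a target facing out of the field along -x. A quick numpy check, not part of the commit:

```python
import math

import numpy as np

target_rotation = -np.identity(3, np.double)
target_rotation[2][2] = 1.

# Compare against an explicit Rz(pi) rotation matrix.
theta = math.pi
rot_z = np.array([[math.cos(theta), -math.sin(theta), 0.],
                  [math.sin(theta), math.cos(theta), 0.],
                  [0., 0., 1.]])
assert np.allclose(target_rotation, rot_z)
```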
diff --git a/y2020/vision/tools/python_code/target_definition_test.py b/y2020/vision/tools/python_code/target_definition_test.py
new file mode 100644
index 0000000..3c4d308
--- /dev/null
+++ b/y2020/vision/tools/python_code/target_definition_test.py
@@ -0,0 +1,31 @@
+import cv2
+import numpy as np
+
+import camera_definition_test
+import define_training_data as dtd
+import target_definition
+import train_and_match as tam
+
+
+def compute_target_definition():
+    target_data_list = []
+    target_data_test_1 = target_definition.TargetData(
+        'test_images/train_power_port_red.png')
+
+    target_data_test_1.extract_features()
+
+    target_data_test_1.keypoint_list[0].pt = (0., 1.)
+
+    kp_3d = []
+    for i in range(len(target_data_test_1.keypoint_list)):
+        kp_3d.append((i, i, i))
+
+    target_data_test_1.keypoint_list_3d = np.asarray(
+        np.float32(kp_3d)).reshape(-1, 1, 3)
+
+    target_data_test_1.target_rotation = np.identity(3, np.double)
+    target_data_test_1.target_position = np.array([0., 1., 2.])
+    target_data_test_1.target_point_2d = np.array([10., 20.]).reshape(-1, 1, 2)
+    target_data_test_1.target_radius = 32
+    target_data_list.append(target_data_test_1)
+    return target_data_list
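
The `reshape(-1, 1, 2)` and `reshape(-1, 1, 3)` calls in this test (and throughout `target_definition.py`) put flat coordinate lists into the `(N, 1, dims)` layout that OpenCV point-array APIs such as `cv2.perspectiveTransform` expect:

```python
import numpy as np

pts_2d = np.array([10., 20.]).reshape(-1, 1, 2)
pts_3d = np.float32([(0, 0, 0), (1, 1, 1)]).reshape(-1, 1, 3)
assert pts_2d.shape == (1, 1, 2)  # one 2D point
assert pts_3d.shape == (2, 1, 3)  # two 3D points
```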
diff --git a/y2020/vision/tools/python_code/test_images/ideal_loading_bay_blue.png b/y2020/vision/tools/python_code/test_images/ideal_loading_bay_blue.png
new file mode 100644
index 0000000..c3c0aea
--- /dev/null
+++ b/y2020/vision/tools/python_code/test_images/ideal_loading_bay_blue.png
Binary files differ
diff --git a/y2020/vision/tools/python_code/test_images/ideal_loading_bay_red.png b/y2020/vision/tools/python_code/test_images/ideal_loading_bay_red.png
new file mode 100644
index 0000000..42091a6
--- /dev/null
+++ b/y2020/vision/tools/python_code/test_images/ideal_loading_bay_red.png
Binary files differ
diff --git a/y2020/vision/tools/python_code/test_images/ideal_power_port_blue.png b/y2020/vision/tools/python_code/test_images/ideal_power_port_blue.png
new file mode 100644
index 0000000..a3a7597
--- /dev/null
+++ b/y2020/vision/tools/python_code/test_images/ideal_power_port_blue.png
Binary files differ
diff --git a/y2020/vision/tools/python_code/test_images/ideal_power_port_red.png b/y2020/vision/tools/python_code/test_images/ideal_power_port_red.png
new file mode 100644
index 0000000..9d2f0bf
--- /dev/null
+++ b/y2020/vision/tools/python_code/test_images/ideal_power_port_red.png
Binary files differ
diff --git a/y2020/vision/tools/python_code/train_and_match.py b/y2020/vision/tools/python_code/train_and_match.py
index e5a7f5b..1def399 100644
--- a/y2020/vision/tools/python_code/train_and_match.py
+++ b/y2020/vision/tools/python_code/train_and_match.py
@@ -1,4 +1,5 @@
 import cv2
+import glog
 import math
 import numpy as np
 import time
@@ -8,6 +9,8 @@
 FEATURE_EXTRACTOR_NAME = 'SIFT'
 QUERY_INDEX = 0  # We use a list for both training and query info, but only ever have one query item
 
+glog.setLevel("WARN")
+
 
 # Calculates rotation matrix to euler angles
 # The result is the same as MATLAB except the order
@@ -38,7 +41,7 @@
         # Load image (in color; let opencv convert to B&W for features)
         img_data = cv2.imread(im)
         if img_data is None:
-            print("Failed to load image: ", im)
+            glog.error("Failed to load image: '%s'" % im)
             exit()
         else:
             image_list.append(img_data)
@@ -143,15 +146,12 @@
 
-    elif FEATURE_EXTRACTOR_NAME is 'ORB':
+    elif FEATURE_EXTRACTOR_NAME == 'ORB':
         matches = matcher.knnMatch(train_keypoint_lists[0], desc_query, k=2)
-        print(matches)
         good_matches = []
         for m in matches:
             if m:
                 if len(m) == 2:
-                    print(m[0].distance, m[1].distance)
                     if m[0].distance < 0.7 * m[1].distance:
                         good_matches.append(m[0])
-                        print(m[0].distance)
 
         good_matches_list.append(good_matches)
 
@@ -176,14 +176,16 @@
     for i in range(len(train_keypoint_lists)):
         good_matches = good_matches_list[i]
         if len(good_matches) < MIN_MATCH_COUNT:
-            print("Not enough matches are for model ", i, ": ",
-                  len(good_matches), " out of needed #: ", MIN_MATCH_COUNT)
+            glog.warn(
+                "Not enough matches are for model %d: %d out of needed #: %d" %
+                (i, len(good_matches), MIN_MATCH_COUNT))
             homography_list.append([])
             matches_mask_list.append([])
             continue
 
-        print("Got good number of matches for model %d: %d (needed only %d)" %
-              (i, len(good_matches), MIN_MATCH_COUNT))
+        glog.info(
+            "Got good number of matches for model %d: %d (needed only %d)" %
+            (i, len(good_matches), MIN_MATCH_COUNT))
         # Extract and bundle keypoint locations for computations
         src_pts = np.float32([
             train_keypoint_lists[i][m.trainIdx].pt for m in good_matches
@@ -206,7 +208,7 @@
 # Also shows image with query unwarped (to match training image) and target pt
 def show_results(training_images, train_keypoint_lists, query_images,
                  query_keypoint_lists, target_point_list, good_matches_list):
-    print("Showing results for ", len(training_images), " training images")
+    glog.info("Showing results for ", len(training_images), " training images")
 
     homography_list, matches_mask_list = compute_homographies(
         train_keypoint_lists, query_keypoint_lists, good_matches_list)
@@ -214,15 +216,15 @@
         good_matches = good_matches_list[i]
         if len(good_matches) < MIN_MATCH_COUNT:
             continue
-        print("Showing results for model ", i)
+        glog.debug("Showing results for model ", i)
         matches_mask_count = matches_mask_list[i].count(1)
         if matches_mask_count != len(good_matches):
-            print("Homography rejected some matches!  From ",
-                  len(good_matches), ", only ", matches_mask_count,
-                  " were used")
+            glog.info("Homography rejected some matches!  From ",
+                      len(good_matches), ", only ", matches_mask_count,
+                      " were used")
 
         if matches_mask_count < MIN_MATCH_COUNT:
-            print(
-                "Skipping match because homography rejected matches down to below ",
-                MIN_MATCH_COUNT)
+            glog.info(
+                "Skipping match because homography rejected matches down to "
+                "below %d" % MIN_MATCH_COUNT)
             continue
@@ -235,7 +237,8 @@
         query_box_pts = cv2.perspectiveTransform(train_box_pts, H)
 
         # Figure out where the training target goes on the query img
-        transformed_target = cv2.perspectiveTransform(target_point_list[i], H)
+        transformed_target = cv2.perspectiveTransform(
+            target_point_list[i].reshape(-1, 1, 2), H)
         # Ballpark the size of the circle so it looks right on image
         radius = int(
             32 * abs(H[0][0] + H[1][1]) / 2)  # Average of scale factors
@@ -260,14 +263,13 @@
         img3 = cv2.drawMatches(query_image, query_keypoint_lists[QUERY_INDEX],
                                training_images[i], train_keypoint_lists[i],
                                good_matches_list[i], None, **draw_params)
-        print("Drawing matches for model ", i,
-              ".  Query on left, Training image on right")
+        glog.debug("Drawing matches for model ", i,
+                   ".  Query on left, Training image on right")
         cv2.imshow('Matches', img3), cv2.waitKey()
 
         # Next, unwarp the query image so it looks like the training view
         H_inv = np.linalg.inv(H)
         query_image_warp = cv2.warpPerspective(query_image, H_inv, (w, h))
-        print("Showing unwarped query image for model ", i)
         cv2.imshow('Unwarped Image', query_image_warp), cv2.waitKey()
 
     # Go ahead and return these, for use elsewhere
@@ -279,9 +281,11 @@
 #        keypoint_list: List of opencv keypoints
 def show_keypoints(image, keypoint_list):
     ret_img = image.copy()
-    ret_img = cv2.drawKeypoints(ret_img, keypoint_list, ret_img,
-                               flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
+    ret_img = cv2.drawKeypoints(
+        ret_img,
+        keypoint_list,
+        ret_img,
+        flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
     cv2.imshow("Keypoints", ret_img)
     cv2.waitKey(0)
     return ret_img
-
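
Both this file and `target_definition.py` map points through the inverse homography with `cv2.perspectiveTransform` to get from the ideal image into the training/query image. A self-contained sketch with a hypothetical translation-only homography:

```python
import cv2
import numpy as np

# A pure translation by (5, -3); its inverse moves points by (-5, 3).
H = np.array([[1., 0., 5.],
              [0., 1., -3.],
              [0., 0., 1.]])
H_inv = np.linalg.inv(H)

ideal_pts = np.float32([[100., 200.]]).reshape(-1, 1, 2)
training_pts = cv2.perspectiveTransform(ideal_pts, H_inv)
assert np.allclose(training_pts, [[[95., 203.]]])
```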
diff --git a/y2020/vision/tools/python_code/transform_projection_test.py b/y2020/vision/tools/python_code/transform_projection_test.py
index 00f68bb..01fe695 100644
--- a/y2020/vision/tools/python_code/transform_projection_test.py
+++ b/y2020/vision/tools/python_code/transform_projection_test.py
@@ -8,7 +8,7 @@
 # Import camera from camera_definition
 camera_params = camera_definition.web_cam_params
 cam_mat = camera_params.camera_int.camera_matrix
-distortion_coeffs = camera_params.camera_int.distortion_coeffs
+dist_coeffs = camera_params.camera_int.dist_coeffs
 
 # Height of camera on robot above ground
 cam_above_ground = 0.5
@@ -48,11 +48,11 @@
 
 pts_proj_2d_cam2, jac_2d = cv2.projectPoints(
     pts_3d_target, R_cam_cam2_gt_mat_inv, T_cam_cam2_gt_inv, cam_mat,
-    distortion_coeffs)
+    dist_coeffs)
 
 # Now, solve for the pose using the original 3d points (pts_3d_T_t) and the projections from the new location
 retval, R_cam2_cam_est, T_cam2_cam_est, inliers = cv2.solvePnPRansac(
-    pts_3d_target, pts_proj_2d_cam2, cam_mat, distortion_coeffs)
+    pts_3d_target, pts_proj_2d_cam2, cam_mat, dist_coeffs)
 
 # This is the pose from camera to original target spot.  We need to invert to get back to the pose we want
 R_cam_cam2_est_mat = cv2.Rodrigues(R_cam2_cam_est)[0].T
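
The trailing `.T` works because rotation matrices are orthonormal, so the transpose is the inverse. A quick check, not part of the test:

```python
import cv2
import numpy as np

rvec = np.array([0., 0., np.pi / 2])  # 90 degrees about z
R = cv2.Rodrigues(rvec)[0]
assert np.allclose(R.T @ R, np.identity(3))
```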
diff --git a/y2020/vision/tools/python_code/usb_camera_stream.py b/y2020/vision/tools/python_code/usb_camera_stream.py
deleted file mode 100644
index 5d3ae91..0000000
--- a/y2020/vision/tools/python_code/usb_camera_stream.py
+++ /dev/null
@@ -1,25 +0,0 @@
-import cv2
-# Open the device at the ID X for /dev/videoX
-cap = cv2.VideoCapture(2)
-
-#Check whether user selected camera is opened successfully.
-if not (cap.isOpened()):
-    print("Could not open video device")
-    quit()
-
-while True:
-    # Capture frame-by-frame
-    ret, frame = cap.read()
-
-    exp = cap.get(cv2.CAP_PROP_EXPOSURE)
-    print("Exposure:", exp)
-    # Display the resulting frame
-    cv2.imshow('preview', frame)
-
-    #Waits for a user input to quit the application
-    if cv2.waitKey(1) & 0xFF == ord('q'):
-        break
-
-# When everything done, release the capture
-cap.release()
-cv2.destroyAllWindows()
diff --git a/y2020/vision/v4l2_reader.cc b/y2020/vision/v4l2_reader.cc
index f1944c1..91777c7 100644
--- a/y2020/vision/v4l2_reader.cc
+++ b/y2020/vision/v4l2_reader.cc
@@ -6,6 +6,9 @@
 #include <sys/stat.h>
 #include <sys/types.h>
 
+DEFINE_bool(ignore_timestamps, false,
+            "Don't require timestamps on images.  Used to allow webcams");
+
 namespace frc971 {
 namespace vision {
 
@@ -137,8 +140,11 @@
            buffer.m.userptr);
   CHECK_EQ(ImageSize(), buffer.length);
   CHECK(buffer.flags & V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC);
-  CHECK_EQ(buffer.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK,
-           static_cast<uint32_t>(V4L2_BUF_FLAG_TSTAMP_SRC_EOF));
+  if (!FLAGS_ignore_timestamps) {
+    // Require that we have good timestamp on images
+    CHECK_EQ(buffer.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK,
+             static_cast<uint32_t>(V4L2_BUF_FLAG_TSTAMP_SRC_EOF));
+  }
   return {static_cast<int>(buffer.index),
           aos::time::from_timeval(buffer.timestamp)};
 }
diff --git a/y2020/vision/viewer.cc b/y2020/vision/viewer.cc
new file mode 100644
index 0000000..be9b980
--- /dev/null
+++ b/y2020/vision/viewer.cc
@@ -0,0 +1,46 @@
+#include <opencv2/calib3d.hpp>
+#include <opencv2/features2d.hpp>
+#include <opencv2/highgui/highgui.hpp>
+#include <opencv2/imgproc.hpp>
+
+#include "aos/events/shm_event_loop.h"
+#include "aos/init.h"
+#include "y2020/vision/vision_generated.h"
+
+DEFINE_string(config, "config.json", "Path to the config file to use.");
+
+namespace frc971 {
+namespace vision {
+namespace {
+
+void ViewerMain() {
+  aos::FlatbufferDetachedBuffer<aos::Configuration> config =
+      aos::configuration::ReadConfig(FLAGS_config);
+
+  aos::ShmEventLoop event_loop(&config.message());
+
+  event_loop.MakeWatcher("/camera", [](const CameraImage &image) {
+    cv::Mat image_mat(image.rows(), image.cols(), CV_8U);
+    CHECK(image_mat.isContinuous());
+    const int number_pixels = image.rows() * image.cols();
+    for (int i = 0; i < number_pixels; ++i) {
+      reinterpret_cast<uint8_t *>(image_mat.data)[i] =
+          image.data()->data()[i * 2];
+    }
+
+    cv::imshow("Display", image_mat);
+    cv::waitKey(1);
+  });
+
+  event_loop.Run();
+}
+
+}  // namespace
+}  // namespace vision
+}  // namespace frc971
+
+// Quick and lightweight grayscale viewer for images
+int main(int argc, char **argv) {
+  aos::InitGoogle(&argc, &argv);
+  frc971::vision::ViewerMain();
+}
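
The pixel loop above copies every other byte of the image data, which extracts the luma (Y) channel on the assumption that frames arrive YUYV-packed. The equivalent in numpy, as a sketch:

```python
import numpy as np

rows, cols = 2, 4
yuyv = np.arange(rows * cols * 2, dtype=np.uint8)  # stand-in packed frame
gray = yuyv[0::2].reshape(rows, cols)  # keep only the Y bytes
assert gray.shape == (rows, cols)
```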
diff --git a/y2020/vision/viewer_replay.cc b/y2020/vision/viewer_replay.cc
new file mode 100644
index 0000000..82ab11a
--- /dev/null
+++ b/y2020/vision/viewer_replay.cc
@@ -0,0 +1,65 @@
+#include <opencv2/calib3d.hpp>
+#include <opencv2/features2d.hpp>
+#include <opencv2/highgui/highgui.hpp>
+#include <opencv2/imgproc.hpp>
+
+#include "aos/events/logging/logger.h"
+#include "aos/events/simulated_event_loop.h"
+#include "aos/init.h"
+#include "y2020/vision/vision_generated.h"
+
+DEFINE_string(config, "y2020/config.json", "Path to the config file to use.");
+DEFINE_string(logfile, "", "Path to the log file to use.");
+DEFINE_string(node, "pi1", "Node name to replay.");
+DEFINE_string(image_save_prefix, "/tmp/img",
+              "Prefix to use for saving images from the logfile.");
+
+namespace frc971 {
+namespace vision {
+namespace {
+
+void ViewerMain() {
+  CHECK(!FLAGS_logfile.empty()) << "You forgot to specify a logfile.";
+
+  aos::FlatbufferDetachedBuffer<aos::Configuration> config =
+      aos::configuration::ReadConfig(FLAGS_config);
+
+  aos::logger::LogReader reader(FLAGS_logfile, &config.message());
+  reader.Register();
+  const aos::Node *node = nullptr;
+  if (aos::configuration::MultiNode(reader.configuration())) {
+    node = aos::configuration::GetNode(reader.configuration(), FLAGS_node);
+  }
+  std::unique_ptr<aos::EventLoop> event_loop =
+      reader.event_loop_factory()->MakeEventLoop("player", node);
+
+  int image_count = 0;
+  event_loop->MakeWatcher("/camera", [&image_count](const CameraImage &image) {
+    cv::Mat image_mat(image.rows(), image.cols(), CV_8U);
+    CHECK(image_mat.isContinuous());
+    const int number_pixels = image.rows() * image.cols();
+    for (int i = 0; i < number_pixels; ++i) {
+      reinterpret_cast<uint8_t *>(image_mat.data)[i] =
+          image.data()->data()[i * 2];
+    }
+
+    cv::imshow("Display", image_mat);
+    if (!FLAGS_image_save_prefix.empty()) {
+      cv::imwrite("/tmp/img" + std::to_string(image_count++) + ".png",
+                  image_mat);
+    }
+    cv::waitKey(1);
+  });
+
+  reader.event_loop_factory()->Run();
+}
+
+}  // namespace
+}  // namespace vision
+}  // namespace frc971
+
+// Quick and lightweight grayscale viewer for images replayed from a logfile
+int main(int argc, char **argv) {
+  aos::InitGoogle(&argc, &argv);
+  frc971::vision::ViewerMain();
+}
diff --git a/y2020/wpilib_interface.cc b/y2020/wpilib_interface.cc
index 91182fc..4f39acf 100644
--- a/y2020/wpilib_interface.cc
+++ b/y2020/wpilib_interface.cc
@@ -218,8 +218,7 @@
 
     {
       auto builder = superstructure_position_sender_.MakeBuilder();
-      superstructure::Position::Builder position_builder =
-          builder.MakeBuilder<superstructure::Position>();
+
       // TODO(alex): check new absolute encoder api.
       // Hood
       frc971::AbsolutePositionT hood;
@@ -246,6 +245,14 @@
       flatbuffers::Offset<frc971::PotAndAbsolutePosition> turret_offset =
           frc971::PotAndAbsolutePosition::Pack(*builder.fbb(), &turret);
 
+      // Control Panel
+      frc971::RelativePositionT control_panel;
+      CopyPosition(*control_panel_encoder_, &control_panel,
+                   Values::kControlPanelEncoderCountsPerRevolution(),
+                   Values::kControlPanelEncoderRatio(), false);
+      flatbuffers::Offset<frc971::RelativePosition> control_panel_offset =
+          frc971::RelativePosition::Pack(*builder.fbb(), &control_panel);
+
       // Shooter
       y2020::control_loops::superstructure::ShooterPositionT shooter;
       shooter.theta_finisher =
@@ -266,14 +273,8 @@
               y2020::control_loops::superstructure::ShooterPosition::Pack(
                   *builder.fbb(), &shooter);
 
-      // Control Panel
-      frc971::RelativePositionT control_panel;
-      CopyPosition(*control_panel_encoder_, &control_panel,
-                   Values::kControlPanelEncoderCountsPerRevolution(),
-                   Values::kControlPanelEncoderRatio(), false);
-      flatbuffers::Offset<frc971::RelativePosition> control_panel_offset =
-          frc971::RelativePosition::Pack(*builder.fbb(), &control_panel);
-
+      superstructure::Position::Builder position_builder =
+          builder.MakeBuilder<superstructure::Position>();
       position_builder.add_hood(hood_offset);
       position_builder.add_intake_joint(intake_joint_offset);
       position_builder.add_turret(turret_offset);
@@ -427,7 +428,7 @@
     if (climber_falcon_) {
       climber_falcon_->Set(
           ctre::phoenix::motorcontrol::ControlMode::PercentOutput,
-          std::clamp(output.climber_voltage(), -kMaxBringupPower,
+          std::clamp(-output.climber_voltage(), -kMaxBringupPower,
                      kMaxBringupPower) /
               12.0);
     }
@@ -481,6 +482,7 @@
     // Thread 3.
     ::aos::ShmEventLoop sensor_reader_event_loop(&config.message());
     SensorReader sensor_reader(&sensor_reader_event_loop);
+    sensor_reader.set_pwm_trigger(true);
     sensor_reader.set_drivetrain_left_encoder(make_encoder(0));
     sensor_reader.set_drivetrain_right_encoder(make_encoder(1));
     // TODO: pin numbers
@@ -550,8 +552,8 @@
         make_unique<::frc::TalonFX>(4));
     superstructure_writer.set_flywheel_falcon(make_unique<::frc::TalonFX>(9));
     // TODO: check port
-    //superstructure_writer.set_climber_falcon(
-        //make_unique<::ctre::phoenix::motorcontrol::can::TalonFX>(11));
+    superstructure_writer.set_climber_falcon(
+        make_unique<::ctre::phoenix::motorcontrol::can::TalonFX>(1));
 
     AddLoop(&output_event_loop);
 
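
The reordering above moves the `superstructure::Position` builder below all of the `Pack()` calls because FlatBuffers forbids creating child objects while a parent table is open. The same rule, demonstrated with the raw Python builder as a sketch:

```python
import flatbuffers
from flatbuffers import builder as fb_builder

fbb = flatbuffers.Builder(0)
fbb.StartObject(1)  # a table is now under construction
try:
    fbb.CreateString("not allowed while a table is open")
except fb_builder.IsNestedError:
    # Strings, vectors, and nested tables must be finished first.
    pass
```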
diff --git a/y2020/www/BUILD b/y2020/www/BUILD
index c0daa07..6b27c66 100644
--- a/y2020/www/BUILD
+++ b/y2020/www/BUILD
@@ -1,25 +1,48 @@
 load("@build_bazel_rules_typescript//:defs.bzl", "ts_library")
 load("@build_bazel_rules_nodejs//:defs.bzl", "rollup_bundle")
+load("//frc971/downloader:downloader.bzl", "aos_downloader_dir")
 
 ts_library(
-    name = "main",
+    name = "camera_main",
     srcs = [
-        "main.ts",
         "image_handler.ts",
+        "camera_main.ts",
     ],
+    visibility = ["//y2020:__subpackages__"],
     deps = [
         "//aos/network/www:proxy",
         "//y2020/vision:vision_ts_fbs",
         "//y2020/vision/sift:sift_ts_fbs",
     ],
-    visibility = ["//y2020:__subpackages__"],
+)
+
+ts_library(
+    name = "field_main",
+    srcs = [
+        "field_main.ts",
+        "field_handler.ts",
+        "constants.ts",
+    ],
+    deps = [
+        "//aos/network/www:proxy",
+        "//y2020/vision/sift:sift_ts_fbs",
+    ],
 )
 
 rollup_bundle(
-    name = "main_bundle",
-    entry_point = "y2020/www/main",
+    name = "camera_main_bundle",
+    entry_point = "y2020/www/camera_main",
+    visibility = ["//y2020:__subpackages__"],
     deps = [
-        "main",
+        "camera_main",
+    ],
+)
+
+rollup_bundle(
+    name = "field_main_bundle",
+    entry_point = "y2020/www/field_main",
+    deps = [
+        "field_main",
     ],
     visibility = ["//y2020:__subpackages__"],
 )
@@ -30,7 +53,7 @@
         "**/*.html",
         "**/*.css",
     ]),
-    visibility=["//visibility:public"],
+    visibility = ["//visibility:public"],
 )
 
 genrule(
@@ -42,5 +65,17 @@
         "flatbuffers.js",
     ],
     cmd = "cp $(location @com_github_google_flatbuffers//:flatjs) $@",
-    visibility=["//y2020:__subpackages__"],
+    visibility = ["//y2020:__subpackages__"],
+)
+
+aos_downloader_dir(
+    name = "www_files",
+    srcs = [
+        ":camera_main_bundle",
+        ":field_main_bundle",
+        ":files",
+        ":flatbuffers",
+    ],
+    dir = "www",
+    visibility = ["//visibility:public"],
 )
diff --git a/y2020/www/camera_main.ts b/y2020/www/camera_main.ts
new file mode 100644
index 0000000..e13452e
--- /dev/null
+++ b/y2020/www/camera_main.ts
@@ -0,0 +1,10 @@
+import {Connection} from 'aos/network/www/proxy';
+
+import {ImageHandler} from './image_handler';
+
+const conn = new Connection();
+
+conn.connect();
+
+const iHandler = new ImageHandler(conn);
diff --git a/y2020/www/camera_viewer.html b/y2020/www/camera_viewer.html
new file mode 100644
index 0000000..4a61d7a
--- /dev/null
+++ b/y2020/www/camera_viewer.html
@@ -0,0 +1,10 @@
+<html>
+  <head>
+    <script src="flatbuffers.js"></script>
+    <script src="camera_main_bundle.min.js" defer></script>
+    <link rel="stylesheet" href="styles.css">
+  </head>
+  <body>
+  </body>
+</html>
+
diff --git a/y2020/www/constants.ts b/y2020/www/constants.ts
new file mode 100644
index 0000000..b94d7a7
--- /dev/null
+++ b/y2020/www/constants.ts
@@ -0,0 +1,7 @@
+// Conversion constants to meters
+export const IN_TO_M = 0.0254;
+export const FT_TO_M = 0.3048;
+// Dimensions of the field in meters
+export const FIELD_WIDTH = 26 * FT_TO_M + 11.25 * IN_TO_M;
+export const FIELD_LENGTH = 52 * FT_TO_M + 5.25 * IN_TO_M;
+
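
These constants mirror the field dimensions used on the Python side (`field_length = 15.983` in `target_definition.py`). A quick cross-check of the conversions:

```python
IN_TO_M = 0.0254
FT_TO_M = 0.3048

FIELD_WIDTH = 26 * FT_TO_M + 11.25 * IN_TO_M   # ~8.211 m
FIELD_LENGTH = 52 * FT_TO_M + 5.25 * IN_TO_M   # ~15.983 m
assert abs(FIELD_LENGTH - 15.983) < 1e-3
```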
diff --git a/y2020/www/field.html b/y2020/www/field.html
new file mode 100644
index 0000000..ad449db
--- /dev/null
+++ b/y2020/www/field.html
@@ -0,0 +1,10 @@
+<html>
+  <head>
+    <script src="flatbuffers.js"></script>
+    <script src="field_main_bundle.min.js" defer></script>
+    <link rel="stylesheet" href="styles.css">
+  </head>
+  <body>
+  </body>
+</html>
+
diff --git a/y2020/www/field_handler.ts b/y2020/www/field_handler.ts
new file mode 100644
index 0000000..0d2ea34
--- /dev/null
+++ b/y2020/www/field_handler.ts
@@ -0,0 +1,241 @@
+import {Channel} from 'aos/configuration_generated';
+import {Connection} from 'aos/network/www/proxy';
+import {Connect} from 'aos/network/connect_generated';
+import {FIELD_LENGTH, FIELD_WIDTH, IN_TO_M} from './constants';
+import {ImageMatchResult} from 'y2020/vision/sift/sift_generated';
+
+// (0,0) is field center, +X is toward red DS
+const FIELD_SIDE_Y = FIELD_WIDTH / 2;
+const FIELD_EDGE_X = FIELD_LENGTH / 2;
+
+const DS_WIDTH = 69 * IN_TO_M;
+const DS_ANGLE = 20 * Math.PI / 180;
+const DS_END_X = FIELD_EDGE_X - DS_WIDTH * Math.sin(DS_ANGLE);
+const DS_INSIDE_Y = FIELD_SIDE_Y - DS_WIDTH * Math.cos(DS_ANGLE);
+
+const TRENCH_X = 108 * IN_TO_M;
+const TRENCH_WIDTH = 55.5 * IN_TO_M;
+const TRENCH_INSIDE = FIELD_SIDE_Y - TRENCH_WIDTH;
+
+const SPINNER_LENGTH = 30 * IN_TO_M;
+const SPINNER_TOP_X = 374.59 * IN_TO_M - FIELD_EDGE_X;
+const SPINNER_BOTTOM_X = SPINNER_TOP_X - SPINNER_LENGTH;
+
+const SHIELD_BOTTOM_X = -116 * IN_TO_M;
+const SHIELD_BOTTOM_Y = 43.75 * IN_TO_M;
+
+const SHIELD_TOP_X = 116 * IN_TO_M;
+const SHIELD_TOP_Y = -43.75 * IN_TO_M;
+
+const SHIELD_RIGHT_X = -51.06 * IN_TO_M;
+const SHIELD_RIGHT_Y = -112.88 * IN_TO_M;
+
+const SHIELD_LEFT_X = 51.06 * IN_TO_M;
+const SHIELD_LEFT_Y = 112.88 * IN_TO_M;
+
+const SHIELD_CENTER_TOP_X = (SHIELD_TOP_X + SHIELD_LEFT_X) / 2;
+const SHIELD_CENTER_TOP_Y = (SHIELD_TOP_Y + SHIELD_LEFT_Y) / 2;
+
+const SHIELD_CENTER_BOTTOM_X = (SHIELD_BOTTOM_X + SHIELD_RIGHT_X) / 2;
+const SHIELD_CENTER_BOTTOM_Y = (SHIELD_BOTTOM_Y + SHIELD_RIGHT_Y) / 2;
+
+const INITIATION_X = FIELD_EDGE_X - 120 * IN_TO_M;
+
+const TARGET_ZONE_TIP_X = FIELD_EDGE_X - 30 * IN_TO_M;
+const TARGET_ZONE_WIDTH = 48 * IN_TO_M;
+const LOADING_ZONE_WIDTH = 60 * IN_TO_M;
+
+/**
+ * All the messages that are required to display camera information on the field.
+ * Messages not readable on the server node are ignored.
+ */
+const REQUIRED_CHANNELS = [
+  {
+    name: '/pi1/camera',
+    type: 'frc971.vision.sift.ImageMatchResult',
+  },
+  {
+    name: '/pi2/camera',
+    type: 'frc971.vision.sift.ImageMatchResult',
+  },
+  {
+    name: '/pi3/camera',
+    type: 'frc971.vision.sift.ImageMatchResult',
+  },
+  {
+    name: '/pi4/camera',
+    type: 'frc971.vision.sift.ImageMatchResult',
+  },
+  {
+    name: '/pi5/camera',
+    type: 'frc971.vision.sift.ImageMatchResult',
+  },
+];
+
+export class FieldHandler {
+  private canvas = document.createElement('canvas');
+  private imageMatchResult: ImageMatchResult|null = null;
+
+  constructor(private readonly connection: Connection) {
+    document.body.appendChild(this.canvas);
+
+    this.connection.addConfigHandler(() => {
+      this.sendConnect();
+    });
+    this.connection.addHandler(ImageMatchResult.getFullyQualifiedName(), (res) => {
+      this.handleImageMatchResult(res);
+    });
+  }
+
+  private handleImageMatchResult(data: Uint8Array): void {
+    const fbBuffer = new flatbuffers.ByteBuffer(data);
+    this.imageMatchResult = ImageMatchResult.getRootAsImageMatchResult(fbBuffer);
+  }
+
+  private sendConnect(): void {
+    const builder = new flatbuffers.Builder(512);
+    const channels: flatbuffers.Offset[] = [];
+    for (const channel of REQUIRED_CHANNELS) {
+      const nameFb = builder.createString(channel.name);
+      const typeFb = builder.createString(channel.type);
+      Channel.startChannel(builder);
+      Channel.addName(builder, nameFb);
+      Channel.addType(builder, typeFb);
+      const channelFb = Channel.endChannel(builder);
+      channels.push(channelFb);
+    }
+
+    const channelsFb = Connect.createChannelsToTransferVector(builder, channels);
+    Connect.startConnect(builder);
+    Connect.addChannelsToTransfer(builder, channelsFb);
+    const connect = Connect.endConnect(builder);
+    builder.finish(connect);
+    this.connection.sendConnectMessage(builder);
+  }
+
+  drawField(): void {
+    const MY_COLOR = 'red';
+    const OTHER_COLOR = 'blue';
+    const ctx = this.canvas.getContext('2d');
+    // Draw the field perimeter.
+    ctx.beginPath();
+    ctx.moveTo(FIELD_EDGE_X, DS_INSIDE_Y);
+    ctx.lineTo(DS_END_X, FIELD_SIDE_Y);
+    ctx.lineTo(-DS_END_X, FIELD_SIDE_Y);
+    ctx.lineTo(-FIELD_EDGE_X, DS_INSIDE_Y);
+    ctx.lineTo(-FIELD_EDGE_X, -DS_INSIDE_Y);
+    ctx.lineTo(-DS_END_X, -FIELD_SIDE_Y);
+    ctx.lineTo(DS_END_X, -FIELD_SIDE_Y);
+    ctx.lineTo(FIELD_EDGE_X, -DS_INSIDE_Y);
+    ctx.lineTo(FIELD_EDGE_X, DS_INSIDE_Y);
+    ctx.stroke();
+
+    // Draw the shield generator.
+    ctx.beginPath();
+    ctx.moveTo(SHIELD_BOTTOM_X, SHIELD_BOTTOM_Y);
+    ctx.lineTo(SHIELD_RIGHT_X, SHIELD_RIGHT_Y);
+    ctx.lineTo(SHIELD_TOP_X, SHIELD_TOP_Y);
+    ctx.lineTo(SHIELD_LEFT_X, SHIELD_LEFT_Y);
+    ctx.lineTo(SHIELD_BOTTOM_X, SHIELD_BOTTOM_Y);
+    ctx.moveTo(SHIELD_CENTER_TOP_X, SHIELD_CENTER_TOP_Y);
+    ctx.lineTo(SHIELD_CENTER_BOTTOM_X, SHIELD_CENTER_BOTTOM_Y);
+    ctx.stroke();
+
+    this.drawHalfField(ctx, MY_COLOR);
+    ctx.rotate(Math.PI);
+    this.drawHalfField(ctx, OTHER_COLOR);
+    ctx.rotate(Math.PI);
+  }
+
+  drawHalfField(ctx: CanvasRenderingContext2D, color: string): void {
+    // Trenches.
+    ctx.strokeStyle = color;
+    ctx.beginPath();
+    ctx.moveTo(TRENCH_X, FIELD_SIDE_Y);
+    ctx.lineTo(TRENCH_X, TRENCH_INSIDE);
+    ctx.lineTo(-TRENCH_X, TRENCH_INSIDE);
+    ctx.lineTo(-TRENCH_X, FIELD_SIDE_Y);
+    ctx.stroke();
+
+    ctx.strokeStyle = 'black';
+    ctx.beginPath();
+    ctx.moveTo(SPINNER_TOP_X, FIELD_SIDE_Y);
+    ctx.lineTo(SPINNER_TOP_X, TRENCH_INSIDE);
+    ctx.lineTo(SPINNER_BOTTOM_X, TRENCH_INSIDE);
+    ctx.lineTo(SPINNER_BOTTOM_X, FIELD_SIDE_Y);
+    ctx.stroke();
+
+    ctx.beginPath();
+    ctx.moveTo(INITIATION_X, FIELD_SIDE_Y);
+    ctx.lineTo(INITIATION_X, -FIELD_SIDE_Y);
+    ctx.stroke();
+
+    // Target and loading zones.
+    ctx.strokeStyle = color;
+    ctx.beginPath();
+    ctx.moveTo(FIELD_EDGE_X, DS_INSIDE_Y);
+    ctx.lineTo(TARGET_ZONE_TIP_X, DS_INSIDE_Y - 0.5 * TARGET_ZONE_WIDTH);
+    ctx.lineTo(FIELD_EDGE_X, DS_INSIDE_Y - TARGET_ZONE_WIDTH);
+
+    ctx.moveTo(-FIELD_EDGE_X, DS_INSIDE_Y);
+    ctx.lineTo(-TARGET_ZONE_TIP_X, DS_INSIDE_Y - 0.5 * LOADING_ZONE_WIDTH);
+    ctx.lineTo(-FIELD_EDGE_X, DS_INSIDE_Y - LOADING_ZONE_WIDTH);
+    ctx.stroke();
+  }
+
+  drawCamera(x: number, y: number, theta: number): void {
+    const ctx = this.canvas.getContext('2d');
+    ctx.save();
+    ctx.translate(x, y);
+    ctx.rotate(theta);
+    ctx.beginPath();
+    ctx.moveTo(0.5, 0.5);
+    ctx.lineTo(0, 0);
+    ctx.lineTo(0.5, -0.5);
+    ctx.stroke();
+    ctx.beginPath();
+    ctx.arc(0, 0, 0.25, -Math.PI / 4, Math.PI / 4);
+    ctx.stroke();
+    ctx.restore();
+  }
+
+  draw(): void {
+    this.reset();
+    this.drawField();
+    // Draw the cameras.
+    if (this.imageMatchResult) {
+      for (let i = 0; i < this.imageMatchResult.cameraPosesLength(); i++) {
+        const pose = this.imageMatchResult.cameraPoses(i);
+        const mat = pose.fieldToCamera();
+        const x = mat.data(3);
+        const y = mat.data(7);
+        this.drawCamera(x, y, 0);
+        console.log(x, y);
+      }
+    }
+
+    window.requestAnimationFrame(() => this.draw());
+  }
+
+  reset(): void {
+    const ctx = this.canvas.getContext('2d');
+    ctx.setTransform(1, 0, 0, 1, 0, 0);
+    const size = window.innerHeight * 0.9;
+    ctx.canvas.height = size;
+    const width = size / 2 + 20;
+    ctx.canvas.width = width;
+    ctx.clearRect(0, 0, width, size);
+
+    // Translate to center of display.
+    ctx.translate(width / 2, size / 2);
+    // Coordinate system is:
+    // x -> forward.
+    // y -> to the left.
+    ctx.rotate(-Math.PI / 2);
+    ctx.scale(1, -1);
+
+    const M_TO_PX = (size - 10) / FIELD_LENGTH;
+    ctx.scale(M_TO_PX, M_TO_PX);
+    ctx.lineWidth = 1 / M_TO_PX;
+  }
+}
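
The transform stack built in `reset()` is what lets `drawField()` and friends work directly in field meters: it moves the origin to the canvas center, points field +x up the screen and +y to the left, and scales meters to pixels. Composing those canvas calls by hand gives the following mapping (a minimal sketch with illustrative names, not part of the change):

```typescript
// Equivalent of reset()'s transform stack applied to a single point:
//   translate(width / 2, size / 2) -> rotate(-PI / 2) -> scale(1, -1)
//   -> scale(M_TO_PX, M_TO_PX)
// A field point (x, y) in meters lands at device pixels:
//   px = width / 2 - M_TO_PX * y   // field +y (left) is screen-left
//   py = size / 2 - M_TO_PX * x    // field +x (forward) is screen-up
function fieldToPixels(
    xM: number, yM: number, size: number, width: number,
    fieldLength: number): [number, number] {
  const M_TO_PX = (size - 10) / fieldLength;
  return [width / 2 - M_TO_PX * yM, size / 2 - M_TO_PX * xM];
}
```
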
diff --git a/y2020/www/field_main.ts b/y2020/www/field_main.ts
new file mode 100644
index 0000000..163c817
--- /dev/null
+++ b/y2020/www/field_main.ts
@@ -0,0 +1,12 @@
+import {Connection} from 'aos/network/www/proxy';
+
+import {FieldHandler} from './field_handler';
+
+const conn = new Connection();
+
+conn.connect();
+
+const fieldHandler = new FieldHandler(conn);
+
+fieldHandler.draw();
+
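
Both `FieldHandler.sendConnect()` above and `ImageHandler.sendConnect()` below build the same Connect flatbuffer, so the subscription step could plausibly be shared. A sketch of such a helper, assuming the same `Channel`/`Connect` imports used in those files (names here are illustrative):

```typescript
// Build a Connect message listing the channels a handler wants transferred.
function buildConnect(
    channels: {name: string, type: string}[]): flatbuffers.Builder {
  const builder = new flatbuffers.Builder(512);
  const offsets = channels.map((channel) => {
    const nameFb = builder.createString(channel.name);
    const typeFb = builder.createString(channel.type);
    Channel.startChannel(builder);
    Channel.addName(builder, nameFb);
    Channel.addType(builder, typeFb);
    return Channel.endChannel(builder);
  });
  const channelsFb = Connect.createChannelsToTransferVector(builder, offsets);
  Connect.startConnect(builder);
  Connect.addChannelsToTransfer(builder, channelsFb);
  builder.finish(Connect.endConnect(builder));
  return builder;  // Pass to connection.sendConnectMessage(builder).
}
```
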
diff --git a/y2020/www/image_handler.ts b/y2020/www/image_handler.ts
index 802e448..a44156a 100644
--- a/y2020/www/image_handler.ts
+++ b/y2020/www/image_handler.ts
@@ -1,36 +1,134 @@
+import {Channel} from 'aos/configuration_generated';
+import {Connection} from 'aos/network/www/proxy';
+import {Connect} from 'aos/network/connect_generated';
+import {ImageMatchResult} from 'y2020/vision/sift/sift_generated';
 import {CameraImage} from 'y2020/vision/vision_generated';
 
+/**
+ * All the messages that are required to show an image with metadata.
+ * Messages not readable on the server node are ignored.
+ */
+const REQUIRED_CHANNELS = [
+  {
+    name: '/pi1/camera',
+    type: CameraImage.getFullyQualifiedName(),
+  },
+  {
+    name: '/pi2/camera',
+    type: CameraImage.getFullyQualifiedName(),
+  },
+  {
+    name: '/pi3/camera',
+    type: CameraImage.getFullyQualifiedName(),
+  },
+  {
+    name: '/pi4/camera',
+    type: CameraImage.getFullyQualifiedName(),
+  },
+  {
+    name: '/pi5/camera',
+    type: CameraImage.getFullyQualifiedName(),
+  },
+  {
+    name: '/pi1/camera/detailed',
+    type: ImageMatchResult.getFullyQualifiedName(),
+  },
+  {
+    name: '/pi2/camera/detailed',
+    type: ImageMatchResult.getFullyQualifiedName(),
+  },
+  {
+    name: '/pi3/camera/detailed',
+    type: ImageMatchResult.getFullyQualifiedName(),
+  },
+  {
+    name: '/pi4/camera/detailed',
+    type: ImageMatchResult.getFullyQualifiedName(),
+  },
+  {
+    name: '/pi5/camera/detailed',
+    type: ImageMatchResult.getFullyQualifiedName(),
+  },
+];
+
 export class ImageHandler {
   private canvas = document.createElement('canvas');
   private imageBuffer: Uint8ClampedArray|null = null;
+  private image: CameraImage|null = null;
   private imageTimestamp: flatbuffers.Long|null = null;
-  private result: fr971.vision.ImageMatchResult|null = null;
+  private result: ImageMatchResult|null = null;
   private resultTimestamp: flatbuffers.Long|null = null;
+  private width = 0;
+  private height = 0;
+  private imageSkipCount = 3;
 
-  constructor() {
+  constructor(private readonly connection: Connection) {
     document.body.appendChild(this.canvas);
+
+    this.connection.addConfigHandler(() => {
+      this.sendConnect();
+    });
+    this.connection.addHandler(ImageMatchResult.getFullyQualifiedName(), (data) => {
+      this.handleImageMetadata(data);
+    });
+    this.connection.addHandler(CameraImage.getFullyQualifiedName(), (data) => {
+      this.handleImage(data);
+    });
+  }
+
+  private sendConnect(): void {
+    const builder = new flatbuffers.Builder(512);
+    const channels: flatbuffers.Offset[] = [];
+    for (const channel of REQUIRED_CHANNELS) {
+      const nameFb = builder.createString(channel.name);
+      const typeFb = builder.createString(channel.type);
+      Channel.startChannel(builder);
+      Channel.addName(builder, nameFb);
+      Channel.addType(builder, typeFb);
+      const channelFb = Channel.endChannel(builder);
+      channels.push(channelFb);
+    }
+
+    const channelsFb = Connect.createChannelsToTransferVector(builder, channels);
+    Connect.startConnect(builder);
+    Connect.addChannelsToTransfer(builder, channelsFb);
+    const connect = Connect.endConnect(builder);
+    builder.finish(connect);
+    this.connection.sendConnectMessage(builder);
   }
 
   handleImage(data: Uint8Array): void {
-    const fbBuffer = new flatbuffers.ByteBuffer(data);
-    const image = CameraImage.getRootAsCameraImage(fbBuffer);
-    this.imageTimestamp = image.monotonicTimestampNs();
+    console.log('got an image to process');
+    if (this.imageSkipCount !== 0) {
+      this.imageSkipCount--;
+      return;
+    } else {
+      this.imageSkipCount = 3;
+    }
 
-    const width = image.cols();
-    const height = image.rows();
-    if (width === 0 || height === 0) {
+    const fbBuffer = new flatbuffers.ByteBuffer(data);
+    this.image = CameraImage.getRootAsCameraImage(fbBuffer);
+    this.imageTimestamp = this.image.monotonicTimestampNs();
+
+    this.width = this.image.cols();
+    this.height = this.image.rows();
+    if (this.width === 0 || this.height === 0) {
       return;
     }
-    this.imageBuffer = new Uint8ClampedArray(width * height * 4); // RGBA
 
+    this.draw();
+  }
+
+  convertImage(): void {
+    this.imageBuffer = new Uint8ClampedArray(this.width * this.height * 4); // RGBA
     // Read four bytes (YUYV) from the data and transform into two pixels of
     // RGBA for canvas
-    for (const j = 0; j < height; j++) {
-      for (const i = 0; i < width; i += 2) {
-        const y1 = image.data((j * width + i) * 2);
-        const u = image.data((j * width + i) * 2 + 1);
-        const y2 = image.data((j * width + i + 1) * 2);
-        const v = image.data((j * width + i + 1) * 2 + 1);
+    for (let j = 0; j < this.height; j++) {
+      for (let i = 0; i < this.width; i += 2) {
+        const y1 = this.image.data((j * this.width + i) * 2);
+        const u = this.image.data((j * this.width + i) * 2 + 1);
+        const y2 = this.image.data((j * this.width + i + 1) * 2);
+        const v = this.image.data((j * this.width + i + 1) * 2 + 1);
 
         // Based on https://en.wikipedia.org/wiki/YUV#Converting_between_Y%E2%80%B2UV_and_RGB
         const c1 = y1 - 16;
@@ -38,60 +136,57 @@
         const d = u - 128;
         const e = v - 128;
 
-        imageBuffer[(j * width + i) * 4 + 0] = (298 * c1 + 409 * e + 128) >> 8;
-        imageBuffer[(j * width + i) * 4 + 1] =
-            (298 * c1 - 100 * d - 208 * e + 128) >> 8;
-        imageBuffer[(j * width + i) * 4 + 2] = (298 * c1 + 516 * d + 128) >> 8;
-        imageBuffer[(j * width + i) * 4 + 3] = 255;
-        imageBuffer[(j * width + i) * 4 + 4] = (298 * c2 + 409 * e + 128) >> 8;
-        imageBuffer[(j * width + i) * 4 + 5] =
-            (298 * c2 - 100 * d - 208 * e + 128) >> 8;
-        imageBuffer[(j * width + i) * 4 + 6] = (298 * c2 + 516 * d + 128) >> 8;
-        imageBuffer[(j * width + i) * 4 + 7] = 255;
+        this.imageBuffer[(j * this.width + i) * 4 + 0] = (298 * c1 + 409 * e + 128) >> 8;
+        this.imageBuffer[(j * this.width + i) * 4 + 1] = (298 * c1 - 100 * d - 208 * e + 128) >> 8;
+        this.imageBuffer[(j * this.width + i) * 4 + 2] = (298 * c1 + 516 * d + 128) >> 8;
+        this.imageBuffer[(j * this.width + i) * 4 + 3] = 255;
+        this.imageBuffer[(j * this.width + i) * 4 + 4] = (298 * c2 + 409 * e + 128) >> 8;
+        this.imageBuffer[(j * this.width + i) * 4 + 5] = (298 * c2 - 100 * d - 208 * e + 128) >> 8;
+        this.imageBuffer[(j * this.width + i) * 4 + 6] = (298 * c2 + 516 * d + 128) >> 8;
+        this.imageBuffer[(j * this.width + i) * 4 + 7] = 255;
       }
     }
-
-    draw();
   }
 
   handleImageMetadata(data: Uint8Array): void {
+    console.log('got an image match result to process');
     const fbBuffer = new flatbuffers.ByteBuffer(data);
-    this.result = frc971.vision.ImageMatchResult.getRootAsImageMatchResult(fbBuffer);
-    this.resultTimestamp = result.imageMonotonicTimestampNs();
-    draw();
+    this.result = ImageMatchResult.getRootAsImageMatchResult(fbBuffer);
+    this.resultTimestamp = this.result.imageMonotonicTimestampNs();
+    this.draw();
   }
 
   draw(): void {
-    if (imageTimestamp.low !== resultTimestamp.low ||
-        imageTimestamp.high !== resultTimestamp.high) {
+    if (!this.imageTimestamp || !this.resultTimestamp ||
+        this.imageTimestamp.low !== this.resultTimestamp.low ||
+        this.imageTimestamp.high !== this.resultTimestamp.high) {
+      // Log both (possibly null) timestamps to help debug the mismatch.
+      console.log('image and result do not match:', this.imageTimestamp,
+          this.resultTimestamp);
       return;
     }
+    this.convertImage();
     const ctx = this.canvas.getContext('2d');
 
-    this.canvas.width = width;
-    this.canvas.height = height;
-    const idata = ctx.createImageData(width, height);
+    this.canvas.width = this.width;
+    this.canvas.height = this.height;
+    const idata = ctx.createImageData(this.width, this.height);
     idata.data.set(this.imageBuffer);
     ctx.putImageData(idata, 0, 0);
-    ctx.beginPath();
-    for (const feature of this.result.getFeatures()) {
+    for (let i = 0; i < this.result.featuresLength(); i++) {
+      const feature = this.result.features(i);
       // Based on OpenCV drawKeypoint.
-      ctx.arc(feature.x, feature.y, feature.size, 0, 2 * Math.PI);
-      ctx.moveTo(feature.x, feature.y);
-      // TODO(alex): check that angle is correct (0?, direction?)
-      const angle = feature.angle * Math.PI / 180;
+      ctx.beginPath();
+      ctx.arc(feature.x(), feature.y(), feature.size(), 0, 2 * Math.PI);
+      ctx.stroke();
+
+      ctx.beginPath();
+      ctx.moveTo(feature.x(), feature.y());
+      const angle = feature.angle() * Math.PI / 180;
       ctx.lineTo(
-          feature.x + feature.radius * cos(angle),
-          feature.y + feature.radius * sin(angle));
+          feature.x() + feature.size() * Math.cos(angle),
+          feature.y() + feature.size() * Math.sin(angle));
+      ctx.stroke();
     }
-    ctx.stroke();
-  }
-
-  getId(): string {
-    return CameraImage.getFullyQualifiedName();
-  }
-
-  getResultId(): string {
-    return frc971.vision.ImageMatchResult.getFullyQualifiedName();
   }
 }
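
The pixel loop in `convertImage()` uses the integer BT.601 approximation from the Wikipedia article referenced in the code; each four-byte YUYV group shares one U/V pair across two pixels. The per-pixel math, as a standalone sketch (the real loop can skip the explicit clamp because writes to a `Uint8ClampedArray` clamp automatically):

```typescript
// Integer BT.601 YUV -> RGB with the same coefficients as convertImage().
function yuvToRgb(y: number, u: number, v: number): [number, number, number] {
  const c = y - 16;
  const d = u - 128;
  const e = v - 128;
  const clamp = (x: number) => Math.max(0, Math.min(255, x));
  return [
    clamp((298 * c + 409 * e + 128) >> 8),            // R
    clamp((298 * c - 100 * d - 208 * e + 128) >> 8),  // G
    clamp((298 * c + 516 * d + 128) >> 8),            // B
  ];
}
```
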
diff --git a/y2020/www/index.html b/y2020/www/index.html
index 97e16d4..f9ceca2 100644
--- a/y2020/www/index.html
+++ b/y2020/www/index.html
@@ -1,11 +1,6 @@
 <html>
-  <head>
-    <script src="flatbuffers.js"></script>
-    <script src="main_bundle.min.js" defer></script>
-    <link rel="stylesheet" href="styles.css">
-  </head>
   <body>
-    <div id="config">
-    </div>
+    <a href="camera_viewer.html">Camera Viewer</a><br>
+    <a href="field.html">Field Visualization</a>
   </body>
 </html>
diff --git a/y2020/www/main.ts b/y2020/www/main.ts
deleted file mode 100644
index d414eac..0000000
--- a/y2020/www/main.ts
+++ /dev/null
@@ -1,13 +0,0 @@
-import {Connection} from 'aos/network/www/proxy';
-
-import {ImageHandler} from './image_handler';
-
-const conn = new Connection();
-
-conn.connect();
-
-const iHandler = new ImageHandler();
-
-conn.addHandler(iHandler.getId(), (data) => iHandler.handleImage(data));
-conn.addHandler(
-    iHandler.getResultId(), (data) => iHandler.handleImageMetadata(data));
diff --git a/y2020/y2020.json b/y2020/y2020.json
index 3673eee..b40ac87 100644
--- a/y2020/y2020.json
+++ b/y2020/y2020.json
@@ -60,7 +60,7 @@
       "name": "/aos/roborio",
       "type": "aos.message_bridge.ClientStatistics",
       "source_node": "roborio",
-      "frequency": 2,
+      "frequency": 10,
       "num_senders": 2
     },
     {
@@ -114,7 +114,7 @@
       "name": "/aos/pi1",
       "type": "aos.message_bridge.ClientStatistics",
       "source_node": "pi1",
-      "frequency": 2,
+      "frequency": 10,
       "num_senders": 2
     },
     {
@@ -158,7 +158,7 @@
       "name": "/aos/pi2",
       "type": "aos.message_bridge.ClientStatistics",
       "source_node": "pi2",
-      "frequency": 2,
+      "frequency": 10,
       "num_senders": 2
     },
     {
@@ -202,7 +202,7 @@
       "name": "/aos/pi3",
       "type": "aos.message_bridge.ClientStatistics",
       "source_node": "pi3",
-      "frequency": 2,
+      "frequency": 10,
       "num_senders": 2
     },
     {
@@ -248,6 +248,12 @@
       "num_senders": 2
     },
     {
+      "name": "/superstructure",
+      "type": "y2020.joysticks.Setpoint",
+      "source_node": "roborio",
+      "num_senders": 2
+    },
+    {
       "name": "/drivetrain",
       "type": "frc971.IMUValues",
       "source_node": "roborio",
@@ -320,6 +326,42 @@
       "type": "frc971.vision.sift.ImageMatchResult",
       "source_node": "pi1",
       "frequency": 25,
+      "max_size": 10000,
+      "destination_nodes": [
+        {
+          "name": "roborio",
+          "priority": 1,
+          "time_to_live": 5000000
+        }
+      ]
+    },
+    {
+      "name": "/pi1/camera/detailed",
+      "type": "frc971.vision.sift.ImageMatchResult",
+      "source_node": "pi1",
+      "frequency": 25,
+      "max_size": 1000000
+    },
+    {
+      "name": "/pi1/camera",
+      "type": "frc971.vision.sift.TrainingData",
+      "source_node": "pi1",
+      "frequency": 2,
+      "max_size": 2000000
+    },
+    {
+      "name": "/pi2/camera",
+      "type": "frc971.vision.CameraImage",
+      "source_node": "pi2",
+      "frequency": 25,
+      "max_size": 620000,
+      "num_senders": 18
+    },
+    {
+      "name": "/pi2/camera",
+      "type": "frc971.vision.sift.ImageMatchResult",
+      "source_node": "pi2",
+      "frequency": 25,
       "max_size": 300000,
       "destination_nodes": [
         {
@@ -330,6 +372,56 @@
       ]
     },
     {
+      "name": "/pi2/camera/detailed",
+      "type": "frc971.vision.sift.ImageMatchResult",
+      "source_node": "pi2",
+      "frequency": 25,
+      "max_size": 1000000
+    },
+    {
+      "name": "/pi2/camera",
+      "type": "frc971.vision.sift.TrainingData",
+      "source_node": "pi2",
+      "frequency": 2,
+      "max_size": 2000000
+    },
+    {
+      "name": "/pi3/camera",
+      "type": "frc971.vision.CameraImage",
+      "source_node": "pi3",
+      "frequency": 25,
+      "max_size": 620000,
+      "num_senders": 18
+    },
+    {
+      "name": "/pi3/camera",
+      "type": "frc971.vision.sift.ImageMatchResult",
+      "source_node": "pi3",
+      "frequency": 25,
+      "max_size": 10000,
+      "destination_nodes": [
+        {
+          "name": "roborio",
+          "priority": 1,
+          "time_to_live": 5000000
+        }
+      ]
+    },
+    {
+      "name": "/pi3/camera/detailed",
+      "type": "frc971.vision.sift.ImageMatchResult",
+      "source_node": "pi3",
+      "frequency": 25,
+      "max_size": 1000000
+    },
+    {
+      "name": "/pi3/camera",
+      "type": "frc971.vision.sift.TrainingData",
+      "source_node": "pi3",
+      "frequency": 2,
+      "max_size": 2000000
+    },
+    {
       "name": "/autonomous",
       "type": "aos.common.actions.Status",
       "source_node": "roborio"
@@ -387,6 +479,24 @@
     },
     {
       "match": {
+        "name": "/camera",
+        "source_node": "pi1"
+      },
+      "rename": {
+        "name": "/pi1/camera"
+      }
+    },
+    {
+      "match": {
+        "name": "/camera/detailed",
+        "source_node": "pi1"
+      },
+      "rename": {
+        "name": "/pi1/camera/detailed"
+      }
+    },
+    {
+      "match": {
         "name": "/aos",
         "source_node": "pi2"
       },
@@ -396,6 +506,24 @@
     },
     {
       "match": {
+        "name": "/camera",
+        "source_node": "pi2"
+      },
+      "rename": {
+        "name": "/pi2/camera"
+      }
+    },
+    {
+      "match": {
+        "name": "/camera/detailed",
+        "source_node": "pi2"
+      },
+      "rename": {
+        "name": "/pi2/camera/detailed"
+      }
+    },
+    {
+      "match": {
         "name": "/aos",
         "source_node": "pi3"
       },
@@ -405,6 +533,24 @@
     },
     {
       "match": {
+        "name": "/camera",
+        "source_node": "pi3"
+      },
+      "rename": {
+        "name": "/pi3/camera"
+      }
+    },
+    {
+      "match": {
+        "name": "/camera/detailed",
+        "source_node": "pi3"
+      },
+      "rename": {
+        "name": "/pi3/camera/detailed"
+      }
+    },
+    {
+      "match": {
         "name": "/aos",
         "type": "aos.RobotState"
       },