Merge "Fix bazel query"
diff --git a/aos/events/logging/BUILD b/aos/events/logging/BUILD
index 40e6626..eac61cb 100644
--- a/aos/events/logging/BUILD
+++ b/aos/events/logging/BUILD
@@ -28,7 +28,6 @@
         ":buffer_encoder",
         ":uuid",
         ":logger_fbs",
-        ":lzma_encoder",
         "//aos:configuration",
         "//aos:flatbuffer_merge",
         "//aos:flatbuffers",
@@ -39,7 +38,12 @@
         "@com_github_google_flatbuffers//:flatbuffers",
         "@com_github_google_glog//:glog",
         "@com_google_absl//absl/types:span",
-    ],
+    ] + select({
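+        # Only pull in the lzma encoder on the CPUs where we support it.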
+        "//tools:cpu_k8": [":lzma_encoder"],
+        "//tools:cpu_aarch64": [":lzma_encoder"],
+        "//conditions:default": [],
+    }),
 )
 
 cc_binary(
diff --git a/aos/events/logging/logfile_utils.cc b/aos/events/logging/logfile_utils.cc
index 98527dc..3f85cc7 100644
--- a/aos/events/logging/logfile_utils.cc
+++ b/aos/events/logging/logfile_utils.cc
@@ -863,16 +863,6 @@
         .data = SizePrefixedFlatbufferVector<MessageHeader>::Empty()};
   }
 
-  // The algorithm below is constant time with some assumptions.  We need there
-  // to be no missing messages in the data stream.  This also assumes a queue
-  // hasn't wrapped.  That is conservative, but should let us get started.
-  //
-  // TODO(austin): We can break these assumptions pretty easily once we have a
-  // need.
-  CHECK_EQ(
-      data_queue->back().queue_index - data_queue->front().queue_index + 1u,
-      data_queue->size());
-
   if (remote_queue_index < data_queue->front().queue_index ||
       remote_queue_index > data_queue->back().queue_index) {
     return Message{
@@ -882,21 +872,55 @@
         .data = SizePrefixedFlatbufferVector<MessageHeader>::Empty()};
   }
 
-  // Pull the data out and confirm that the timestamps match as expected.
-  Message result = std::move(
-      (*data_queue)[remote_queue_index - data_queue->front().queue_index]);
-  CHECK_EQ(result.timestamp, monotonic_remote_time)
-      << ": Queue index matches, but timestamp doesn't.  Please investigate!";
-  CHECK_EQ(realtime_clock::time_point(std::chrono::nanoseconds(
-               result.data.message().realtime_sent_time())),
-           realtime_remote_time)
-      << ": Queue index matches, but timestamp doesn't.  Please investigate!";
-  // Now drop the data off the front.  We have deduplicated timestamps, so we
-  // are done.  And all the data is in order.
-  data_queue->erase(data_queue->begin(),
-                    data_queue->begin() + (1 + remote_queue_index -
-                                           data_queue->front().queue_index));
-  return result;
+  // The algorithm below is constant time with some assumptions: no missing
+  // messages in the data stream, and a queue which hasn't wrapped.  If those
+  // assumptions don't hold, fall back to the linear search below.
+  if (data_queue->back().queue_index - data_queue->front().queue_index + 1u ==
+      data_queue->size()) {
+    // Pull the data out and confirm that the timestamps match as expected.
+    Message result = std::move(
+        (*data_queue)[remote_queue_index - data_queue->front().queue_index]);
+
+    CHECK_EQ(result.timestamp, monotonic_remote_time)
+        << ": Queue index matches, but timestamp doesn't.  Please investigate!";
+    CHECK_EQ(realtime_clock::time_point(std::chrono::nanoseconds(
+                 result.data.message().realtime_sent_time())),
+             realtime_remote_time)
+        << ": Queue index matches, but timestamp doesn't.  Please investigate!";
+    // Now drop the data off the front.  We have deduplicated timestamps, so we
+    // are done.  And all the data is in order.
+    data_queue->erase(data_queue->begin(),
+                      data_queue->begin() + (1 + remote_queue_index -
+                                             data_queue->front().queue_index));
+    return result;
+  } else {
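+    // The queue has gaps or has wrapped, so the index arithmetic above
+    // doesn't hold.  Search linearly for the matching queue index instead.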
+    auto it = std::find_if(data_queue->begin(), data_queue->end(),
+                           [remote_queue_index](const Message &m) {
+                             return m.queue_index == remote_queue_index;
+                           });
+    if (it == data_queue->end()) {
+      return Message{
+          .channel_index = message.channel_index,
+          .queue_index = remote_queue_index,
+          .timestamp = monotonic_remote_time,
+          .data = SizePrefixedFlatbufferVector<MessageHeader>::Empty()};
+    }
+
+    Message result = std::move(*it);
+
+    CHECK_EQ(result.timestamp, monotonic_remote_time)
+        << ": Queue index matches, but timestamp doesn't.  Please investigate!";
+    CHECK_EQ(realtime_clock::time_point(std::chrono::nanoseconds(
+                 result.data.message().realtime_sent_time())),
+             realtime_remote_time)
+        << ": Queue index matches, but timestamp doesn't.  Please investigate!";
+
+    data_queue->erase(it);
+
+    return result;
+  }
 }
 
 void TimestampMapper::QueueUntil(monotonic_clock::time_point t) {
diff --git a/aos/events/logging/logfile_utils_test.cc b/aos/events/logging/logfile_utils_test.cc
index 1d83466..a380451 100644
--- a/aos/events/logging/logfile_utils_test.cc
+++ b/aos/events/logging/logfile_utils_test.cc
@@ -1015,6 +1015,66 @@
   }
 }
 
+// Tests that we handle a message which failed to be forwarded or logged.
+TEST_F(TimestampMapperTest, ReadMissingDataMiddle) {
+  const aos::monotonic_clock::time_point e = monotonic_clock::epoch();
+  {
+    DetachedBufferWriter writer0(logfile0_, std::make_unique<DummyEncoder>());
+    writer0.QueueSpan(config0_.span());
+    DetachedBufferWriter writer1(logfile1_, std::make_unique<DummyEncoder>());
+    writer1.QueueSpan(config2_.span());
+
+    writer0.QueueSizedFlatbuffer(
+        MakeLogMessage(e + chrono::milliseconds(1000), 0, 0x005));
+    writer1.QueueSizedFlatbuffer(MakeTimestampMessage(
+        e + chrono::milliseconds(1000), 0, chrono::seconds(100)));
+
+    // Create both the timestamp and message, but don't log them, simulating a
+    // forwarding drop.
+    MakeLogMessage(e + chrono::milliseconds(2000), 0, 0x006);
+    MakeTimestampMessage(e + chrono::milliseconds(2000), 0,
+                         chrono::seconds(100));
+
+    writer0.QueueSizedFlatbuffer(
+        MakeLogMessage(e + chrono::milliseconds(3000), 0, 0x007));
+    writer1.QueueSizedFlatbuffer(MakeTimestampMessage(
+        e + chrono::milliseconds(3000), 0, chrono::seconds(100)));
+  }
+
+  const std::vector<LogFile> parts = SortParts({logfile0_, logfile1_});
+
+  ASSERT_EQ(parts[0].logger_node, "pi1");
+  ASSERT_EQ(parts[1].logger_node, "pi2");
+
+  TimestampMapper mapper0(FilterPartsForNode(parts, "pi1"));
+  TimestampMapper mapper1(FilterPartsForNode(parts, "pi2"));
+
+  mapper0.AddPeer(&mapper1);
+  mapper1.AddPeer(&mapper0);
+
+  {
+    std::deque<TimestampedMessage> output1;
+
+    ASSERT_TRUE(mapper1.Front() != nullptr);
+    output1.emplace_back(std::move(*mapper1.Front()));
+    mapper1.PopFront();
+
+    ASSERT_TRUE(mapper1.Front() != nullptr);
+    output1.emplace_back(std::move(*mapper1.Front()));
+
+    ASSERT_FALSE(mapper1.Front() == nullptr);
+
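+    // The 2000ms message was never logged, so only the 1000ms and 3000ms
+    // messages should show up.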
+    EXPECT_EQ(output1[0].monotonic_event_time,
+              e + chrono::seconds(100) + chrono::milliseconds(1000));
+    EXPECT_TRUE(output1[0].data.Verify());
+    EXPECT_EQ(output1[1].monotonic_event_time,
+              e + chrono::seconds(100) + chrono::milliseconds(3000));
+    EXPECT_TRUE(output1[1].data.Verify());
+  }
+}
+
 // Tests that we properly sort log files with duplicate timestamps.
 TEST_F(TimestampMapperTest, ReadSameTimestamp) {
   const aos::monotonic_clock::time_point e = monotonic_clock::epoch();
diff --git a/debian/python.BUILD b/debian/python.BUILD
index 7abcf1f..c840a19 100644
--- a/debian/python.BUILD
+++ b/debian/python.BUILD
@@ -10,6 +10,8 @@
         "usr/include/",
         "usr/include/python3.5m/",
     ],
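+    # The prebuilt Python files are x86-only.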
+    target_compatible_with = ["@platforms//cpu:x86_64"],
     visibility = ["//visibility:public"],
 )
 
@@ -47,6 +48,8 @@
         "usr/include/",
         "usr/include/python2.7/",
     ],
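+    # The prebuilt Python files are x86-only.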
+    target_compatible_with = ["@platforms//cpu:x86_64"],
     visibility = ["//visibility:public"],
 )
 
diff --git a/third_party/matplotlib-cpp/BUILD b/third_party/matplotlib-cpp/BUILD
index 4f1f004..1071824 100644
--- a/third_party/matplotlib-cpp/BUILD
+++ b/third_party/matplotlib-cpp/BUILD
@@ -9,7 +9,9 @@
         "@matplotlib_repo//:matplotlib3",
         "@python_repo//:all_files",
     ],
-    target_compatible_with = ["@platforms//os:linux"],
+    # While this is technically compatible with "linux",
+    # "@python_repo//:all_files" contains x86-only binaries.
+    target_compatible_with = ["@platforms//cpu:x86_64"],
     visibility = ["//visibility:public"],
     deps = [
         "@python_repo//:python3.5_lib",
diff --git a/tools/ci/run-tests.sh b/tools/ci/run-tests.sh
index ef4cff8..005e7dd 100755
--- a/tools/ci/run-tests.sh
+++ b/tools/ci/run-tests.sh
@@ -7,6 +7,9 @@
 
 readonly TARGETS='//... @com_github_google_glog//... @com_google_ceres_solver//...'
 readonly M4F_TARGETS='//...'
+# Sanity check that the y2020 roborio code builds, which confirms that the
+# platform compatibility for the roborio is set up correctly.
+readonly ROBORIO_TARGETS="${TARGETS} //y2020:download_stripped"
 readonly COMMON='-c opt --stamp=no --curses=no --color=no --symlink_prefix=/'
 
 # Put everything in different output bases so we can get 5 bazel servers
@@ -23,7 +26,7 @@
 tools/bazel --output_base=../roborio_output_base build \
     ${COMMON} \
     --config=roborio \
-    ${TARGETS}
+    ${ROBORIO_TARGETS}
 
 tools/bazel --output_base=../armhf-debian_output_base build \
     ${COMMON} \