Estimate the distributed clock with boots accounted for

Route the current boot through both the noncausal filter and the
timestamp solver code.  This gets us one step closer to exposing boots
to the user.
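
The diff below reads .boot and .time off of the entries that
NextTimestamp() now returns.  As a rough, simplified sketch (an
assumption for illustration; the real aos BootTimestamp lives in the
logging code and carries more than this):

    #include <chrono>
    #include <cstddef>

    // A monotonic time tagged with which boot of the node produced it.
    // Boot 0 is the first boot.  aos uses monotonic_clock::time_point
    // where this sketch uses std::chrono::steady_clock.
    struct BootTimestamp {
      size_t boot = 0;
      std::chrono::steady_clock::time_point time;
    };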

This stops before changing log_reader, though.  We still CHECK that
everything comes from a single boot on the way into the
SimulatedEventLoopFactory, which is what actually runs the reading.
This felt like a reasonable intermediate point.
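
The invariant being CHECKed there is the same one removed from
timestamp_extractor below: every log part selected for a node must come
from a single boot until reboots are plumbed through replay.  A minimal
standalone sketch of that invariant (the exact location and CHECK in
log_reader may differ; LogParts here is a stand-in):

    #include <cassert>
    #include <cstddef>
    #include <string>
    #include <vector>

    // Stand-in for a log part's metadata; the real LogParts carries much
    // more (node, part files, monotonic start times, ...).
    struct LogParts {
      std::string source_boot_uuid;
    };

    // Every part for a node must share a boot UUID.  In aos this is a
    // CHECK_EQ with a message listing the offending log files.
    void CheckSingleBoot(const std::vector<LogParts> &filtered_parts) {
      for (size_t i = 1; i < filtered_parts.size(); ++i) {
        assert(filtered_parts[i].source_boot_uuid ==
               filtered_parts[0].source_boot_uuid);
      }
    }

    int main() {
      CheckSingleBoot({{"boot-a"}, {"boot-a"}});
      return 0;
    }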

Change-Id: I85d0735c449a2aacf8cc457bdbcdbd667f1809ef
Signed-off-by: Austin Schuh <austin.schuh@bluerivertech.com>
diff --git a/aos/events/logging/timestamp_extractor.cc b/aos/events/logging/timestamp_extractor.cc
index 232b842..ae3c4cc 100644
--- a/aos/events/logging/timestamp_extractor.cc
+++ b/aos/events/logging/timestamp_extractor.cc
@@ -53,13 +53,6 @@
     // Confirm that all the parts are from the same boot if there are enough
     // parts to not be from the same boot.
     if (!filtered_parts.empty()) {
-      for (size_t i = 1; i < filtered_parts.size(); ++i) {
-        CHECK_EQ(filtered_parts[i].source_boot_uuid,
-                 filtered_parts[0].source_boot_uuid)
-            << ": Found parts from different boots "
-            << LogFileVectorToString(log_files);
-      }
-
       // Filter the parts relevant to each node when building the mapper.
       mappers.emplace_back(
           std::make_unique<TimestampMapper>(std::move(filtered_parts)));
@@ -126,25 +119,32 @@
 
   // Don't get clever. Use the first time as the start time.  Note: this is
   // different than how log_cat and others work.
-  std::optional<std::tuple<distributed_clock::time_point,
-                           std::vector<monotonic_clock::time_point>>>
+  std::optional<
+      std::tuple<distributed_clock::time_point, std::vector<BootTimestamp>>>
       next_timestamp = multinode_estimator.NextTimestamp();
   CHECK(next_timestamp);
   LOG(INFO) << "Starting at:";
   for (const Node *node : configuration::GetNodes(config)) {
     const size_t node_index = configuration::GetNodeIndex(config, node);
     LOG(INFO) << "  " << node->name()->string_view() << " -> "
-              << std::get<1>(*next_timestamp)[node_index];
+              << std::get<1>(*next_timestamp)[node_index].time;
   }
 
-  multinode_estimator.Start(std::get<1>(*next_timestamp));
+  std::vector<monotonic_clock::time_point> just_monotonic(
+      std::get<1>(*next_timestamp).size());
+  for (size_t i = 0; i < just_monotonic.size(); ++i) {
+    CHECK_EQ(std::get<1>(*next_timestamp)[i].boot, 0u);
+    just_monotonic[i] = std::get<1>(*next_timestamp)[i].time;
+  }
+  multinode_estimator.Start(just_monotonic);
 
   // As we pull off all the timestamps, the time problem is continually solved,
   // filling in the CSV files.
   while (true) {
-    std::optional<std::tuple<distributed_clock::time_point,
-                             std::vector<monotonic_clock::time_point>>>
+    std::optional<
+        std::tuple<distributed_clock::time_point, std::vector<BootTimestamp>>>
         next_timestamp = multinode_estimator.NextTimestamp();
+    // TODO(austin): Figure out how to make the plot work across reboots.
     if (!next_timestamp) {
       break;
     }
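
For illustration only, a self-contained sketch of how a caller walks the
new NextTimestamp() shape, with a fake estimator standing in for
MultiNodeNoncausalOffsetEstimator and simplified clock types (these
names and types are assumptions, not the real aos API):

    #include <chrono>
    #include <cstddef>
    #include <iostream>
    #include <optional>
    #include <tuple>
    #include <vector>

    // Simplified stand-ins for the aos clock types.
    using distributed_time = std::chrono::nanoseconds;
    struct BootTimestamp {
      size_t boot = 0;
      std::chrono::nanoseconds time{0};
    };

    // Hands out two solved samples and then stops, mirroring the shape of
    // NextTimestamp(): a distributed time plus one BootTimestamp per node.
    class FakeEstimator {
     public:
      std::optional<std::tuple<distributed_time, std::vector<BootTimestamp>>>
      NextTimestamp() {
        if (count_ >= 2) return std::nullopt;
        ++count_;
        return std::make_tuple(
            distributed_time(count_ * 1000),
            std::vector<BootTimestamp>{
                {0, std::chrono::nanoseconds(count_ * 1000)},
                {0, std::chrono::nanoseconds(count_ * 990)}});
      }

     private:
      int count_ = 0;
    };

    int main() {
      FakeEstimator estimator;
      // Pull timestamps until the estimator runs out, logging the per-node
      // boot and monotonic time for each solved distributed time.
      while (true) {
        auto next = estimator.NextTimestamp();
        if (!next) break;
        std::cout << "distributed " << std::get<0>(*next).count() << "ns:";
        for (const BootTimestamp &t : std::get<1>(*next)) {
          std::cout << "  boot " << t.boot << " @ " << t.time.count() << "ns";
        }
        std::cout << "\n";
      }
      return 0;
    }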