blob: d206d5d50b75e3bd641a2fc6a1c138bc3ee1620e [file] [log] [blame]
James Kuszmaul38735e82019-12-07 16:42:06 -08001#include "aos/events/logging/logger.h"
Austin Schuhe309d2a2019-11-29 13:25:21 -08002
3#include <fcntl.h>
Austin Schuh4c4e0092019-12-22 16:18:03 -08004#include <limits.h>
Austin Schuhe309d2a2019-11-29 13:25:21 -08005#include <sys/stat.h>
6#include <sys/types.h>
7#include <sys/uio.h>
8#include <vector>
9
Austin Schuh8bd96322020-02-13 21:18:22 -080010#include "Eigen/Dense"
Austin Schuh2f8fd752020-09-01 22:38:28 -070011#include "absl/strings/escaping.h"
Austin Schuhe309d2a2019-11-29 13:25:21 -080012#include "absl/types/span.h"
13#include "aos/events/event_loop.h"
James Kuszmaul38735e82019-12-07 16:42:06 -080014#include "aos/events/logging/logger_generated.h"
Austin Schuh64fab802020-09-09 22:47:47 -070015#include "aos/events/logging/uuid.h"
Austin Schuhe309d2a2019-11-29 13:25:21 -080016#include "aos/flatbuffer_merge.h"
Austin Schuh288479d2019-12-18 19:47:52 -080017#include "aos/network/team_number.h"
Austin Schuhe309d2a2019-11-29 13:25:21 -080018#include "aos/time/time.h"
Brian Silvermanae7c0332020-09-30 16:58:23 -070019#include "aos/util/file.h"
Austin Schuhe309d2a2019-11-29 13:25:21 -080020#include "flatbuffers/flatbuffers.h"
Austin Schuh2f8fd752020-09-01 22:38:28 -070021#include "third_party/gmp/gmpxx.h"
Austin Schuhe309d2a2019-11-29 13:25:21 -080022
Austin Schuh15649d62019-12-28 16:36:38 -080023DEFINE_bool(skip_missing_forwarding_entries, false,
24 "If true, drop any forwarding entries with missing data. If "
25 "false, CHECK.");
Austin Schuhe309d2a2019-11-29 13:25:21 -080026
Austin Schuh8bd96322020-02-13 21:18:22 -080027DEFINE_bool(timestamps_to_csv, false,
28 "If true, write all the time synchronization information to a set "
29 "of CSV files in /tmp/. This should only be needed when debugging "
30 "time synchronization.");
31
Austin Schuh2f8fd752020-09-01 22:38:28 -070032DEFINE_bool(skip_order_validation, false,
33 "If true, ignore any out of orderness in replay");
34
Austin Schuhe309d2a2019-11-29 13:25:21 -080035namespace aos {
36namespace logger {
Austin Schuhe309d2a2019-11-29 13:25:21 -080037namespace chrono = std::chrono;
38
// Constructs a Logger which polls fetchers on a timer and writes messages to
// files produced by a LogNamer.  Walks every channel in configuration_ (which
// may differ from the event loop's configuration) and decides, per channel,
// whether to log message data, delivery timestamps, and/or remote timestamp
// contents.  should_log lets the caller filter channels out entirely.
Logger::Logger(EventLoop *event_loop, const Configuration *configuration,
               std::function<bool(const Channel *)> should_log)
    : event_loop_(event_loop),
      configuration_(configuration),
      // Capture the kernel boot id so log files can be tied to a specific
      // boot of this machine.
      boot_uuid_(
          util::ReadFileToStringOrDie("/proc/sys/kernel/random/boot_id")),
      name_(network::GetHostname()),
      // The timer drives the periodic DoLogData polling once logging starts.
      timer_handler_(event_loop_->AddTimer(
          [this]() { DoLogData(event_loop_->monotonic_now()); })),
      // ServerStatistics is only fetched on multi-node systems; single-node
      // systems get a default (empty) fetcher.
      server_statistics_fetcher_(
          configuration::MultiNode(event_loop_->configuration())
              ? event_loop_->MakeFetcher<message_bridge::ServerStatistics>(
                    "/aos")
              : aos::Fetcher<message_bridge::ServerStatistics>()) {
  VLOG(1) << "Creating logger for " << FlatbufferToJson(event_loop_->node());

  // Find all the nodes which are logging timestamps on our node.  This may
  // over-estimate if should_log is specified.
  std::vector<const Node *> timestamp_logger_nodes =
      configuration::TimestampNodes(configuration_, event_loop_->node());

  // Maps each /aos/remote_timestamps/* channel to the node whose timestamps
  // it carries; consulted below when classifying channels.
  std::map<const Channel *, const Node *> timestamp_logger_channels;

  // Now that we have all the nodes accumulated, make remote timestamp loggers
  // for them.
  for (const Node *node : timestamp_logger_nodes) {
    // Note: since we are doing a find using the event loop channel, we need to
    // make sure this channel pointer is part of the event loop configuration,
    // not configuration_.  This only matters when configuration_ !=
    // event_loop->configuration();
    const Channel *channel = configuration::GetChannel(
        event_loop->configuration(),
        absl::StrCat("/aos/remote_timestamps/", node->name()->string_view()),
        logger::MessageHeader::GetFullyQualifiedName(), event_loop_->name(),
        event_loop_->node());

    CHECK(channel != nullptr)
        << ": Remote timestamps are logged on "
        << event_loop_->node()->name()->string_view()
        << " but can't find channel /aos/remote_timestamps/"
        << node->name()->string_view();
    if (!should_log(channel)) {
      continue;
    }
    timestamp_logger_channels.insert(std::make_pair(channel, node));
  }

  const size_t our_node_index =
      configuration::GetNodeIndex(configuration_, event_loop_->node());

  // Classify every channel in configuration_ and build a FetcherStruct for
  // each one we will log in any form.
  for (size_t channel_index = 0;
       channel_index < configuration_->channels()->size(); ++channel_index) {
    const Channel *const config_channel =
        configuration_->channels()->Get(channel_index);
    // The MakeRawFetcher method needs a channel which is in the event loop
    // configuration() object, not the configuration_ object.  Go look that up
    // from the config.
    const Channel *channel = aos::configuration::GetChannel(
        event_loop_->configuration(), config_channel->name()->string_view(),
        config_channel->type()->string_view(), "", event_loop_->node());
    CHECK(channel != nullptr)
        << ": Failed to look up channel "
        << aos::configuration::CleanedChannelToString(config_channel);
    if (!should_log(channel)) {
      continue;
    }

    FetcherStruct fs;
    fs.node_index = our_node_index;
    fs.channel_index = channel_index;
    fs.channel = channel;

    const bool is_local =
        configuration::ChannelIsSendableOnNode(channel, event_loop_->node());

    const bool is_readable =
        configuration::ChannelIsReadableOnNode(channel, event_loop_->node());
    const bool is_logged = configuration::ChannelMessageIsLoggedOnNode(
        channel, event_loop_->node());
    const bool log_message = is_logged && is_readable;

    // Delivery times only exist on multi-node systems (node() != nullptr).
    bool log_delivery_times = false;
    if (event_loop_->node() != nullptr) {
      log_delivery_times = configuration::ConnectionDeliveryTimeIsLoggedOnNode(
          channel, event_loop_->node(), event_loop_->node());
    }

    // Now, detect a MessageHeader timestamp logger where we should just log the
    // contents to a file directly.
    const bool log_contents = timestamp_logger_channels.find(channel) !=
                              timestamp_logger_channels.end();

    if (log_message || log_delivery_times || log_contents) {
      fs.fetcher = event_loop->MakeRawFetcher(channel);
      VLOG(1) << "Logging channel "
              << configuration::CleanedChannelToString(channel);

      if (log_delivery_times) {
        VLOG(1) << " Delivery times";
        fs.wants_timestamp_writer = true;
      }
      if (log_message) {
        VLOG(1) << " Data";
        fs.wants_writer = true;
        // Messages that originate on another node get logged with the remote
        // log type so replay knows where they came from.
        if (!is_local) {
          fs.log_type = LogType::kLogRemoteMessage;
        }
      }
      if (log_contents) {
        VLOG(1) << "Timestamp logger channel "
                << configuration::CleanedChannelToString(channel);
        fs.timestamp_node = timestamp_logger_channels.find(channel)->second;
        fs.wants_contents_writer = true;
        // Contents get attributed to the remote node, not to us.
        fs.node_index =
            configuration::GetNodeIndex(configuration_, fs.timestamp_node);
      }
      fetchers_.emplace_back(std::move(fs));
    }
  }

  // When we are logging remote timestamps, we need to be able to translate from
  // the channel index that the event loop uses to the channel index in the
  // config in the log file.
  event_loop_to_logged_channel_index_.resize(
      event_loop->configuration()->channels()->size(), -1);
  for (size_t event_loop_channel_index = 0;
       event_loop_channel_index <
       event_loop->configuration()->channels()->size();
       ++event_loop_channel_index) {
    const Channel *event_loop_channel =
        event_loop->configuration()->channels()->Get(event_loop_channel_index);

    const Channel *logged_channel = aos::configuration::GetChannel(
        configuration_, event_loop_channel->name()->string_view(),
        event_loop_channel->type()->string_view(), "",
        configuration::GetNode(configuration_, event_loop_->node()));

    // Channels absent from the logged config keep the -1 sentinel from the
    // resize above.
    if (logged_channel != nullptr) {
      event_loop_to_logged_channel_index_[event_loop_channel_index] =
          configuration::ChannelIndex(configuration_, logged_channel);
    }
  }
}
182
183Logger::~Logger() {
184 if (log_namer_) {
185 // If we are replaying a log file, or in simulation, we want to force the
186 // last bit of data to be logged. The easiest way to deal with this is to
187 // poll everything as we go to destroy the class, ie, shut down the logger,
188 // and write it to disk.
189 StopLogging(event_loop_->monotonic_now());
190 }
191}
192
// Starts logging into files produced by log_namer.  log_start_uuid (may be
// empty) is recorded in the header so multiple per-node logs from one start
// event can be associated.  CHECK-fails if logging is already running.
void Logger::StartLogging(std::unique_ptr<LogNamer> log_namer,
                          std::string_view log_start_uuid) {
  CHECK(!log_namer_) << ": Already logging";
  log_namer_ = std::move(log_namer);
  // Fresh UUID per logging session.
  log_event_uuid_ = UUID::Random();
  log_start_uuid_ = log_start_uuid;
  VLOG(1) << "Starting logger for " << FlatbufferToJson(event_loop_->node());

  // We want to do as much work as possible before the initial Fetch. Time
  // between that and actually starting to log opens up the possibility of
  // falling off the end of the queue during that time.

  // Create the writers each fetcher asked for during construction.
  for (FetcherStruct &f : fetchers_) {
    if (f.wants_writer) {
      f.writer = log_namer_->MakeWriter(f.channel);
    }
    if (f.wants_timestamp_writer) {
      f.timestamp_writer = log_namer_->MakeTimestampWriter(f.channel);
    }
    if (f.wants_contents_writer) {
      f.contents_writer = log_namer_->MakeForwardedTimestampWriter(
          f.channel, CHECK_NOTNULL(f.timestamp_node));
    }
  }

  // One state entry per node on multi-node systems; a single entry otherwise.
  CHECK(node_state_.empty());
  node_state_.resize(configuration::MultiNode(configuration_)
                         ? configuration_->nodes()->size()
                         : 1u);

  for (const Node *node : log_namer_->nodes()) {
    const int node_index = configuration::GetNodeIndex(configuration_, node);

    node_state_[node_index].log_file_header = MakeHeader(node);
  }

  // Grab data from each channel right before we declare the log file started
  // so we can capture the latest message on each channel.  This lets us have
  // non periodic messages with configuration that now get logged.
  for (FetcherStruct &f : fetchers_) {
    const auto start = event_loop_->monotonic_now();
    const bool got_new = f.fetcher->Fetch();
    const auto end = event_loop_->monotonic_now();
    RecordFetchResult(start, end, got_new, &f);

    // If there is a message, we want to write it.
    // (written == true means "nothing pending"; no data -> nothing to write.)
    f.written = f.fetcher->context().data == nullptr;
  }

  // Clear out any old timestamps in case we are re-starting logging.
  for (size_t i = 0; i < node_state_.size(); ++i) {
    SetStartTime(i, monotonic_clock::min_time, realtime_clock::min_time);
  }

  WriteHeader();

  LOG(INFO) << "Logging node as " << FlatbufferToJson(event_loop_->node())
            << " start_time " << last_synchronized_time_;

  // Kick off the periodic polling that drives DoLogData.
  timer_handler_->Setup(event_loop_->monotonic_now() + polling_period_,
                        polling_period_);
}
255
Brian Silverman1f345222020-09-24 21:14:48 -0700256std::unique_ptr<LogNamer> Logger::StopLogging(
257 aos::monotonic_clock::time_point end_time) {
258 CHECK(log_namer_) << ": Not logging right now";
259
260 if (end_time != aos::monotonic_clock::min_time) {
261 LogUntil(end_time);
262 }
263 timer_handler_->Disable();
264
265 for (FetcherStruct &f : fetchers_) {
266 f.writer = nullptr;
267 f.timestamp_writer = nullptr;
268 f.contents_writer = nullptr;
269 }
270 node_state_.clear();
271
Brian Silvermanae7c0332020-09-30 16:58:23 -0700272 log_event_uuid_ = UUID::Zero();
273 log_start_uuid_ = std::string();
274
Brian Silverman1f345222020-09-24 21:14:48 -0700275 return std::move(log_namer_);
276}
277
// Picks the log start time and writes the header for every node we are
// producing files for.  Must run after the initial fetches so the fetchers
// all point at the most recent message before the start time.
void Logger::WriteHeader() {
  // On multi-node systems, refresh server statistics so MaybeUpdateTimestamp
  // can compensate remote start times with measured clock offsets.
  if (configuration::MultiNode(configuration_)) {
    server_statistics_fetcher_.Fetch();
  }

  aos::monotonic_clock::time_point monotonic_start_time =
      event_loop_->monotonic_now();
  aos::realtime_clock::time_point realtime_start_time =
      event_loop_->realtime_now();

  // We need to pick a point in time to declare the log file "started".  This
  // starts here.  It needs to be after everything is fetched so that the
  // fetchers are all pointed at the most recent message before the start
  // time.
  last_synchronized_time_ = monotonic_start_time;

  for (const Node *node : log_namer_->nodes()) {
    const int node_index = configuration::GetNodeIndex(configuration_, node);
    // May leave remote nodes unset if they are not connected yet; they get
    // filled in later by WriteMissingTimestamps.
    MaybeUpdateTimestamp(node, node_index, monotonic_start_time,
                         realtime_start_time);
    log_namer_->WriteHeader(&node_state_[node_index].log_file_header, node);
  }
}
Austin Schuh8bd96322020-02-13 21:18:22 -0800301
Austin Schuh2f8fd752020-09-01 22:38:28 -0700302void Logger::WriteMissingTimestamps() {
Austin Schuh0c297012020-09-16 18:41:59 -0700303 if (configuration::MultiNode(configuration_)) {
Austin Schuh2f8fd752020-09-01 22:38:28 -0700304 server_statistics_fetcher_.Fetch();
305 } else {
306 return;
307 }
308
309 if (server_statistics_fetcher_.get() == nullptr) {
310 return;
311 }
312
313 for (const Node *node : log_namer_->nodes()) {
Brian Silvermand90905f2020-09-23 14:42:56 -0700314 const int node_index = configuration::GetNodeIndex(configuration_, node);
Austin Schuh2f8fd752020-09-01 22:38:28 -0700315 if (MaybeUpdateTimestamp(
316 node, node_index,
317 server_statistics_fetcher_.context().monotonic_event_time,
318 server_statistics_fetcher_.context().realtime_event_time)) {
Austin Schuh64fab802020-09-09 22:47:47 -0700319 log_namer_->Rotate(node, &node_state_[node_index].log_file_header);
Austin Schuh2f8fd752020-09-01 22:38:28 -0700320 }
321 }
322}
323
324void Logger::SetStartTime(size_t node_index,
325 aos::monotonic_clock::time_point monotonic_start_time,
326 aos::realtime_clock::time_point realtime_start_time) {
327 node_state_[node_index].monotonic_start_time = monotonic_start_time;
328 node_state_[node_index].realtime_start_time = realtime_start_time;
329 node_state_[node_index]
330 .log_file_header.mutable_message()
331 ->mutate_monotonic_start_time(
332 std::chrono::duration_cast<std::chrono::nanoseconds>(
333 monotonic_start_time.time_since_epoch())
334 .count());
335 if (node_state_[node_index]
336 .log_file_header.mutable_message()
337 ->has_realtime_start_time()) {
338 node_state_[node_index]
339 .log_file_header.mutable_message()
340 ->mutate_realtime_start_time(
341 std::chrono::duration_cast<std::chrono::nanoseconds>(
342 realtime_start_time.time_since_epoch())
343 .count());
344 }
345}
346
// Attempts to set the start time for node.  Returns true if the time was set.
// For the local node (or single-node systems) this always succeeds.  For a
// remote node it only succeeds once the message bridge reports the node as
// CONNECTED with a known monotonic offset, which is applied to translate our
// start time into that node's clock.
bool Logger::MaybeUpdateTimestamp(
    const Node *node, int node_index,
    aos::monotonic_clock::time_point monotonic_start_time,
    aos::realtime_clock::time_point realtime_start_time) {
  // Bail early if the start times are already set.
  if (node_state_[node_index].monotonic_start_time !=
      monotonic_clock::min_time) {
    return false;
  }
  if (configuration::MultiNode(configuration_)) {
    if (event_loop_->node() == node) {
      // There are no offsets to compute for ourself, so always succeed.
      SetStartTime(node_index, monotonic_start_time, realtime_start_time);
      return true;
    } else if (server_statistics_fetcher_.get() != nullptr) {
      // We must be a remote node now.  Look for the connection and see if it is
      // connected.

      for (const message_bridge::ServerConnection *connection :
           *server_statistics_fetcher_->connections()) {
        // Skip entries for other nodes.
        if (connection->node()->name()->string_view() !=
            node->name()->string_view()) {
          continue;
        }

        if (connection->state() != message_bridge::State::CONNECTED) {
          VLOG(1) << node->name()->string_view()
                  << " is not connected, can't start it yet.";
          break;
        }

        if (!connection->has_monotonic_offset()) {
          VLOG(1) << "Missing monotonic offset for setting start time for node "
                  << aos::FlatbufferToJson(node);
          break;
        }

        VLOG(1) << "Updating start time for " << aos::FlatbufferToJson(node);

        // Found it and it is connected.  Compensate and go.
        monotonic_start_time +=
            std::chrono::nanoseconds(connection->monotonic_offset());

        SetStartTime(node_index, monotonic_start_time, realtime_start_time);
        return true;
      }
    }
  } else {
    // Single-node: no offsets exist, just record the time.
    SetStartTime(node_index, monotonic_start_time, realtime_start_time);
    return true;
  }
  return false;
}
400
// Builds the LogFileHeader flatbuffer for one node's log file.  Start times
// are filled with min_time sentinels here and patched later via SetStartTime.
aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> Logger::MakeHeader(
    const Node *node) {
  // Now write the header with this timestamp in it.
  flatbuffers::FlatBufferBuilder fbb;
  // Force defaults so the start-time fields exist and can be mutated in place.
  fbb.ForceDefaults(true);

  // TODO(austin): Compress this much more efficiently.  There are a bunch of
  // duplicated schemas.
  const flatbuffers::Offset<aos::Configuration> configuration_offset =
      CopyFlatBuffer(configuration_, &fbb);

  const flatbuffers::Offset<flatbuffers::String> name_offset =
      fbb.CreateString(name_);

  // StartLogging must have set a real session UUID before headers are built.
  CHECK(log_event_uuid_ != UUID::Zero());
  const flatbuffers::Offset<flatbuffers::String> log_event_uuid_offset =
      fbb.CreateString(log_event_uuid_.string_view());

  const flatbuffers::Offset<flatbuffers::String> logger_instance_uuid_offset =
      fbb.CreateString(logger_instance_uuid_.string_view());

  // The start UUID is optional; leave the offset null when absent.
  flatbuffers::Offset<flatbuffers::String> log_start_uuid_offset;
  if (!log_start_uuid_.empty()) {
    log_start_uuid_offset = fbb.CreateString(log_start_uuid_);
  }

  const flatbuffers::Offset<flatbuffers::String> boot_uuid_offset =
      fbb.CreateString(boot_uuid_);

  // Fixed nil-style parts UUID; parts_index below is always 0 for the first
  // part of a file.
  const flatbuffers::Offset<flatbuffers::String> parts_uuid_offset =
      fbb.CreateString("00000000-0000-4000-8000-000000000000");

  flatbuffers::Offset<Node> node_offset;
  flatbuffers::Offset<Node> logger_node_offset;

  if (configuration::MultiNode(configuration_)) {
    node_offset = CopyFlatBuffer(node, &fbb);
    logger_node_offset = CopyFlatBuffer(event_loop_->node(), &fbb);
  }

  // All nested offsets must be created before the table builder starts.
  aos::logger::LogFileHeader::Builder log_file_header_builder(fbb);

  log_file_header_builder.add_name(name_offset);

  // Only add the node if we are running in a multinode configuration.
  if (node != nullptr) {
    log_file_header_builder.add_node(node_offset);
    log_file_header_builder.add_logger_node(logger_node_offset);
  }

  log_file_header_builder.add_configuration(configuration_offset);
  // The worst case theoretical out of order is the polling period times 2.
  // One message could get logged right after the boundary, but be for right
  // before the next boundary.  And the reverse could happen for another
  // message.  Report back 3x to be extra safe, and because the cost isn't
  // huge on the read side.
  log_file_header_builder.add_max_out_of_order_duration(
      std::chrono::nanoseconds(3 * polling_period_).count());

  // min_time sentinel; the real start time is mutated in by SetStartTime.
  log_file_header_builder.add_monotonic_start_time(
      std::chrono::duration_cast<std::chrono::nanoseconds>(
          monotonic_clock::min_time.time_since_epoch())
          .count());
  // Only our own node gets a realtime start time field.
  if (node == event_loop_->node()) {
    log_file_header_builder.add_realtime_start_time(
        std::chrono::duration_cast<std::chrono::nanoseconds>(
            realtime_clock::min_time.time_since_epoch())
            .count());
  }

  log_file_header_builder.add_log_event_uuid(log_event_uuid_offset);
  log_file_header_builder.add_logger_instance_uuid(logger_instance_uuid_offset);
  if (!log_start_uuid_offset.IsNull()) {
    log_file_header_builder.add_log_start_uuid(log_start_uuid_offset);
  }
  log_file_header_builder.add_boot_uuid(boot_uuid_offset);

  log_file_header_builder.add_parts_uuid(parts_uuid_offset);
  log_file_header_builder.add_parts_index(0);

  fbb.FinishSizePrefixed(log_file_header_builder.Finish());
  return fbb.Release();
}
484
Brian Silvermancb805822020-10-06 17:43:35 -0700485void Logger::ResetStatisics() {
486 max_message_fetch_time_ = std::chrono::nanoseconds::zero();
487 max_message_fetch_time_channel_ = -1;
488 max_message_fetch_time_size_ = -1;
489 total_message_fetch_time_ = std::chrono::nanoseconds::zero();
490 total_message_fetch_count_ = 0;
491 total_message_fetch_bytes_ = 0;
492 total_nop_fetch_time_ = std::chrono::nanoseconds::zero();
493 total_nop_fetch_count_ = 0;
494 max_copy_time_ = std::chrono::nanoseconds::zero();
495 max_copy_time_channel_ = -1;
496 max_copy_time_size_ = -1;
497 total_copy_time_ = std::chrono::nanoseconds::zero();
498 total_copy_count_ = 0;
499 total_copy_bytes_ = 0;
500}
501
Austin Schuh2f8fd752020-09-01 22:38:28 -0700502void Logger::Rotate() {
503 for (const Node *node : log_namer_->nodes()) {
Brian Silvermand90905f2020-09-23 14:42:56 -0700504 const int node_index = configuration::GetNodeIndex(configuration_, node);
Austin Schuh64fab802020-09-09 22:47:47 -0700505 log_namer_->Rotate(node, &node_state_[node_index].log_file_header);
Austin Schuh2f8fd752020-09-01 22:38:28 -0700506 }
507}
508
// Drains every fetcher, writing all messages with event times strictly before
// t, then declares t to be the new synchronized time.  Per fetcher this
// alternates FetchNext with writes until the channel is exhausted or a
// message at/after t is seen (that message is left pending for next cycle).
void Logger::LogUntil(monotonic_clock::time_point t) {
  WriteMissingTimestamps();

  // Write each channel to disk, one at a time.
  for (FetcherStruct &f : fetchers_) {
    while (true) {
      // f.written == true means the currently-fetched message (if any) has
      // already been handled, so advance to the next one.
      if (f.written) {
        const auto start = event_loop_->monotonic_now();
        const bool got_new = f.fetcher->FetchNext();
        const auto end = event_loop_->monotonic_now();
        RecordFetchResult(start, end, got_new, &f);
        if (!got_new) {
          VLOG(2) << "No new data on "
                  << configuration::CleanedChannelToString(
                         f.fetcher->channel());
          break;
        }
        f.written = false;
      }

      // TODO(james): Write tests to exercise this logic.
      // Stop once we reach t; this message stays pending for the next cycle.
      if (f.fetcher->context().monotonic_event_time >= t) {
        break;
      }
      if (f.writer != nullptr) {
        // Write!
        // Pre-size the builder using the running max header overhead so the
        // message usually packs without a reallocation.
        const auto start = event_loop_->monotonic_now();
        flatbuffers::FlatBufferBuilder fbb(f.fetcher->context().size +
                                           max_header_size_);
        fbb.ForceDefaults(true);

        fbb.FinishSizePrefixed(PackMessage(&fbb, f.fetcher->context(),
                                           f.channel_index, f.log_type));
        const auto end = event_loop_->monotonic_now();
        RecordCreateMessageTime(start, end, &f);

        VLOG(2) << "Writing data as node "
                << FlatbufferToJson(event_loop_->node()) << " for channel "
                << configuration::CleanedChannelToString(f.fetcher->channel())
                << " to " << f.writer->filename() << " data "
                << FlatbufferToJson(
                       flatbuffers::GetSizePrefixedRoot<MessageHeader>(
                           fbb.GetBufferPointer()));

        // Track the largest observed header overhead for future pre-sizing.
        max_header_size_ = std::max(max_header_size_,
                                    fbb.GetSize() - f.fetcher->context().size);
        f.writer->QueueSizedFlatbuffer(&fbb);
      }

      if (f.timestamp_writer != nullptr) {
        // And now handle timestamps.
        const auto start = event_loop_->monotonic_now();
        flatbuffers::FlatBufferBuilder fbb;
        fbb.ForceDefaults(true);

        fbb.FinishSizePrefixed(PackMessage(&fbb, f.fetcher->context(),
                                           f.channel_index,
                                           LogType::kLogDeliveryTimeOnly));
        const auto end = event_loop_->monotonic_now();
        RecordCreateMessageTime(start, end, &f);

        VLOG(2) << "Writing timestamps as node "
                << FlatbufferToJson(event_loop_->node()) << " for channel "
                << configuration::CleanedChannelToString(f.fetcher->channel())
                << " to " << f.timestamp_writer->filename() << " timestamp "
                << FlatbufferToJson(
                       flatbuffers::GetSizePrefixedRoot<MessageHeader>(
                           fbb.GetBufferPointer()));

        f.timestamp_writer->QueueSizedFlatbuffer(&fbb);
      }

      if (f.contents_writer != nullptr) {
        const auto start = event_loop_->monotonic_now();
        // And now handle the special message contents channel.  Copy the
        // message into a FlatBufferBuilder and save it to disk.
        // TODO(austin): We can be more efficient here when we start to
        // care...
        flatbuffers::FlatBufferBuilder fbb;
        fbb.ForceDefaults(true);

        // The payload of a remote-timestamp channel is itself a MessageHeader.
        const MessageHeader *msg =
            flatbuffers::GetRoot<MessageHeader>(f.fetcher->context().data);

        logger::MessageHeader::Builder message_header_builder(fbb);

        // TODO(austin): This needs to check the channel_index and confirm
        // that it should be logged before squirreling away the timestamp to
        // disk.  We don't want to log irrelevant timestamps.

        // Note: this must match the same order as MessageBridgeServer and
        // PackMessage.  We want identical headers to have identical
        // on-the-wire formats to make comparing them easier.

        // Translate from the channel index that the event loop uses to the
        // channel index in the log file.
        message_header_builder.add_channel_index(
            event_loop_to_logged_channel_index_[msg->channel_index()]);

        message_header_builder.add_queue_index(msg->queue_index());
        message_header_builder.add_monotonic_sent_time(
            msg->monotonic_sent_time());
        message_header_builder.add_realtime_sent_time(
            msg->realtime_sent_time());

        message_header_builder.add_monotonic_remote_time(
            msg->monotonic_remote_time());
        message_header_builder.add_realtime_remote_time(
            msg->realtime_remote_time());
        message_header_builder.add_remote_queue_index(
            msg->remote_queue_index());

        fbb.FinishSizePrefixed(message_header_builder.Finish());
        const auto end = event_loop_->monotonic_now();
        RecordCreateMessageTime(start, end, &f);

        f.contents_writer->QueueSizedFlatbuffer(&fbb);
      }

      // Mark the current message handled so the next loop iteration fetches.
      f.written = true;
    }
  }
  last_synchronized_time_ = t;
}
633
Brian Silverman1f345222020-09-24 21:14:48 -0700634void Logger::DoLogData(const monotonic_clock::time_point end_time) {
635 // We want to guarantee that messages aren't out of order by more than
Austin Schuhe309d2a2019-11-29 13:25:21 -0800636 // max_out_of_order_duration. To do this, we need sync points. Every write
637 // cycle should be a sync point.
Austin Schuhe309d2a2019-11-29 13:25:21 -0800638
639 do {
640 // Move the sync point up by at most polling_period. This forces one sync
641 // per iteration, even if it is small.
Brian Silverman1f345222020-09-24 21:14:48 -0700642 LogUntil(std::min(last_synchronized_time_ + polling_period_, end_time));
643
644 on_logged_period_();
Austin Schuhe309d2a2019-11-29 13:25:21 -0800645
Austin Schuhe309d2a2019-11-29 13:25:21 -0800646 // If we missed cycles, we could be pretty far behind. Spin until we are
647 // caught up.
Brian Silverman1f345222020-09-24 21:14:48 -0700648 } while (last_synchronized_time_ + polling_period_ < end_time);
Austin Schuhe309d2a2019-11-29 13:25:21 -0800649}
650
Brian Silvermancb805822020-10-06 17:43:35 -0700651void Logger::RecordFetchResult(aos::monotonic_clock::time_point start,
652 aos::monotonic_clock::time_point end,
653 bool got_new, FetcherStruct *fetcher) {
654 const auto duration = end - start;
655 if (!got_new) {
656 ++total_nop_fetch_count_;
657 total_nop_fetch_time_ += duration;
658 return;
659 }
660 ++total_message_fetch_count_;
661 total_message_fetch_bytes_ += fetcher->fetcher->context().size;
662 total_message_fetch_time_ += duration;
663 if (duration > max_message_fetch_time_) {
664 max_message_fetch_time_ = duration;
665 max_message_fetch_time_channel_ = fetcher->channel_index;
666 max_message_fetch_time_size_ = fetcher->fetcher->context().size;
667 }
668}
669
670void Logger::RecordCreateMessageTime(aos::monotonic_clock::time_point start,
671 aos::monotonic_clock::time_point end,
672 FetcherStruct *fetcher) {
673 const auto duration = end - start;
674 total_copy_time_ += duration;
675 ++total_copy_count_;
676 total_copy_bytes_ += fetcher->fetcher->context().size;
677 if (duration > max_copy_time_) {
678 max_copy_time_ = duration;
679 max_copy_time_channel_ = fetcher->channel_index;
680 max_copy_time_size_ = fetcher->fetcher->context().size;
681 }
682}
683
Austin Schuh11d43732020-09-21 17:28:30 -0700684std::vector<LogFile> SortParts(const std::vector<std::string> &parts) {
Austin Schuh5212cad2020-09-09 23:12:09 -0700685 // Start by grouping all parts by UUID, and extracting the part index.
Austin Schuh11d43732020-09-21 17:28:30 -0700686 // Datastructure to hold all the info extracted from a set of parts which go
687 // together so we can sort them afterwords.
688 struct UnsortedLogParts {
689 // Start times.
690 aos::monotonic_clock::time_point monotonic_start_time;
691 aos::realtime_clock::time_point realtime_start_time;
692
693 // Node to save.
694 std::string node;
695
696 // Pairs of the filename and the part index for sorting.
697 std::vector<std::pair<std::string, int>> parts;
698 };
699
Brian Silvermanae7c0332020-09-30 16:58:23 -0700700 // Map holding the log_event_uuid -> second map. The second map holds the
Austin Schuh11d43732020-09-21 17:28:30 -0700701 // parts_uuid -> list of parts for sorting.
702 std::map<std::string, std::map<std::string, UnsortedLogParts>> parts_list;
Austin Schuh5212cad2020-09-09 23:12:09 -0700703
704 // Sort part files without UUIDs and part indexes as well. Extract everything
705 // useful from the log in the first pass, then sort later.
Austin Schuh11d43732020-09-21 17:28:30 -0700706 struct UnsortedOldParts {
707 // Part information with everything but the list of parts.
708 LogParts parts;
709
710 // Tuple of time for the data and filename needed for sorting after
711 // extracting.
Brian Silvermand90905f2020-09-23 14:42:56 -0700712 std::vector<std::pair<monotonic_clock::time_point, std::string>>
713 unsorted_parts;
Austin Schuh5212cad2020-09-09 23:12:09 -0700714 };
715
Austin Schuh11d43732020-09-21 17:28:30 -0700716 // A list of all the old parts which we don't know how to sort using uuids.
717 // There are enough of these in the wild that this is worth supporting.
718 std::vector<UnsortedOldParts> old_parts;
Austin Schuh5212cad2020-09-09 23:12:09 -0700719
Austin Schuh11d43732020-09-21 17:28:30 -0700720 // Now extract everything into our datastructures above for sorting.
Austin Schuh5212cad2020-09-09 23:12:09 -0700721 for (const std::string &part : parts) {
722 FlatbufferVector<LogFileHeader> log_header = ReadHeader(part);
723
Austin Schuh11d43732020-09-21 17:28:30 -0700724 const monotonic_clock::time_point monotonic_start_time(
725 chrono::nanoseconds(log_header.message().monotonic_start_time()));
726 const realtime_clock::time_point realtime_start_time(
727 chrono::nanoseconds(log_header.message().realtime_start_time()));
728
729 const std::string_view node =
730 log_header.message().has_node()
731 ? log_header.message().node()->name()->string_view()
732 : "";
733
Austin Schuh5212cad2020-09-09 23:12:09 -0700734 // Looks like an old log. No UUID, index, and also single node. We have
735 // little to no multi-node log files in the wild without part UUIDs and
736 // indexes which we care much about.
737 if (!log_header.message().has_parts_uuid() &&
738 !log_header.message().has_parts_index() &&
739 !log_header.message().has_node()) {
Austin Schuh5212cad2020-09-09 23:12:09 -0700740 FlatbufferVector<MessageHeader> first_message = ReadNthMessage(part, 0);
Austin Schuh11d43732020-09-21 17:28:30 -0700741 const monotonic_clock::time_point first_message_time(
Austin Schuh5212cad2020-09-09 23:12:09 -0700742 chrono::nanoseconds(first_message.message().monotonic_sent_time()));
Austin Schuh11d43732020-09-21 17:28:30 -0700743
744 // Find anything with a matching start time. They all go together.
745 auto result = std::find_if(
746 old_parts.begin(), old_parts.end(),
747 [&](const UnsortedOldParts &parts) {
748 return parts.parts.monotonic_start_time == monotonic_start_time &&
749 parts.parts.realtime_start_time == realtime_start_time;
750 });
751
752 if (result == old_parts.end()) {
753 old_parts.emplace_back();
754 old_parts.back().parts.monotonic_start_time = monotonic_start_time;
755 old_parts.back().parts.realtime_start_time = realtime_start_time;
756 old_parts.back().unsorted_parts.emplace_back(
757 std::make_pair(first_message_time, part));
758 } else {
759 result->unsorted_parts.emplace_back(
760 std::make_pair(first_message_time, part));
761 }
Austin Schuh5212cad2020-09-09 23:12:09 -0700762 continue;
763 }
764
Brian Silvermanae7c0332020-09-30 16:58:23 -0700765 CHECK(log_header.message().has_log_event_uuid());
Austin Schuh5212cad2020-09-09 23:12:09 -0700766 CHECK(log_header.message().has_parts_uuid());
767 CHECK(log_header.message().has_parts_index());
768
Brian Silvermanae7c0332020-09-30 16:58:23 -0700769 const std::string log_event_uuid =
770 log_header.message().log_event_uuid()->str();
Austin Schuh5212cad2020-09-09 23:12:09 -0700771 const std::string parts_uuid = log_header.message().parts_uuid()->str();
Austin Schuh11d43732020-09-21 17:28:30 -0700772 int32_t parts_index = log_header.message().parts_index();
773
Brian Silvermanae7c0332020-09-30 16:58:23 -0700774 auto log_it = parts_list.find(log_event_uuid);
Austin Schuh11d43732020-09-21 17:28:30 -0700775 if (log_it == parts_list.end()) {
Brian Silvermanae7c0332020-09-30 16:58:23 -0700776 log_it =
777 parts_list
778 .insert(std::make_pair(log_event_uuid,
779 std::map<std::string, UnsortedLogParts>()))
780 .first;
Austin Schuh5212cad2020-09-09 23:12:09 -0700781 }
Austin Schuh11d43732020-09-21 17:28:30 -0700782
783 auto it = log_it->second.find(parts_uuid);
784 if (it == log_it->second.end()) {
785 it = log_it->second.insert(std::make_pair(parts_uuid, UnsortedLogParts()))
786 .first;
787 it->second.monotonic_start_time = monotonic_start_time;
788 it->second.realtime_start_time = realtime_start_time;
789 it->second.node = std::string(node);
790 }
791
792 // First part might be min_time. If it is, try to put a better time on it.
793 if (it->second.monotonic_start_time == monotonic_clock::min_time) {
794 it->second.monotonic_start_time = monotonic_start_time;
795 } else if (monotonic_start_time != monotonic_clock::min_time) {
796 CHECK_EQ(it->second.monotonic_start_time, monotonic_start_time);
797 }
798 if (it->second.realtime_start_time == realtime_clock::min_time) {
799 it->second.realtime_start_time = realtime_start_time;
800 } else if (realtime_start_time != realtime_clock::min_time) {
801 CHECK_EQ(it->second.realtime_start_time, realtime_start_time);
802 }
803
804 it->second.parts.emplace_back(std::make_pair(part, parts_index));
Austin Schuh5212cad2020-09-09 23:12:09 -0700805 }
806
807 CHECK_NE(old_parts.empty(), parts_list.empty())
808 << ": Can't have a mix of old and new parts.";
809
Austin Schuh11d43732020-09-21 17:28:30 -0700810 // Now reformat old_parts to be in the right datastructure to report.
Austin Schuh5212cad2020-09-09 23:12:09 -0700811 if (!old_parts.empty()) {
Austin Schuh11d43732020-09-21 17:28:30 -0700812 std::vector<LogFile> result;
813 for (UnsortedOldParts &p : old_parts) {
814 // Sort by the oldest message in each file.
815 std::sort(
816 p.unsorted_parts.begin(), p.unsorted_parts.end(),
817 [](const std::pair<monotonic_clock::time_point, std::string> &a,
818 const std::pair<monotonic_clock::time_point, std::string> &b) {
819 return a.first < b.first;
820 });
821 LogFile log_file;
822 for (std::pair<monotonic_clock::time_point, std::string> &f :
823 p.unsorted_parts) {
824 p.parts.parts.emplace_back(std::move(f.second));
825 }
826 log_file.parts.emplace_back(std::move(p.parts));
827 result.emplace_back(std::move(log_file));
Austin Schuh5212cad2020-09-09 23:12:09 -0700828 }
Austin Schuh5212cad2020-09-09 23:12:09 -0700829
Austin Schuh11d43732020-09-21 17:28:30 -0700830 return result;
Austin Schuh5212cad2020-09-09 23:12:09 -0700831 }
832
833 // Now, sort them and produce the final vector form.
Austin Schuh11d43732020-09-21 17:28:30 -0700834 std::vector<LogFile> result;
Austin Schuh5212cad2020-09-09 23:12:09 -0700835 result.reserve(parts_list.size());
Brian Silvermand90905f2020-09-23 14:42:56 -0700836 for (std::pair<const std::string, std::map<std::string, UnsortedLogParts>>
837 &logs : parts_list) {
Austin Schuh11d43732020-09-21 17:28:30 -0700838 LogFile new_file;
Brian Silvermanae7c0332020-09-30 16:58:23 -0700839 new_file.log_event_uuid = logs.first;
Austin Schuh11d43732020-09-21 17:28:30 -0700840 for (std::pair<const std::string, UnsortedLogParts> &parts : logs.second) {
841 LogParts new_parts;
842 new_parts.monotonic_start_time = parts.second.monotonic_start_time;
843 new_parts.realtime_start_time = parts.second.realtime_start_time;
Brian Silvermanae7c0332020-09-30 16:58:23 -0700844 new_parts.log_event_uuid = logs.first;
Austin Schuh11d43732020-09-21 17:28:30 -0700845 new_parts.parts_uuid = parts.first;
846 new_parts.node = std::move(parts.second.node);
847
848 std::sort(parts.second.parts.begin(), parts.second.parts.end(),
849 [](const std::pair<std::string, int> &a,
850 const std::pair<std::string, int> &b) {
851 return a.second < b.second;
852 });
853 new_parts.parts.reserve(parts.second.parts.size());
854 for (std::pair<std::string, int> &p : parts.second.parts) {
855 new_parts.parts.emplace_back(std::move(p.first));
856 }
857 new_file.parts.emplace_back(std::move(new_parts));
Austin Schuh5212cad2020-09-09 23:12:09 -0700858 }
Austin Schuh11d43732020-09-21 17:28:30 -0700859 result.emplace_back(std::move(new_file));
860 }
861 return result;
862}
863
864std::ostream &operator<<(std::ostream &stream, const LogFile &file) {
865 stream << "{";
Brian Silvermanae7c0332020-09-30 16:58:23 -0700866 if (!file.log_event_uuid.empty()) {
867 stream << "\"log_event_uuid\": \"" << file.log_event_uuid << "\", ";
Austin Schuh11d43732020-09-21 17:28:30 -0700868 }
869 stream << "\"parts\": [";
870 for (size_t i = 0; i < file.parts.size(); ++i) {
871 if (i != 0u) {
872 stream << ", ";
873 }
874 stream << file.parts[i];
875 }
876 stream << "]}";
877 return stream;
878}
879std::ostream &operator<<(std::ostream &stream, const LogParts &parts) {
880 stream << "{";
Brian Silvermanae7c0332020-09-30 16:58:23 -0700881 if (!parts.log_event_uuid.empty()) {
882 stream << "\"log_event_uuid\": \"" << parts.log_event_uuid << "\", ";
Austin Schuh11d43732020-09-21 17:28:30 -0700883 }
884 if (!parts.parts_uuid.empty()) {
885 stream << "\"parts_uuid\": \"" << parts.parts_uuid << "\", ";
886 }
887 if (!parts.node.empty()) {
888 stream << "\"node\": \"" << parts.node << "\", ";
889 }
890 stream << "\"monotonic_start_time\": " << parts.monotonic_start_time
891 << ", \"realtime_start_time\": " << parts.realtime_start_time << ", [";
892
893 for (size_t i = 0; i < parts.parts.size(); ++i) {
894 if (i != 0u) {
895 stream << ", ";
896 }
897 stream << parts.parts[i];
898 }
899
900 stream << "]}";
901 return stream;
902}
903
904std::vector<std::vector<std::string>> ToLogReaderVector(
905 const std::vector<LogFile> &log_files) {
906 std::vector<std::vector<std::string>> result;
907 for (const LogFile &log_file : log_files) {
908 for (const LogParts &log_parts : log_file.parts) {
909 std::vector<std::string> parts;
910 for (const std::string &part : log_parts.parts) {
911 parts.emplace_back(part);
912 }
913 result.emplace_back(std::move(parts));
914 }
Austin Schuh5212cad2020-09-09 23:12:09 -0700915 }
916 return result;
917}
918
// Convenience constructor for replaying a single log file.  Delegates to the
// single-node (vector-of-filenames) constructor.
LogReader::LogReader(std::string_view filename,
                     const Configuration *replay_configuration)
    : LogReader(std::vector<std::string>{std::string(filename)},
                replay_configuration) {}
923
// Convenience constructor for replaying one ordered set of log part files.
// Delegates to the multi-set (vector-of-vectors) constructor.
LogReader::LogReader(const std::vector<std::string> &filenames,
                     const Configuration *replay_configuration)
    : LogReader(std::vector<std::vector<std::string>>{filenames},
                replay_configuration) {}
928
// TODO(austin): Make this the base and kill the others.  This has much better
// context for sorting.
//
// Constructor taking the output of SortParts() directly; flattens it back to
// filename lists and delegates to the vector-of-vectors constructor.
LogReader::LogReader(const std::vector<LogFile> &log_files,
                     const Configuration *replay_configuration)
    : LogReader(ToLogReaderVector(log_files), replay_configuration) {}
934
// Primary constructor.  Reads the header of the first part file for the
// logged configuration, builds the remapped replay configuration, and
// allocates per-node State slots.  Actual replay setup happens in Register().
//
// NOTE(review): filenames[0][0] is read unconditionally, so filenames must be
// non-empty with a non-empty first set — confirm callers guarantee this.
LogReader::LogReader(const std::vector<std::vector<std::string>> &filenames,
                     const Configuration *replay_configuration)
    : filenames_(filenames),
      log_file_header_(ReadHeader(filenames[0][0])),
      replay_configuration_(replay_configuration) {
  MakeRemappedConfig();

  // Remap all existing remote timestamp channels.  They will be recreated, and
  // the data logged isn't relevant anymore.
  for (const Node *node : configuration::GetNodes(logged_configuration())) {
    std::vector<const Node *> timestamp_logger_nodes =
        configuration::TimestampNodes(logged_configuration(), node);
    for (const Node *remote_node : timestamp_logger_nodes) {
      const std::string channel = absl::StrCat(
          "/aos/remote_timestamps/", remote_node->name()->string_view());
      // The remote timestamp channel must exist in the logged config before we
      // can remap it; fail loudly with the exact channel we looked for.
      CHECK(HasChannel<logger::MessageHeader>(channel, node))
          << ": Failed to find {\"name\": \"" << channel << "\", \"type\": \""
          << logger::MessageHeader::GetFullyQualifiedName() << "\"} for node "
          << node->name()->string_view();
      RemapLoggedChannel<logger::MessageHeader>(channel, node);
    }
  }

  // A replay config, if provided, must agree with the log about whether this
  // is a single-node or multi-node system.
  if (replay_configuration) {
    CHECK_EQ(configuration::MultiNode(configuration()),
             configuration::MultiNode(replay_configuration))
        << ": Log file and replay config need to both be multi or single "
           "node.";
  }

  if (!configuration::MultiNode(configuration())) {
    // Single node: one State, fed by a merger over all the part files.
    states_.emplace_back(
        std::make_unique<State>(std::make_unique<ChannelMerger>(filenames)));
  } else {
    if (replay_configuration) {
      // Multi-node replay configs must cover exactly the logged node list.
      CHECK_EQ(logged_configuration()->nodes()->size(),
               replay_configuration->nodes()->size())
          << ": Log file and replay config need to have matching nodes "
             "lists.";
      for (const Node *node : *logged_configuration()->nodes()) {
        if (configuration::GetNode(replay_configuration, node) == nullptr) {
          LOG(FATAL) << "Found node " << FlatbufferToJson(node)
                     << " in logged config that is not present in the replay "
                        "config.";
        }
      }
    }
    // Slots are filled in later (in Register()), one State per node.
    states_.resize(configuration()->nodes()->size());
  }
}
985
// Tears down the reader.  If we own the event loop factory we deregister
// ourselves; if the caller owns it, they must have called Deregister()
// already or we abort, since the factory still references our state.
LogReader::~LogReader() {
  if (event_loop_factory_unique_ptr_) {
    Deregister();
  } else if (event_loop_factory_ != nullptr) {
    LOG(FATAL) << "Must call Deregister before the SimulatedEventLoopFactory "
                  "is destroyed";
  }
  // Close the timestamp-debugging CSV output if it was opened.
  if (offset_fp_ != nullptr) {
    fclose(offset_fp_);
  }
  // Zero out some buffers.  It's easy to do use-after-frees on these, so make
  // it more obvious.
  if (remapped_configuration_buffer_) {
    remapped_configuration_buffer_->Wipe();
  }
  log_file_header_.Wipe();
}
Austin Schuhe309d2a2019-11-29 13:25:21 -08001003
// Returns the configuration embedded in the log file header (the config the
// system was running when the log was recorded).
const Configuration *LogReader::logged_configuration() const {
  return log_file_header_.message().configuration();
}
1007
// Returns the configuration used for replay, with any requested channel
// remappings applied.
const Configuration *LogReader::configuration() const {
  return remapped_configuration_;
}
1011
// Returns the list of nodes in the replay configuration.
std::vector<const Node *> LogReader::Nodes() const {
  // Because the Node pointer will only be valid if it actually points to
  // memory owned by remapped_configuration_, we need to wait for the
  // remapped_configuration_ to be populated before accessing it.
  //
  // Also, note, that when ever a map is changed, the nodes in here are
  // invalidated.
  CHECK(remapped_configuration_ != nullptr)
      << ": Need to call Register before the node() pointer will be valid.";
  return configuration::GetNodes(remapped_configuration_);
}
Austin Schuh15649d62019-12-28 16:36:38 -08001023
Austin Schuh11d43732020-09-21 17:28:30 -07001024monotonic_clock::time_point LogReader::monotonic_start_time(
1025 const Node *node) const {
Austin Schuh8bd96322020-02-13 21:18:22 -08001026 State *state =
1027 states_[configuration::GetNodeIndex(configuration(), node)].get();
1028 CHECK(state != nullptr) << ": Unknown node " << FlatbufferToJson(node);
1029
Austin Schuh858c9f32020-08-31 16:56:12 -07001030 return state->monotonic_start_time();
Austin Schuhe309d2a2019-11-29 13:25:21 -08001031}
1032
Austin Schuh11d43732020-09-21 17:28:30 -07001033realtime_clock::time_point LogReader::realtime_start_time(
1034 const Node *node) const {
Austin Schuh8bd96322020-02-13 21:18:22 -08001035 State *state =
1036 states_[configuration::GetNodeIndex(configuration(), node)].get();
1037 CHECK(state != nullptr) << ": Unknown node " << FlatbufferToJson(node);
1038
Austin Schuh858c9f32020-08-31 16:56:12 -07001039 return state->realtime_start_time();
Austin Schuhe309d2a2019-11-29 13:25:21 -08001040}
1041
James Kuszmaul84ff3e52020-01-03 19:48:53 -08001042void LogReader::Register() {
1043 event_loop_factory_unique_ptr_ =
Austin Schuhac0771c2020-01-07 18:36:30 -08001044 std::make_unique<SimulatedEventLoopFactory>(configuration());
James Kuszmaul84ff3e52020-01-03 19:48:53 -08001045 Register(event_loop_factory_unique_ptr_.get());
1046}
1047
// Wires the log reader into the given simulated event loop factory:
// creates one State per node, registers replay on each node's event loop,
// builds the clock-offset estimation matrices, solves for the initial
// per-node offsets, and advances simulated time to the latest node start
// time so all nodes begin replay together.
void LogReader::Register(SimulatedEventLoopFactory *event_loop_factory) {
  event_loop_factory_ = event_loop_factory;
  remapped_configuration_ = event_loop_factory_->configuration();

  // Create the per-node State objects first, each with its own merger over
  // all the log part files.
  for (const Node *node : configuration::GetNodes(configuration())) {
    const size_t node_index =
        configuration::GetNodeIndex(configuration(), node);
    states_[node_index] =
        std::make_unique<State>(std::make_unique<ChannelMerger>(filenames_));
    State *state = states_[node_index].get();
    state->set_event_loop(state->SetNodeEventLoopFactory(
        event_loop_factory_->GetNodeEventLoopFactory(node)));

    state->SetChannelCount(logged_configuration()->channels()->size());
  }

  // Register after making all the State objects so we can build references
  // between them.
  for (const Node *node : configuration::GetNodes(configuration())) {
    const size_t node_index =
        configuration::GetNodeIndex(configuration(), node);
    State *state = states_[node_index].get();

    Register(state->event_loop());
  }

  if (live_nodes_ == 0) {
    LOG(FATAL)
        << "Don't have logs from any of the nodes in the replay config--are "
           "you sure that the replay config matches the original config?";
  }

  // We need to now seed our per-node time offsets and get everything set up
  // to run.
  const size_t num_nodes = nodes_count();

  // It is easiest to solve for per node offsets with a matrix rather than
  // trying to solve the equations by hand.  So let's get after it.
  //
  // Now, build up the map matrix.
  //
  // offset_matrix_ = (map_matrix_ + slope_matrix_) * [ta; tb; tc]
  //
  // One row per node-pair filter, plus row 0 which constrains the average of
  // all node clocks to the distributed clock.
  map_matrix_ = Eigen::Matrix<mpq_class, Eigen::Dynamic, Eigen::Dynamic>::Zero(
      filters_.size() + 1, num_nodes);
  slope_matrix_ =
      Eigen::Matrix<mpq_class, Eigen::Dynamic, Eigen::Dynamic>::Zero(
          filters_.size() + 1, num_nodes);

  offset_matrix_ =
      Eigen::Matrix<mpq_class, Eigen::Dynamic, 1>::Zero(filters_.size() + 1);
  valid_matrix_ =
      Eigen::Matrix<bool, Eigen::Dynamic, 1>::Zero(filters_.size() + 1);
  last_valid_matrix_ =
      Eigen::Matrix<bool, Eigen::Dynamic, 1>::Zero(filters_.size() + 1);

  time_offset_matrix_ = Eigen::VectorXd::Zero(num_nodes);
  time_slope_matrix_ = Eigen::VectorXd::Zero(num_nodes);

  // All times should average out to the distributed clock.
  for (int i = 0; i < map_matrix_.cols(); ++i) {
    // 1/num_nodes.
    map_matrix_(0, i) = mpq_class(1, num_nodes);
  }
  valid_matrix_(0) = true;

  {
    // Now, add the a - b -> sample elements.  Each filter contributes one row
    // whose slope/offset/valid cells it updates in place via the pointers set
    // here.
    size_t i = 1;
    for (std::pair<const std::tuple<const Node *, const Node *>,
                   std::tuple<message_bridge::NoncausalOffsetEstimator>>
             &filter : filters_) {
      const Node *const node_a = std::get<0>(filter.first);
      const Node *const node_b = std::get<1>(filter.first);

      const size_t node_a_index =
          configuration::GetNodeIndex(configuration(), node_a);
      const size_t node_b_index =
          configuration::GetNodeIndex(configuration(), node_b);

      // -a
      map_matrix_(i, node_a_index) = mpq_class(-1);
      // +b
      map_matrix_(i, node_b_index) = mpq_class(1);

      // -> sample
      std::get<0>(filter.second)
          .set_slope_pointer(&slope_matrix_(i, node_a_index));
      std::get<0>(filter.second).set_offset_pointer(&offset_matrix_(i, 0));

      valid_matrix_(i) = false;
      std::get<0>(filter.second).set_valid_pointer(&valid_matrix_(i));

      ++i;
    }
  }

  for (std::unique_ptr<State> &state : states_) {
    state->SeedSortedMessages();
  }

  // Rank of the map matrix tells you if all the nodes are in communication
  // with each other, which tells you if the offsets are observable.
  const size_t connected_nodes =
      Eigen::FullPivLU<
          Eigen::Matrix<mpq_class, Eigen::Dynamic, Eigen::Dynamic>>(map_matrix_)
          .rank();

  // We don't need to support isolated nodes until someone has a real use
  // case.
  CHECK_EQ(connected_nodes, num_nodes)
      << ": There is a node which isn't communicating with the rest.";

  // And solve.
  UpdateOffsets();

  // We want to start the log file at the last start time of the log files
  // from all the nodes.  Compute how long each node's simulation needs to run
  // to move time to this point.
  distributed_clock::time_point start_time = distributed_clock::min_time;

  // TODO(austin): We want an "OnStart" callback for each node rather than
  // running until the last node.

  for (std::unique_ptr<State> &state : states_) {
    VLOG(1) << "Start time is " << state->monotonic_start_time() << " for node "
            << MaybeNodeName(state->event_loop()->node()) << "now "
            << state->monotonic_now();
    // And start computing the start time on the distributed clock now that
    // that works.
    start_time = std::max(
        start_time, state->ToDistributedClock(state->monotonic_start_time()));
  }

  CHECK_GE(start_time, distributed_clock::epoch())
      << ": Hmm, we have a node starting before the start of time. Offset "
         "everything.";

  // Forwarding is tracked per channel.  If it is enabled, we want to turn it
  // off.  Otherwise messages replayed will get forwarded across to the other
  // nodes, and also replayed on the other nodes.  This may not satisfy all
  // our users, but it'll start the discussion.
  if (configuration::MultiNode(event_loop_factory_->configuration())) {
    for (size_t i = 0; i < logged_configuration()->channels()->size(); ++i) {
      const Channel *channel = logged_configuration()->channels()->Get(i);
      const Node *node = configuration::GetNode(
          configuration(), channel->source_node()->string_view());

      State *state =
          states_[configuration::GetNodeIndex(configuration(), node)].get();

      const Channel *remapped_channel =
          RemapChannel(state->event_loop(), channel);

      event_loop_factory_->DisableForwarding(remapped_channel);
    }

    // If we are replaying a log, we don't want a bunch of redundant messages
    // from both the real message bridge and simulated message bridge.
    event_loop_factory_->DisableStatistics();
  }

  // While we are starting the system up, we might be relying on matching data
  // to timestamps on log files where the timestamp log file starts before the
  // data.  In this case, it is reasonable to expect missing data.
  ignore_missing_data_ = true;
  VLOG(1) << "Running until " << start_time << " in Register";
  event_loop_factory_->RunFor(start_time.time_since_epoch());
  VLOG(1) << "At start time";
  // Now that we are running for real, missing data means that the log file is
  // corrupted or went wrong.
  ignore_missing_data_ = false;

  for (std::unique_ptr<State> &state : states_) {
    // Make the RT clock be correct before handing it to the user.
    if (state->realtime_start_time() != realtime_clock::min_time) {
      state->SetRealtimeOffset(state->monotonic_start_time(),
                               state->realtime_start_time());
    }
    VLOG(1) << "Start time is " << state->monotonic_start_time() << " for node "
            << MaybeNodeName(state->event_loop()->node()) << "now "
            << state->monotonic_now();
  }

  // Seed the CSV debugging output with the current per-node times so the
  // traces start at a common reference point.
  if (FLAGS_timestamps_to_csv) {
    for (std::pair<const std::tuple<const Node *, const Node *>,
                   std::tuple<message_bridge::NoncausalOffsetEstimator>>
             &filter : filters_) {
      const Node *const node_a = std::get<0>(filter.first);
      const Node *const node_b = std::get<1>(filter.first);

      std::get<0>(filter.second)
          .SetFirstFwdTime(event_loop_factory_->GetNodeEventLoopFactory(node_a)
                               ->monotonic_now());
      std::get<0>(filter.second)
          .SetFirstRevTime(event_loop_factory_->GetNodeEventLoopFactory(node_b)
                               ->monotonic_now());
    }
  }
}
1247
Austin Schuh2f8fd752020-09-01 22:38:28 -07001248void LogReader::UpdateOffsets() {
1249 VLOG(2) << "Samples are " << offset_matrix_;
1250 VLOG(2) << "Map is " << (map_matrix_ + slope_matrix_);
1251 std::tie(time_slope_matrix_, time_offset_matrix_) = SolveOffsets();
1252 Eigen::IOFormat HeavyFmt(Eigen::FullPrecision, 0, ", ", ";\n", "[", "]", "[",
1253 "]");
1254 VLOG(1) << "First slope " << time_slope_matrix_.transpose().format(HeavyFmt)
1255 << " offset " << time_offset_matrix_.transpose().format(HeavyFmt);
1256
1257 size_t node_index = 0;
1258 for (std::unique_ptr<State> &state : states_) {
1259 state->SetDistributedOffset(offset(node_index), slope(node_index));
1260 VLOG(1) << "Offset for node " << node_index << " "
1261 << MaybeNodeName(state->event_loop()->node()) << "is "
1262 << aos::distributed_clock::time_point(offset(node_index))
1263 << " slope " << std::setprecision(9) << std::fixed
1264 << slope(node_index);
1265 ++node_index;
1266 }
1267
1268 if (VLOG_IS_ON(1)) {
1269 LogFit("Offset is");
1270 }
1271}
1272
// Dumps (at VLOG(1)) the current time-synchronization picture: where each
// node's clock sits on the distributed clock, and, for every node pair with
// samples, how well the globally-solved slope/offset agrees with that pair's
// own estimator fit.  `prefix` is forwarded to the estimator's LogFit lines.
// Diagnostic only; mutates nothing.
void LogReader::LogFit(std::string_view prefix) {
  for (std::unique_ptr<State> &state : states_) {
    VLOG(1) << MaybeNodeName(state->event_loop()->node()) << " now "
            << state->monotonic_now() << " distributed "
            << event_loop_factory_->distributed_now();
  }

  for (std::pair<const std::tuple<const Node *, const Node *>,
                 std::tuple<message_bridge::NoncausalOffsetEstimator>> &filter :
       filters_) {
    message_bridge::NoncausalOffsetEstimator *estimator =
        &std::get<0>(filter.second);

    // No samples yet for this node pair -> nothing meaningful to report.
    if (estimator->a_timestamps().size() == 0 &&
        estimator->b_timestamps().size() == 0) {
      continue;
    }

    if (VLOG_IS_ON(1)) {
      estimator->LogFit(prefix);
    }

    const Node *const node_a = std::get<0>(filter.first);
    const Node *const node_b = std::get<1>(filter.first);

    const size_t node_a_index =
        configuration::GetNodeIndex(configuration(), node_a);
    const size_t node_b_index =
        configuration::GetNodeIndex(configuration(), node_b);

    // Reconstruct the pairwise a->b slope/offset implied by the global
    // per-node solution, so it can be compared against the estimator's
    // directly-fit slope/offset below.
    const double recovered_slope =
        slope(node_b_index) / slope(node_a_index) - 1.0;
    const int64_t recovered_offset =
        offset(node_b_index).count() - offset(node_a_index).count() *
                                           slope(node_b_index) /
                                           slope(node_a_index);

    VLOG(1) << "Recovered slope " << std::setprecision(20) << recovered_slope
            << " (error " << recovered_slope - estimator->fit().slope() << ") "
            << " offset " << std::setprecision(20) << recovered_offset
            << " (error "
            << recovered_offset - estimator->fit().offset().count() << ")";

    // Round-trip the first two A-side sample times through the distributed
    // clock and back onto B, and compare against what the estimator's linear
    // fit predicts.  Flags samples that land on the wrong side of "now".
    const aos::distributed_clock::time_point a0 =
        states_[node_a_index]->ToDistributedClock(
            std::get<0>(estimator->a_timestamps()[0]));
    const aos::distributed_clock::time_point a1 =
        states_[node_a_index]->ToDistributedClock(
            std::get<0>(estimator->a_timestamps()[1]));

    VLOG(1) << node_a->name()->string_view() << " timestamps()[0] = "
            << std::get<0>(estimator->a_timestamps()[0]) << " -> " << a0
            << " distributed -> " << node_b->name()->string_view() << " "
            << states_[node_b_index]->FromDistributedClock(a0) << " should be "
            << aos::monotonic_clock::time_point(
                   std::chrono::nanoseconds(static_cast<int64_t>(
                       std::get<0>(estimator->a_timestamps()[0])
                           .time_since_epoch()
                           .count() *
                       (1.0 + estimator->fit().slope()))) +
                   estimator->fit().offset())
            << ((a0 <= event_loop_factory_->distributed_now())
                    ? ""
                    : " After now, investigate");
    VLOG(1) << node_a->name()->string_view() << " timestamps()[1] = "
            << std::get<0>(estimator->a_timestamps()[1]) << " -> " << a1
            << " distributed -> " << node_b->name()->string_view() << " "
            << states_[node_b_index]->FromDistributedClock(a1) << " should be "
            << aos::monotonic_clock::time_point(
                   std::chrono::nanoseconds(static_cast<int64_t>(
                       std::get<0>(estimator->a_timestamps()[1])
                           .time_since_epoch()
                           .count() *
                       (1.0 + estimator->fit().slope()))) +
                   estimator->fit().offset())
            << ((event_loop_factory_->distributed_now() <= a1)
                    ? ""
                    : " Before now, investigate");

    // Same sanity logging for the first two B-side sample times.
    const aos::distributed_clock::time_point b0 =
        states_[node_b_index]->ToDistributedClock(
            std::get<0>(estimator->b_timestamps()[0]));
    const aos::distributed_clock::time_point b1 =
        states_[node_b_index]->ToDistributedClock(
            std::get<0>(estimator->b_timestamps()[1]));

    VLOG(1) << node_b->name()->string_view() << " timestamps()[0] = "
            << std::get<0>(estimator->b_timestamps()[0]) << " -> " << b0
            << " distributed -> " << node_a->name()->string_view() << " "
            << states_[node_a_index]->FromDistributedClock(b0)
            << ((b0 <= event_loop_factory_->distributed_now())
                    ? ""
                    : " After now, investigate");
    VLOG(1) << node_b->name()->string_view() << " timestamps()[1] = "
            << std::get<0>(estimator->b_timestamps()[1]) << " -> " << b1
            << " distributed -> " << node_a->name()->string_view() << " "
            << states_[node_a_index]->FromDistributedClock(b1)
            << ((event_loop_factory_->distributed_now() <= b1)
                    ? ""
                    : " Before now, investigate");
  }
}
1375
1376message_bridge::NoncausalOffsetEstimator *LogReader::GetFilter(
Austin Schuh8bd96322020-02-13 21:18:22 -08001377 const Node *node_a, const Node *node_b) {
1378 CHECK_NE(node_a, node_b);
1379 CHECK_EQ(configuration::GetNode(configuration(), node_a), node_a);
1380 CHECK_EQ(configuration::GetNode(configuration(), node_b), node_b);
1381
1382 if (node_a > node_b) {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001383 return GetFilter(node_b, node_a);
Austin Schuh8bd96322020-02-13 21:18:22 -08001384 }
1385
1386 auto tuple = std::make_tuple(node_a, node_b);
1387
1388 auto it = filters_.find(tuple);
1389
1390 if (it == filters_.end()) {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001391 auto &x =
1392 filters_
1393 .insert(std::make_pair(
1394 tuple, std::make_tuple(message_bridge::NoncausalOffsetEstimator(
1395 node_a, node_b))))
1396 .first->second;
Austin Schuh8bd96322020-02-13 21:18:22 -08001397 if (FLAGS_timestamps_to_csv) {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001398 std::get<0>(x).SetFwdCsvFileName(absl::StrCat(
1399 "/tmp/timestamp_noncausal_", node_a->name()->string_view(), "_",
1400 node_b->name()->string_view()));
1401 std::get<0>(x).SetRevCsvFileName(absl::StrCat(
1402 "/tmp/timestamp_noncausal_", node_b->name()->string_view(), "_",
1403 node_a->name()->string_view()));
Austin Schuh8bd96322020-02-13 21:18:22 -08001404 }
1405
Austin Schuh2f8fd752020-09-01 22:38:28 -07001406 return &std::get<0>(x);
Austin Schuh8bd96322020-02-13 21:18:22 -08001407 } else {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001408 return &std::get<0>(it->second);
Austin Schuh8bd96322020-02-13 21:18:22 -08001409 }
1410}
1411
Austin Schuhe309d2a2019-11-29 13:25:21 -08001412void LogReader::Register(EventLoop *event_loop) {
Austin Schuh8bd96322020-02-13 21:18:22 -08001413 State *state =
1414 states_[configuration::GetNodeIndex(configuration(), event_loop->node())]
1415 .get();
Austin Schuh6f3babe2020-01-26 20:34:50 -08001416
Austin Schuh858c9f32020-08-31 16:56:12 -07001417 state->set_event_loop(event_loop);
Austin Schuhe309d2a2019-11-29 13:25:21 -08001418
Tyler Chatow67ddb032020-01-12 14:30:04 -08001419 // We don't run timing reports when trying to print out logged data, because
1420 // otherwise we would end up printing out the timing reports themselves...
1421 // This is only really relevant when we are replaying into a simulation.
Austin Schuh6f3babe2020-01-26 20:34:50 -08001422 event_loop->SkipTimingReport();
1423 event_loop->SkipAosLog();
Austin Schuh39788ff2019-12-01 18:22:57 -08001424
Austin Schuh858c9f32020-08-31 16:56:12 -07001425 const bool has_data = state->SetNode();
Austin Schuhe309d2a2019-11-29 13:25:21 -08001426
Austin Schuh8d7e0bb2020-10-02 17:57:00 -07001427 for (size_t logged_channel_index = 0;
1428 logged_channel_index < logged_configuration()->channels()->size();
1429 ++logged_channel_index) {
1430 const Channel *channel = RemapChannel(
1431 event_loop,
1432 logged_configuration()->channels()->Get(logged_channel_index));
Austin Schuh8bd96322020-02-13 21:18:22 -08001433
Austin Schuh2f8fd752020-09-01 22:38:28 -07001434 message_bridge::NoncausalOffsetEstimator *filter = nullptr;
Austin Schuh8d7e0bb2020-10-02 17:57:00 -07001435 aos::Sender<MessageHeader> *remote_timestamp_sender = nullptr;
1436
1437 State *source_state = nullptr;
Austin Schuh8bd96322020-02-13 21:18:22 -08001438
1439 if (!configuration::ChannelIsSendableOnNode(channel, event_loop->node()) &&
1440 configuration::ChannelIsReadableOnNode(channel, event_loop->node())) {
Austin Schuh8d7e0bb2020-10-02 17:57:00 -07001441 // We've got a message which is being forwarded to this node.
1442 const Node *source_node = configuration::GetNode(
Austin Schuh8bd96322020-02-13 21:18:22 -08001443 event_loop->configuration(), channel->source_node()->string_view());
Austin Schuh8d7e0bb2020-10-02 17:57:00 -07001444 filter = GetFilter(event_loop->node(), source_node);
Austin Schuh8bd96322020-02-13 21:18:22 -08001445
Austin Schuh8d7e0bb2020-10-02 17:57:00 -07001446 // Delivery timestamps are supposed to be logged back on the source node.
1447 // Configure remote timestamps to be sent.
1448 const bool delivery_time_is_logged =
1449 configuration::ConnectionDeliveryTimeIsLoggedOnNode(
1450 channel, event_loop->node(), source_node);
1451
1452 source_state =
1453 states_[configuration::GetNodeIndex(configuration(), source_node)]
1454 .get();
1455
1456 if (delivery_time_is_logged) {
1457 remote_timestamp_sender =
1458 source_state->RemoteTimestampSender(event_loop->node());
Austin Schuh8bd96322020-02-13 21:18:22 -08001459 }
1460 }
Austin Schuh858c9f32020-08-31 16:56:12 -07001461
Austin Schuh8d7e0bb2020-10-02 17:57:00 -07001462 state->SetChannel(
1463 logged_channel_index,
1464 configuration::ChannelIndex(event_loop->configuration(), channel),
1465 event_loop->MakeRawSender(channel), filter, remote_timestamp_sender,
1466 source_state);
Austin Schuhe309d2a2019-11-29 13:25:21 -08001467 }
1468
Austin Schuh6aa77be2020-02-22 21:06:40 -08001469 // If we didn't find any log files with data in them, we won't ever get a
1470 // callback or be live. So skip the rest of the setup.
1471 if (!has_data) {
1472 return;
1473 }
1474
Austin Schuh858c9f32020-08-31 16:56:12 -07001475 state->set_timer_handler(event_loop->AddTimer([this, state]() {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001476 VLOG(1) << "Starting sending " << MaybeNodeName(state->event_loop()->node())
1477 << "at " << state->event_loop()->context().monotonic_event_time
1478 << " now " << state->monotonic_now();
Austin Schuh858c9f32020-08-31 16:56:12 -07001479 if (state->OldestMessageTime() == monotonic_clock::max_time) {
Austin Schuh6f3babe2020-01-26 20:34:50 -08001480 --live_nodes_;
Austin Schuh2f8fd752020-09-01 22:38:28 -07001481 VLOG(1) << MaybeNodeName(state->event_loop()->node()) << "Node down!";
Austin Schuh6f3babe2020-01-26 20:34:50 -08001482 if (live_nodes_ == 0) {
1483 event_loop_factory_->Exit();
1484 }
James Kuszmaul314f1672020-01-03 20:02:08 -08001485 return;
1486 }
Austin Schuh6f3babe2020-01-26 20:34:50 -08001487 TimestampMerger::DeliveryTimestamp channel_timestamp;
Austin Schuh05b70472020-01-01 17:11:17 -08001488 int channel_index;
1489 FlatbufferVector<MessageHeader> channel_data =
1490 FlatbufferVector<MessageHeader>::Empty();
1491
Austin Schuh2f8fd752020-09-01 22:38:28 -07001492 if (VLOG_IS_ON(1)) {
1493 LogFit("Offset was");
1494 }
1495
1496 bool update_time;
Austin Schuh05b70472020-01-01 17:11:17 -08001497 std::tie(channel_timestamp, channel_index, channel_data) =
Austin Schuh2f8fd752020-09-01 22:38:28 -07001498 state->PopOldest(&update_time);
Austin Schuh05b70472020-01-01 17:11:17 -08001499
Austin Schuhe309d2a2019-11-29 13:25:21 -08001500 const monotonic_clock::time_point monotonic_now =
Austin Schuh858c9f32020-08-31 16:56:12 -07001501 state->event_loop()->context().monotonic_event_time;
Austin Schuh2f8fd752020-09-01 22:38:28 -07001502 if (!FLAGS_skip_order_validation) {
1503 CHECK(monotonic_now == channel_timestamp.monotonic_event_time)
1504 << ": " << FlatbufferToJson(state->event_loop()->node()) << " Now "
1505 << monotonic_now << " trying to send "
1506 << channel_timestamp.monotonic_event_time << " failure "
1507 << state->DebugString();
1508 } else if (monotonic_now != channel_timestamp.monotonic_event_time) {
1509 LOG(WARNING) << "Check failed: monotonic_now == "
1510 "channel_timestamp.monotonic_event_time) ("
1511 << monotonic_now << " vs. "
1512 << channel_timestamp.monotonic_event_time
1513 << "): " << FlatbufferToJson(state->event_loop()->node())
1514 << " Now " << monotonic_now << " trying to send "
1515 << channel_timestamp.monotonic_event_time << " failure "
1516 << state->DebugString();
1517 }
Austin Schuhe309d2a2019-11-29 13:25:21 -08001518
Austin Schuh6f3babe2020-01-26 20:34:50 -08001519 if (channel_timestamp.monotonic_event_time >
Austin Schuh858c9f32020-08-31 16:56:12 -07001520 state->monotonic_start_time() ||
Austin Schuh15649d62019-12-28 16:36:38 -08001521 event_loop_factory_ != nullptr) {
Austin Schuh8bd96322020-02-13 21:18:22 -08001522 if ((!ignore_missing_data_ && !FLAGS_skip_missing_forwarding_entries &&
Austin Schuh858c9f32020-08-31 16:56:12 -07001523 !state->at_end()) ||
Austin Schuh05b70472020-01-01 17:11:17 -08001524 channel_data.message().data() != nullptr) {
1525 CHECK(channel_data.message().data() != nullptr)
1526 << ": Got a message without data. Forwarding entry which was "
Austin Schuh2f8fd752020-09-01 22:38:28 -07001527 "not matched? Use --skip_missing_forwarding_entries to "
Brian Silverman87ac0402020-09-17 14:47:01 -07001528 "ignore this.";
Austin Schuh92547522019-12-28 14:33:43 -08001529
Austin Schuh2f8fd752020-09-01 22:38:28 -07001530 if (update_time) {
Austin Schuh8bd96322020-02-13 21:18:22 -08001531 // Confirm that the message was sent on the sending node before the
1532 // destination node (this node). As a proxy, do this by making sure
1533 // that time on the source node is past when the message was sent.
Austin Schuh2f8fd752020-09-01 22:38:28 -07001534 if (!FLAGS_skip_order_validation) {
1535 CHECK_LT(channel_timestamp.monotonic_remote_time,
1536 state->monotonic_remote_now(channel_index))
1537 << state->event_loop()->node()->name()->string_view() << " to "
1538 << state->remote_node(channel_index)->name()->string_view()
1539 << " " << state->DebugString();
1540 } else if (channel_timestamp.monotonic_remote_time >=
1541 state->monotonic_remote_now(channel_index)) {
1542 LOG(WARNING)
1543 << "Check failed: channel_timestamp.monotonic_remote_time < "
1544 "state->monotonic_remote_now(channel_index) ("
1545 << channel_timestamp.monotonic_remote_time << " vs. "
1546 << state->monotonic_remote_now(channel_index) << ") "
1547 << state->event_loop()->node()->name()->string_view() << " to "
1548 << state->remote_node(channel_index)->name()->string_view()
1549 << " currently " << channel_timestamp.monotonic_event_time
1550 << " ("
1551 << state->ToDistributedClock(
1552 channel_timestamp.monotonic_event_time)
1553 << ") remote event time "
1554 << channel_timestamp.monotonic_remote_time << " ("
1555 << state->RemoteToDistributedClock(
1556 channel_index, channel_timestamp.monotonic_remote_time)
1557 << ") " << state->DebugString();
1558 }
Austin Schuh8bd96322020-02-13 21:18:22 -08001559
1560 if (FLAGS_timestamps_to_csv) {
1561 if (offset_fp_ == nullptr) {
1562 offset_fp_ = fopen("/tmp/offsets.csv", "w");
1563 fprintf(
1564 offset_fp_,
1565 "# time_since_start, offset node 0, offset node 1, ...\n");
1566 first_time_ = channel_timestamp.realtime_event_time;
1567 }
1568
1569 fprintf(offset_fp_, "%.9f",
1570 std::chrono::duration_cast<std::chrono::duration<double>>(
1571 channel_timestamp.realtime_event_time - first_time_)
1572 .count());
Austin Schuh2f8fd752020-09-01 22:38:28 -07001573 for (int i = 1; i < time_offset_matrix_.rows(); ++i) {
1574 fprintf(offset_fp_, ", %.9f",
1575 time_offset_matrix_(i, 0) +
1576 time_slope_matrix_(i, 0) *
1577 chrono::duration<double>(
1578 event_loop_factory_->distributed_now()
1579 .time_since_epoch())
1580 .count());
Austin Schuh8bd96322020-02-13 21:18:22 -08001581 }
1582 fprintf(offset_fp_, "\n");
1583 }
Austin Schuh8bd96322020-02-13 21:18:22 -08001584 }
1585
Austin Schuh15649d62019-12-28 16:36:38 -08001586 // If we have access to the factory, use it to fix the realtime time.
Austin Schuh858c9f32020-08-31 16:56:12 -07001587 state->SetRealtimeOffset(channel_timestamp.monotonic_event_time,
1588 channel_timestamp.realtime_event_time);
Austin Schuh15649d62019-12-28 16:36:38 -08001589
Austin Schuh2f8fd752020-09-01 22:38:28 -07001590 VLOG(1) << MaybeNodeName(state->event_loop()->node()) << "Sending "
1591 << channel_timestamp.monotonic_event_time;
1592 // TODO(austin): std::move channel_data in and make that efficient in
1593 // simulation.
Austin Schuh858c9f32020-08-31 16:56:12 -07001594 state->Send(channel_index, channel_data.message().data()->Data(),
Austin Schuh8d7e0bb2020-10-02 17:57:00 -07001595 channel_data.message().data()->size(), channel_timestamp);
Austin Schuh2f8fd752020-09-01 22:38:28 -07001596 } else if (state->at_end() && !ignore_missing_data_) {
Austin Schuh8bd96322020-02-13 21:18:22 -08001597 // We are at the end of the log file and found missing data. Finish
Austin Schuh2f8fd752020-09-01 22:38:28 -07001598 // reading the rest of the log file and call it quits. We don't want
1599 // to replay partial data.
Austin Schuh858c9f32020-08-31 16:56:12 -07001600 while (state->OldestMessageTime() != monotonic_clock::max_time) {
1601 bool update_time_dummy;
1602 state->PopOldest(&update_time_dummy);
Austin Schuh8bd96322020-02-13 21:18:22 -08001603 }
Austin Schuh2f8fd752020-09-01 22:38:28 -07001604 } else {
1605 CHECK(channel_data.message().data() == nullptr) << ": Nullptr";
Austin Schuh92547522019-12-28 14:33:43 -08001606 }
Austin Schuhe309d2a2019-11-29 13:25:21 -08001607 } else {
Austin Schuh6f3babe2020-01-26 20:34:50 -08001608 LOG(WARNING)
1609 << "Not sending data from before the start of the log file. "
1610 << channel_timestamp.monotonic_event_time.time_since_epoch().count()
1611 << " start " << monotonic_start_time().time_since_epoch().count()
1612 << " " << FlatbufferToJson(channel_data);
Austin Schuhe309d2a2019-11-29 13:25:21 -08001613 }
1614
Austin Schuh858c9f32020-08-31 16:56:12 -07001615 const monotonic_clock::time_point next_time = state->OldestMessageTime();
Austin Schuh6f3babe2020-01-26 20:34:50 -08001616 if (next_time != monotonic_clock::max_time) {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001617 VLOG(1) << "Scheduling " << MaybeNodeName(state->event_loop()->node())
1618 << "wakeup for " << next_time << "("
1619 << state->ToDistributedClock(next_time)
1620 << " distributed), now is " << state->monotonic_now();
Austin Schuh858c9f32020-08-31 16:56:12 -07001621 state->Setup(next_time);
James Kuszmaul314f1672020-01-03 20:02:08 -08001622 } else {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001623 VLOG(1) << MaybeNodeName(state->event_loop()->node())
1624 << "No next message, scheduling shutdown";
1625 // Set a timer up immediately after now to die. If we don't do this,
1626 // then the senders waiting on the message we just read will never get
1627 // called.
Austin Schuheecb9282020-01-08 17:43:30 -08001628 if (event_loop_factory_ != nullptr) {
Austin Schuh858c9f32020-08-31 16:56:12 -07001629 state->Setup(monotonic_now + event_loop_factory_->send_delay() +
1630 std::chrono::nanoseconds(1));
Austin Schuheecb9282020-01-08 17:43:30 -08001631 }
Austin Schuhe309d2a2019-11-29 13:25:21 -08001632 }
Austin Schuh8bd96322020-02-13 21:18:22 -08001633
Austin Schuh2f8fd752020-09-01 22:38:28 -07001634 // Once we make this call, the current time changes. So do everything
1635 // which involves time before changing it. That especially includes
1636 // sending the message.
1637 if (update_time) {
1638 VLOG(1) << MaybeNodeName(state->event_loop()->node())
1639 << "updating offsets";
1640
1641 std::vector<aos::monotonic_clock::time_point> before_times;
1642 before_times.resize(states_.size());
1643 std::transform(states_.begin(), states_.end(), before_times.begin(),
1644 [](const std::unique_ptr<State> &state) {
1645 return state->monotonic_now();
1646 });
1647
1648 for (size_t i = 0; i < states_.size(); ++i) {
Brian Silvermand90905f2020-09-23 14:42:56 -07001649 VLOG(1) << MaybeNodeName(states_[i]->event_loop()->node()) << "before "
1650 << states_[i]->monotonic_now();
Austin Schuh2f8fd752020-09-01 22:38:28 -07001651 }
1652
Austin Schuh8bd96322020-02-13 21:18:22 -08001653 UpdateOffsets();
Austin Schuh2f8fd752020-09-01 22:38:28 -07001654 VLOG(1) << MaybeNodeName(state->event_loop()->node()) << "Now is now "
1655 << state->monotonic_now();
1656
1657 for (size_t i = 0; i < states_.size(); ++i) {
Brian Silvermand90905f2020-09-23 14:42:56 -07001658 VLOG(1) << MaybeNodeName(states_[i]->event_loop()->node()) << "after "
1659 << states_[i]->monotonic_now();
Austin Schuh2f8fd752020-09-01 22:38:28 -07001660 }
1661
1662 // TODO(austin): We should be perfect.
1663 const std::chrono::nanoseconds kTolerance{3};
1664 if (!FLAGS_skip_order_validation) {
1665 CHECK_GE(next_time, state->monotonic_now())
1666 << ": Time skipped the next event.";
1667
1668 for (size_t i = 0; i < states_.size(); ++i) {
1669 CHECK_GE(states_[i]->monotonic_now(), before_times[i] - kTolerance)
1670 << ": Time changed too much on node "
1671 << MaybeNodeName(states_[i]->event_loop()->node());
1672 CHECK_LE(states_[i]->monotonic_now(), before_times[i] + kTolerance)
1673 << ": Time changed too much on node "
1674 << states_[i]->event_loop()->node()->name()->string_view();
1675 }
1676 } else {
1677 if (next_time < state->monotonic_now()) {
1678 LOG(WARNING) << "Check failed: next_time >= "
1679 "state->monotonic_now() ("
1680 << next_time << " vs. " << state->monotonic_now()
1681 << "): Time skipped the next event.";
1682 }
1683 for (size_t i = 0; i < states_.size(); ++i) {
1684 if (states_[i]->monotonic_now() >= before_times[i] - kTolerance) {
1685 LOG(WARNING) << "Check failed: "
1686 "states_[i]->monotonic_now() "
1687 ">= before_times[i] - kTolerance ("
1688 << states_[i]->monotonic_now() << " vs. "
1689 << before_times[i] - kTolerance
1690 << ") : Time changed too much on node "
1691 << MaybeNodeName(states_[i]->event_loop()->node());
1692 }
1693 if (states_[i]->monotonic_now() <= before_times[i] + kTolerance) {
1694 LOG(WARNING) << "Check failed: "
1695 "states_[i]->monotonic_now() "
1696 "<= before_times[i] + kTolerance ("
1697 << states_[i]->monotonic_now() << " vs. "
1698 << before_times[i] - kTolerance
1699 << ") : Time changed too much on node "
1700 << MaybeNodeName(states_[i]->event_loop()->node());
1701 }
1702 }
1703 }
Austin Schuh8bd96322020-02-13 21:18:22 -08001704 }
Austin Schuh2f8fd752020-09-01 22:38:28 -07001705
1706 VLOG(1) << MaybeNodeName(state->event_loop()->node()) << "Done sending at "
1707 << state->event_loop()->context().monotonic_event_time << " now "
1708 << state->monotonic_now();
Austin Schuh858c9f32020-08-31 16:56:12 -07001709 }));
Austin Schuhe309d2a2019-11-29 13:25:21 -08001710
Austin Schuh6f3babe2020-01-26 20:34:50 -08001711 ++live_nodes_;
1712
Austin Schuh858c9f32020-08-31 16:56:12 -07001713 if (state->OldestMessageTime() != monotonic_clock::max_time) {
1714 event_loop->OnRun([state]() { state->Setup(state->OldestMessageTime()); });
Austin Schuhe309d2a2019-11-29 13:25:21 -08001715 }
1716}
1717
1718void LogReader::Deregister() {
James Kuszmaul84ff3e52020-01-03 19:48:53 -08001719 // Make sure that things get destroyed in the correct order, rather than
1720 // relying on getting the order correct in the class definition.
Austin Schuh8bd96322020-02-13 21:18:22 -08001721 for (std::unique_ptr<State> &state : states_) {
Austin Schuh858c9f32020-08-31 16:56:12 -07001722 state->Deregister();
Austin Schuhe309d2a2019-11-29 13:25:21 -08001723 }
Austin Schuh92547522019-12-28 14:33:43 -08001724
James Kuszmaul84ff3e52020-01-03 19:48:53 -08001725 event_loop_factory_unique_ptr_.reset();
1726 event_loop_factory_ = nullptr;
Austin Schuhe309d2a2019-11-29 13:25:21 -08001727}
1728
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -08001729void LogReader::RemapLoggedChannel(std::string_view name, std::string_view type,
1730 std::string_view add_prefix) {
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -08001731 for (size_t ii = 0; ii < logged_configuration()->channels()->size(); ++ii) {
1732 const Channel *const channel = logged_configuration()->channels()->Get(ii);
1733 if (channel->name()->str() == name &&
1734 channel->type()->string_view() == type) {
1735 CHECK_EQ(0u, remapped_channels_.count(ii))
1736 << "Already remapped channel "
1737 << configuration::CleanedChannelToString(channel);
1738 remapped_channels_[ii] = std::string(add_prefix) + std::string(name);
1739 VLOG(1) << "Remapping channel "
1740 << configuration::CleanedChannelToString(channel)
1741 << " to have name " << remapped_channels_[ii];
Austin Schuh6331ef92020-01-07 18:28:09 -08001742 MakeRemappedConfig();
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -08001743 return;
1744 }
1745 }
1746 LOG(FATAL) << "Unabled to locate channel with name " << name << " and type "
1747 << type;
1748}
1749
// Node-aware remap: renames the logged channel {name, type} as resolved on
// `node` by prepending `add_prefix`, records a Map entry so the original name
// still resolves for that node's config lookups, and rebuilds the remapped
// configuration.  Dies if the channel cannot be found or was already
// remapped.
void LogReader::RemapLoggedChannel(std::string_view name, std::string_view type,
                                   const Node *node,
                                   std::string_view add_prefix) {
  VLOG(1) << "Node is " << aos::FlatbufferToJson(node);
  // Resolve {name, type} through the logged configuration's maps for this
  // node; the result may have a different (node-specific) name.
  const Channel *remapped_channel =
      configuration::GetChannel(logged_configuration(), name, type, "", node);
  CHECK(remapped_channel != nullptr) << ": Failed to find {\"name\": \"" << name
                                     << "\", \"type\": \"" << type << "\"}";
  VLOG(1) << "Original {\"name\": \"" << name << "\", \"type\": \"" << type
          << "\"}";
  VLOG(1) << "Remapped "
          << aos::configuration::StrippedChannelToString(remapped_channel);

  // We want to make /spray on node 0 go to /0/spray by snooping the maps. And
  // we want it to degrade if the heuristics fail to just work.
  //
  // The easiest way to do this is going to be incredibly specific and verbose.
  // Look up /spray, to /0/spray.  Then, prefix the result with /original to get
  // /original/0/spray.  Then, create a map from /original/spray to
  // /original/0/spray for just the type we were asked for.
  if (name != remapped_channel->name()->string_view()) {
    MapT new_map;
    new_map.match = std::make_unique<ChannelT>();
    new_map.match->name = absl::StrCat(add_prefix, name);
    new_map.match->type = type;
    if (node != nullptr) {
      // Scope the map to this node so other nodes' lookups are unaffected.
      new_map.match->source_node = node->name()->str();
    }
    new_map.rename = std::make_unique<ChannelT>();
    new_map.rename->name =
        absl::StrCat(add_prefix, remapped_channel->name()->string_view());
    maps_.emplace_back(std::move(new_map));
  }

  // Record the rename keyed by logged-channel index, then rebuild the
  // configuration so the new name takes effect.
  const size_t channel_index =
      configuration::ChannelIndex(logged_configuration(), remapped_channel);
  CHECK_EQ(0u, remapped_channels_.count(channel_index))
      << "Already remapped channel "
      << configuration::CleanedChannelToString(remapped_channel);
  remapped_channels_[channel_index] =
      absl::StrCat(add_prefix, remapped_channel->name()->string_view());
  MakeRemappedConfig();
}
1793
// Rebuilds remapped_configuration_ from the base configuration (the logged
// config, or replay_configuration_ if one was supplied) plus the renamed
// channels in remapped_channels_ and the extra maps in maps_.  Must run
// before any event loops are registered, since senders are created from the
// resulting config.
void LogReader::MakeRemappedConfig() {
  // Remapping after registration would leave already-created senders pointing
  // at the old config, so forbid it.
  for (std::unique_ptr<State> &state : states_) {
    if (state) {
      CHECK(!state->event_loop())
          << ": Can't change the mapping after the events are scheduled.";
    }
  }

  // If no remapping occurred and we are using the original config, then there
  // is nothing interesting to do here.
  if (remapped_channels_.empty() && replay_configuration_ == nullptr) {
    remapped_configuration_ = logged_configuration();
    return;
  }
  // Config to copy Channel definitions from. Use the specified
  // replay_configuration_ if it has been provided.
  const Configuration *const base_config = replay_configuration_ == nullptr
                                               ? logged_configuration()
                                               : replay_configuration_;
  // The remapped config will be identical to the base_config, except that it
  // will have a bunch of extra channels in the channel list, which are exact
  // copies of the remapped channels, but with different names.
  // Because the flatbuffers API is a pain to work with, this requires a bit of
  // a song-and-dance to get copied over.
  // The order of operations is to:
  // 1) Make a flatbuffer builder for a config that will just contain a list of
  //    the new channels that we want to add.
  // 2) For each channel that we are remapping:
  //    a) Make a buffer/builder and construct into it a Channel table that only
  //       contains the new name for the channel.
  //    b) Merge the new channel with just the name into the channel that we are
  //       trying to copy, built in the flatbuffer builder made in 1. This gives
  //       us the new channel definition that we need.
  // 3) Using this list of offsets, build the Configuration of just new
  //    Channels.
  // 4) Merge the Configuration with the new Channels into the base_config.
  // 5) Call MergeConfiguration() on that result to give MergeConfiguration a
  //    chance to sanitize the config.

  // This is the builder that we use for the config containing all the new
  // channels.
  flatbuffers::FlatBufferBuilder new_config_fbb;
  new_config_fbb.ForceDefaults(true);
  std::vector<flatbuffers::Offset<Channel>> channel_offsets;
  for (auto &pair : remapped_channels_) {
    // This is the builder that we use for creating the Channel with just the
    // new name.
    flatbuffers::FlatBufferBuilder new_name_fbb;
    new_name_fbb.ForceDefaults(true);
    const flatbuffers::Offset<flatbuffers::String> name_offset =
        new_name_fbb.CreateString(pair.second);
    ChannelBuilder new_name_builder(new_name_fbb);
    new_name_builder.add_name(name_offset);
    new_name_fbb.Finish(new_name_builder.Finish());
    const FlatbufferDetachedBuffer<Channel> new_name = new_name_fbb.Release();
    // Retrieve the channel that we want to copy, confirming that it is
    // actually present in base_config.
    const Channel *const base_channel = CHECK_NOTNULL(configuration::GetChannel(
        base_config, logged_configuration()->channels()->Get(pair.first), "",
        nullptr));
    // Actually create the new channel and put it into the vector of Offsets
    // that we will use to create the new Configuration.
    channel_offsets.emplace_back(MergeFlatBuffers<Channel>(
        reinterpret_cast<const flatbuffers::Table *>(base_channel),
        reinterpret_cast<const flatbuffers::Table *>(&new_name.message()),
        &new_config_fbb));
  }
  // Create the Configuration containing the new channels that we want to add.
  const auto new_channel_vector_offsets =
      new_config_fbb.CreateVector(channel_offsets);

  // Now create the new maps.  These come from maps_, which RemapLoggedChannel
  // populates so the original channel name still resolves for a given node.
  std::vector<flatbuffers::Offset<Map>> map_offsets;
  for (const MapT &map : maps_) {
    const flatbuffers::Offset<flatbuffers::String> match_name_offset =
        new_config_fbb.CreateString(map.match->name);
    const flatbuffers::Offset<flatbuffers::String> match_type_offset =
        new_config_fbb.CreateString(map.match->type);
    const flatbuffers::Offset<flatbuffers::String> rename_name_offset =
        new_config_fbb.CreateString(map.rename->name);
    flatbuffers::Offset<flatbuffers::String> match_source_node_offset;
    if (!map.match->source_node.empty()) {
      match_source_node_offset =
          new_config_fbb.CreateString(map.match->source_node);
    }
    Channel::Builder match_builder(new_config_fbb);
    match_builder.add_name(match_name_offset);
    match_builder.add_type(match_type_offset);
    if (!map.match->source_node.empty()) {
      match_builder.add_source_node(match_source_node_offset);
    }
    const flatbuffers::Offset<Channel> match_offset = match_builder.Finish();

    Channel::Builder rename_builder(new_config_fbb);
    rename_builder.add_name(rename_name_offset);
    const flatbuffers::Offset<Channel> rename_offset = rename_builder.Finish();

    Map::Builder map_builder(new_config_fbb);
    map_builder.add_match(match_offset);
    map_builder.add_rename(rename_offset);
    map_offsets.emplace_back(map_builder.Finish());
  }

  const auto new_maps_offsets = new_config_fbb.CreateVector(map_offsets);

  ConfigurationBuilder new_config_builder(new_config_fbb);
  new_config_builder.add_channels(new_channel_vector_offsets);
  new_config_builder.add_maps(new_maps_offsets);
  new_config_fbb.Finish(new_config_builder.Finish());
  const FlatbufferDetachedBuffer<Configuration> new_name_config =
      new_config_fbb.Release();
  // Merge the new channels configuration into the base_config, giving us the
  // remapped configuration.
  remapped_configuration_buffer_ =
      std::make_unique<FlatbufferDetachedBuffer<Configuration>>(
          MergeFlatBuffers<Configuration>(base_config,
                                          &new_name_config.message()));
  // Call MergeConfiguration to deal with sanitizing the config.
  remapped_configuration_buffer_ =
      std::make_unique<FlatbufferDetachedBuffer<Configuration>>(
          configuration::MergeConfiguration(*remapped_configuration_buffer_));

  remapped_configuration_ = &remapped_configuration_buffer_->message();
}
1918
Austin Schuh6f3babe2020-01-26 20:34:50 -08001919const Channel *LogReader::RemapChannel(const EventLoop *event_loop,
1920 const Channel *channel) {
1921 std::string_view channel_name = channel->name()->string_view();
1922 std::string_view channel_type = channel->type()->string_view();
1923 const int channel_index =
1924 configuration::ChannelIndex(logged_configuration(), channel);
1925 // If the channel is remapped, find the correct channel name to use.
1926 if (remapped_channels_.count(channel_index) > 0) {
Austin Schuhee711052020-08-24 16:06:09 -07001927 VLOG(3) << "Got remapped channel on "
Austin Schuh6f3babe2020-01-26 20:34:50 -08001928 << configuration::CleanedChannelToString(channel);
1929 channel_name = remapped_channels_[channel_index];
1930 }
1931
Austin Schuhee711052020-08-24 16:06:09 -07001932 VLOG(2) << "Going to remap channel " << channel_name << " " << channel_type;
Austin Schuh6f3babe2020-01-26 20:34:50 -08001933 const Channel *remapped_channel = configuration::GetChannel(
1934 event_loop->configuration(), channel_name, channel_type,
1935 event_loop->name(), event_loop->node());
1936
1937 CHECK(remapped_channel != nullptr)
1938 << ": Unable to send {\"name\": \"" << channel_name << "\", \"type\": \""
1939 << channel_type << "\"} because it is not in the provided configuration.";
1940
1941 return remapped_channel;
1942}
1943
// Takes ownership of the ChannelMerger which supplies this node's sorted
// stream of logged messages.
LogReader::State::State(std::unique_ptr<ChannelMerger> channel_merger)
    : channel_merger_(std::move(channel_merger)) {}
1946
// Binds this State to the simulated node and creates the "log_reader" event
// loop used to replay messages onto it.  Returns a borrowed pointer; the
// event loop itself is owned by this State (event_loop_unique_ptr_).
EventLoop *LogReader::State::SetNodeEventLoopFactory(
    NodeEventLoopFactory *node_event_loop_factory) {
  node_event_loop_factory_ = node_event_loop_factory;
  event_loop_unique_ptr_ =
      node_event_loop_factory_->MakeEventLoop("log_reader");
  return event_loop_unique_ptr_.get();
}
1954
// Sizes all the per-channel bookkeeping vectors for `count` logged channels.
// Must be called before SetChannel() populates individual entries.
void LogReader::State::SetChannelCount(size_t count) {
  channels_.resize(count);                  // RawSenders used to replay data.
  remote_timestamp_senders_.resize(count);  // Timestamp senders (forwarded channels).
  filters_.resize(count);                   // Per-channel clock-offset estimators.
  channel_source_state_.resize(count);      // Source-node State (forwarded channels).
  factory_channel_index_.resize(count);     // Logged index -> replay config index.
  queue_index_map_.resize(count);           // Logged -> replayed queue index records.
}
1963
1964void LogReader::State::SetChannel(
Austin Schuh8d7e0bb2020-10-02 17:57:00 -07001965 size_t logged_channel_index, size_t factory_channel_index,
1966 std::unique_ptr<RawSender> sender,
Austin Schuh2f8fd752020-09-01 22:38:28 -07001967 message_bridge::NoncausalOffsetEstimator *filter,
Austin Schuh8d7e0bb2020-10-02 17:57:00 -07001968 aos::Sender<MessageHeader> *remote_timestamp_sender, State *source_state) {
1969 channels_[logged_channel_index] = std::move(sender);
1970 filters_[logged_channel_index] = filter;
1971 remote_timestamp_senders_[logged_channel_index] = remote_timestamp_sender;
1972
1973 if (source_state) {
1974 channel_source_state_[logged_channel_index] = source_state;
1975
1976 if (remote_timestamp_sender != nullptr) {
1977 source_state->queue_index_map_[logged_channel_index] =
1978 std::make_unique<std::vector<State::SentTimestamp>>();
1979 }
1980 }
1981
1982 factory_channel_index_[logged_channel_index] = factory_channel_index;
1983}
1984
// Replays one message on channel_index.  Returns false if the underlying
// RawSender refused the send; true otherwise.  Also maintains the queue-index
// bookkeeping that keeps forwarded messages and their delivery timestamps
// consistent when the replayed stream is logged again.
bool LogReader::State::Send(
    size_t channel_index, const void *data, size_t size,
    const TimestampMerger::DeliveryTimestamp &delivery_timestamp) {
  aos::RawSender *sender = channels_[channel_index].get();
  // 0xffffffff marks "remote queue index unknown".
  uint32_t remote_queue_index = 0xffffffff;

  if (remote_timestamp_senders_[channel_index] != nullptr) {
    // This message was forwarded from another node, so look up the queue
    // index it was actually replayed with on the source node's State.
    std::vector<SentTimestamp> *queue_index_map =
        CHECK_NOTNULL(CHECK_NOTNULL(channel_source_state_[channel_index])
                          ->queue_index_map_[channel_index]
                          .get());

    SentTimestamp search;
    search.monotonic_event_time = delivery_timestamp.monotonic_remote_time;
    search.realtime_event_time = delivery_timestamp.realtime_remote_time;
    search.queue_index = delivery_timestamp.remote_queue_index;

    // Find the sent time if available.  Entries are ordered by
    // (monotonic_event_time, queue_index), so binary search applies.
    auto element = std::lower_bound(
        queue_index_map->begin(), queue_index_map->end(), search,
        [](SentTimestamp a, SentTimestamp b) {
          if (b.monotonic_event_time < a.monotonic_event_time) {
            return false;
          }
          if (b.monotonic_event_time > a.monotonic_event_time) {
            return true;
          }

          if (b.queue_index < a.queue_index) {
            return false;
          }
          if (b.queue_index > a.queue_index) {
            return true;
          }

          // Entries that compare equal on (monotonic time, queue index) must
          // also agree on the realtime clock.
          CHECK_EQ(a.realtime_event_time, b.realtime_event_time);
          return false;
        });

    // TODO(austin): Be a bit more principled here, but we will want to do that
    // after the logger rewrite.  We hit this when one node finishes, but the
    // other node isn't done yet.  So there is no send time, but there is a
    // receive time.
    if (element != queue_index_map->end()) {
      // Confirm we found the exact entry, not just an insertion point.
      CHECK_EQ(element->monotonic_event_time,
               delivery_timestamp.monotonic_remote_time);
      CHECK_EQ(element->realtime_event_time,
               delivery_timestamp.realtime_remote_time);
      CHECK_EQ(element->queue_index, delivery_timestamp.remote_queue_index);

      remote_queue_index = element->actual_queue_index;
    }
  }

  // Send!  Use the replayed queue index here instead of the logged queue index
  // for the remote queue index.  This makes re-logging work.
  const bool sent =
      sender->Send(data, size, delivery_timestamp.monotonic_remote_time,
                   delivery_timestamp.realtime_remote_time, remote_queue_index);
  if (!sent) return false;

  if (queue_index_map_[channel_index]) {
    // This channel is forwarded to some other node: record the queue index it
    // was actually replayed with so that node's Send() can find it above.
    SentTimestamp timestamp;
    timestamp.monotonic_event_time = delivery_timestamp.monotonic_event_time;
    timestamp.realtime_event_time = delivery_timestamp.realtime_event_time;
    timestamp.queue_index = delivery_timestamp.queue_index;
    timestamp.actual_queue_index = sender->sent_queue_index();
    queue_index_map_[channel_index]->emplace_back(timestamp);
  } else if (remote_timestamp_senders_[channel_index] != nullptr) {
    // Republish the delivery timestamp on the remote-timestamps channel so it
    // is available to message_bridge-style consumers during replay.
    aos::Sender<MessageHeader>::Builder builder =
        remote_timestamp_senders_[channel_index]->MakeBuilder();

    logger::MessageHeader::Builder message_header_builder =
        builder.MakeBuilder<logger::MessageHeader>();

    message_header_builder.add_channel_index(
        factory_channel_index_[channel_index]);

    // Swap the remote and sent metrics.  They are from the sender's
    // perspective, not the receiver's perspective.
    message_header_builder.add_monotonic_sent_time(
        sender->monotonic_sent_time().time_since_epoch().count());
    message_header_builder.add_realtime_sent_time(
        sender->realtime_sent_time().time_since_epoch().count());
    message_header_builder.add_queue_index(sender->sent_queue_index());

    message_header_builder.add_monotonic_remote_time(
        delivery_timestamp.monotonic_remote_time.time_since_epoch().count());
    message_header_builder.add_realtime_remote_time(
        delivery_timestamp.realtime_remote_time.time_since_epoch().count());

    message_header_builder.add_remote_queue_index(remote_queue_index);

    builder.Send(message_header_builder.Finish());
  }

  return true;
}
2083
2084aos::Sender<MessageHeader> *LogReader::State::RemoteTimestampSender(
2085 const Node *delivered_node) {
2086 auto sender = remote_timestamp_senders_map_.find(delivered_node);
2087
2088 if (sender == remote_timestamp_senders_map_.end()) {
2089 sender = remote_timestamp_senders_map_
2090 .emplace(std::make_pair(
2091 delivered_node,
2092 event_loop()->MakeSender<MessageHeader>(
2093 absl::StrCat("/aos/remote_timestamps/",
2094 delivered_node->name()->string_view()))))
2095 .first;
2096 }
2097
2098 return &(sender->second);
Austin Schuh858c9f32020-08-31 16:56:12 -07002099}
2100
// Pops the oldest queued message for this node, returning its delivery
// timestamp metadata, logged channel index, and payload.  *update_time is set
// to whatever the message's offset estimator reports from Pop() (it is false
// when the message carried no forwarding timestamp).
std::tuple<TimestampMerger::DeliveryTimestamp, int,
           FlatbufferVector<MessageHeader>>
LogReader::State::PopOldest(bool *update_time) {
  CHECK_GT(sorted_messages_.size(), 0u);

  // Queue entries are (delivery timestamp, channel index, data, filter).
  std::tuple<TimestampMerger::DeliveryTimestamp, int,
             FlatbufferVector<MessageHeader>,
             message_bridge::NoncausalOffsetEstimator *>
      result = std::move(sorted_messages_.front());
  VLOG(2) << MaybeNodeName(event_loop_->node()) << "PopOldest Popping "
          << std::get<0>(result).monotonic_event_time;
  sorted_messages_.pop_front();
  // Refill the local queue so it stays ~2 seconds deep.
  SeedSortedMessages();

  if (std::get<3>(result) != nullptr) {
    // A forwarding timestamp was sampled for this message; pop the matching
    // sample from the estimator and report whether timing needs updating.
    *update_time = std::get<3>(result)->Pop(
        event_loop_->node(), std::get<0>(result).monotonic_event_time);
  } else {
    *update_time = false;
  }
  return std::make_tuple(std::get<0>(result), std::get<1>(result),
                         std::move(std::get<2>(result)));
}
2124
2125monotonic_clock::time_point LogReader::State::OldestMessageTime() const {
2126 if (sorted_messages_.size() > 0) {
Austin Schuh2f8fd752020-09-01 22:38:28 -07002127 VLOG(2) << MaybeNodeName(event_loop_->node()) << "oldest message at "
Austin Schuh858c9f32020-08-31 16:56:12 -07002128 << std::get<0>(sorted_messages_.front()).monotonic_event_time;
2129 return std::get<0>(sorted_messages_.front()).monotonic_event_time;
2130 }
2131
2132 return channel_merger_->OldestMessageTime();
2133}
2134
// Pulls messages from the ChannelMerger into sorted_messages_ until roughly
// two seconds of data (relative to the current queue front, or the log start
// when the queue is empty) is buffered.  Forwarding timestamps encountered
// along the way are sampled into the matching offset estimators.
void LogReader::State::SeedSortedMessages() {
  const aos::monotonic_clock::time_point end_queue_time =
      (sorted_messages_.size() > 0
           ? std::get<0>(sorted_messages_.front()).monotonic_event_time
           : channel_merger_->monotonic_start_time()) +
      std::chrono::seconds(2);

  while (true) {
    if (channel_merger_->OldestMessageTime() == monotonic_clock::max_time) {
      // The merger is out of messages; nothing more to queue.
      return;
    }
    if (sorted_messages_.size() > 0) {
      // Stop placing sorted messages on the list once we have 2 seconds
      // queued up (but queue at least until the log starts).
      if (end_queue_time <
          std::get<0>(sorted_messages_.back()).monotonic_event_time) {
        return;
      }
    }

    TimestampMerger::DeliveryTimestamp channel_timestamp;
    int channel_index;
    FlatbufferVector<MessageHeader> channel_data =
        FlatbufferVector<MessageHeader>::Empty();

    message_bridge::NoncausalOffsetEstimator *filter = nullptr;

    std::tie(channel_timestamp, channel_index, channel_data) =
        channel_merger_->PopOldest();

    // Skip any messages without forwarding information.
    if (channel_timestamp.monotonic_remote_time != monotonic_clock::min_time) {
      // Got a forwarding timestamp!
      filter = filters_[channel_index];

      CHECK(filter != nullptr);

      // Call the correct method depending on if we are the forward or
      // reverse direction here.
      filter->Sample(event_loop_->node(),
                     channel_timestamp.monotonic_event_time,
                     channel_timestamp.monotonic_remote_time);
    }
    sorted_messages_.emplace_back(channel_timestamp, channel_index,
                                  std::move(channel_data), filter);
  }
}
2182
2183void LogReader::State::Deregister() {
2184 for (size_t i = 0; i < channels_.size(); ++i) {
2185 channels_[i].reset();
2186 }
Austin Schuh8d7e0bb2020-10-02 17:57:00 -07002187 remote_timestamp_senders_map_.clear();
Austin Schuh858c9f32020-08-31 16:56:12 -07002188 event_loop_unique_ptr_.reset();
2189 event_loop_ = nullptr;
2190 timer_handler_ = nullptr;
2191 node_event_loop_factory_ = nullptr;
2192}
2193
Austin Schuhe309d2a2019-11-29 13:25:21 -08002194} // namespace logger
2195} // namespace aos