blob: c38cf0378dc7ec84d47d66076a668170debf4d43 [file] [log] [blame]
James Kuszmaul38735e82019-12-07 16:42:06 -08001#include "aos/events/logging/logger.h"
Austin Schuhe309d2a2019-11-29 13:25:21 -08002
3#include <fcntl.h>
Austin Schuh4c4e0092019-12-22 16:18:03 -08004#include <limits.h>
Austin Schuhe309d2a2019-11-29 13:25:21 -08005#include <sys/stat.h>
6#include <sys/types.h>
7#include <sys/uio.h>
8#include <vector>
9
Austin Schuh8bd96322020-02-13 21:18:22 -080010#include "Eigen/Dense"
Austin Schuh2f8fd752020-09-01 22:38:28 -070011#include "absl/strings/escaping.h"
Austin Schuhe309d2a2019-11-29 13:25:21 -080012#include "absl/types/span.h"
13#include "aos/events/event_loop.h"
Austin Schuhf6f9bf32020-10-11 14:37:43 -070014#include "aos/events/logging/logfile_sorting.h"
James Kuszmaul38735e82019-12-07 16:42:06 -080015#include "aos/events/logging/logger_generated.h"
Austin Schuh64fab802020-09-09 22:47:47 -070016#include "aos/events/logging/uuid.h"
Austin Schuhe309d2a2019-11-29 13:25:21 -080017#include "aos/flatbuffer_merge.h"
Austin Schuh288479d2019-12-18 19:47:52 -080018#include "aos/network/team_number.h"
Austin Schuhe309d2a2019-11-29 13:25:21 -080019#include "aos/time/time.h"
Brian Silvermanae7c0332020-09-30 16:58:23 -070020#include "aos/util/file.h"
Austin Schuhe309d2a2019-11-29 13:25:21 -080021#include "flatbuffers/flatbuffers.h"
Austin Schuh2f8fd752020-09-01 22:38:28 -070022#include "third_party/gmp/gmpxx.h"
Austin Schuhe309d2a2019-11-29 13:25:21 -080023
// Command-line flags tuning logging and replay behavior.
DEFINE_bool(skip_missing_forwarding_entries, false,
            "If true, drop any forwarding entries with missing data. If "
            "false, CHECK.");

DEFINE_bool(timestamps_to_csv, false,
            "If true, write all the time synchronization information to a set "
            "of CSV files in /tmp/. This should only be needed when debugging "
            "time synchronization.");

DEFINE_bool(skip_order_validation, false,
            "If true, ignore any out of orderness in replay");
35
Austin Schuhe309d2a2019-11-29 13:25:21 -080036namespace aos {
37namespace logger {
namespace {
// Helper to safely read a header, or CHECK.  Reads the header of the first
// part of the first log; dies with a clear message if either the outer list
// of logs or the first log's list of parts is empty.
FlatbufferVector<LogFileHeader> MaybeReadHeaderOrDie(
    const std::vector<std::vector<std::string>> &filenames) {
  CHECK_GE(filenames.size(), 1u) << ": Empty filenames list";
  CHECK_GE(filenames[0].size(), 1u) << ": Empty filenames list";
  return ReadHeader(filenames[0][0]);
}
// File-local shorthand for std::chrono.
namespace chrono = std::chrono;
}  // namespace
Austin Schuhe309d2a2019-11-29 13:25:21 -080048
// Constructs a Logger on the given event loop.  configuration may differ from
// event_loop->configuration() (e.g. a logged subset); should_log filters which
// channels actually get logged.  No files are opened here -- that happens in
// StartLogging().
Logger::Logger(EventLoop *event_loop, const Configuration *configuration,
               std::function<bool(const Channel *)> should_log)
    : event_loop_(event_loop),
      configuration_(configuration),
      // Captured once at construction; identifies this boot of the machine.
      boot_uuid_(
          util::ReadFileToStringOrDie("/proc/sys/kernel/random/boot_id")),
      name_(network::GetHostname()),
      // Periodic timer which drives the actual polling/writing.
      timer_handler_(event_loop_->AddTimer(
          [this]() { DoLogData(event_loop_->monotonic_now()); })),
      // Only multi-node configurations publish ServerStatistics; single-node
      // gets a default (empty) fetcher.
      server_statistics_fetcher_(
          configuration::MultiNode(event_loop_->configuration())
              ? event_loop_->MakeFetcher<message_bridge::ServerStatistics>(
                    "/aos")
              : aos::Fetcher<message_bridge::ServerStatistics>()) {
  VLOG(1) << "Creating logger for " << FlatbufferToJson(event_loop_->node());

  // Find all the nodes which are logging timestamps on our node.  This may
  // over-estimate if should_log is specified.
  std::vector<const Node *> timestamp_logger_nodes =
      configuration::TimestampNodes(configuration_, event_loop_->node());

  // Maps each remote-timestamp channel to the node whose timestamps it carries.
  std::map<const Channel *, const Node *> timestamp_logger_channels;

  // Now that we have all the nodes accumulated, make remote timestamp loggers
  // for them.
  for (const Node *node : timestamp_logger_nodes) {
    // Note: since we are doing a find using the event loop channel, we need to
    // make sure this channel pointer is part of the event loop configuration,
    // not configuration_.  This only matters when configuration_ !=
    // event_loop->configuration();
    const Channel *channel = configuration::GetChannel(
        event_loop->configuration(),
        absl::StrCat("/aos/remote_timestamps/", node->name()->string_view()),
        logger::MessageHeader::GetFullyQualifiedName(), event_loop_->name(),
        event_loop_->node());

    CHECK(channel != nullptr)
        << ": Remote timestamps are logged on "
        << event_loop_->node()->name()->string_view()
        << " but can't find channel /aos/remote_timestamps/"
        << node->name()->string_view();
    if (!should_log(channel)) {
      continue;
    }
    timestamp_logger_channels.insert(std::make_pair(channel, node));
  }

  const size_t our_node_index =
      configuration::GetNodeIndex(configuration_, event_loop_->node());

  // Build one FetcherStruct per channel that needs any kind of logging.
  for (size_t channel_index = 0;
       channel_index < configuration_->channels()->size(); ++channel_index) {
    const Channel *const config_channel =
        configuration_->channels()->Get(channel_index);
    // The MakeRawFetcher method needs a channel which is in the event loop
    // configuration() object, not the configuration_ object.  Go look that up
    // from the config.
    const Channel *channel = aos::configuration::GetChannel(
        event_loop_->configuration(), config_channel->name()->string_view(),
        config_channel->type()->string_view(), "", event_loop_->node());
    CHECK(channel != nullptr)
        << ": Failed to look up channel "
        << aos::configuration::CleanedChannelToString(config_channel);
    if (!should_log(channel)) {
      continue;
    }

    FetcherStruct fs;
    fs.node_index = our_node_index;
    fs.channel_index = channel_index;
    fs.channel = channel;

    // Whether this node can send on the channel (i.e. the data originates
    // locally); remote data gets logged as kLogRemoteMessage below.
    const bool is_local =
        configuration::ChannelIsSendableOnNode(channel, event_loop_->node());

    const bool is_readable =
        configuration::ChannelIsReadableOnNode(channel, event_loop_->node());
    const bool is_logged = configuration::ChannelMessageIsLoggedOnNode(
        channel, event_loop_->node());
    const bool log_message = is_logged && is_readable;

    bool log_delivery_times = false;
    if (event_loop_->node() != nullptr) {
      log_delivery_times = configuration::ConnectionDeliveryTimeIsLoggedOnNode(
          channel, event_loop_->node(), event_loop_->node());
    }

    // Now, detect a MessageHeader timestamp logger where we should just log the
    // contents to a file directly.
    const bool log_contents = timestamp_logger_channels.find(channel) !=
                              timestamp_logger_channels.end();

    if (log_message || log_delivery_times || log_contents) {
      fs.fetcher = event_loop->MakeRawFetcher(channel);
      VLOG(1) << "Logging channel "
              << configuration::CleanedChannelToString(channel);

      if (log_delivery_times) {
        VLOG(1) << " Delivery times";
        fs.wants_timestamp_writer = true;
      }
      if (log_message) {
        VLOG(1) << " Data";
        fs.wants_writer = true;
        if (!is_local) {
          fs.log_type = LogType::kLogRemoteMessage;
        }
      }
      if (log_contents) {
        VLOG(1) << "Timestamp logger channel "
                << configuration::CleanedChannelToString(channel);
        fs.timestamp_node = timestamp_logger_channels.find(channel)->second;
        fs.wants_contents_writer = true;
        // Forwarded timestamps are attributed to the remote node, not us.
        fs.node_index =
            configuration::GetNodeIndex(configuration_, fs.timestamp_node);
      }
      fetchers_.emplace_back(std::move(fs));
    }
  }

  // When we are logging remote timestamps, we need to be able to translate from
  // the channel index that the event loop uses to the channel index in the
  // config in the log file.  Unmapped channels stay -1.
  event_loop_to_logged_channel_index_.resize(
      event_loop->configuration()->channels()->size(), -1);
  for (size_t event_loop_channel_index = 0;
       event_loop_channel_index <
       event_loop->configuration()->channels()->size();
       ++event_loop_channel_index) {
    const Channel *event_loop_channel =
        event_loop->configuration()->channels()->Get(event_loop_channel_index);

    const Channel *logged_channel = aos::configuration::GetChannel(
        configuration_, event_loop_channel->name()->string_view(),
        event_loop_channel->type()->string_view(), "",
        configuration::GetNode(configuration_, event_loop_->node()));

    if (logged_channel != nullptr) {
      event_loop_to_logged_channel_index_[event_loop_channel_index] =
          configuration::ChannelIndex(configuration_, logged_channel);
    }
  }
}
192
193Logger::~Logger() {
194 if (log_namer_) {
195 // If we are replaying a log file, or in simulation, we want to force the
196 // last bit of data to be logged. The easiest way to deal with this is to
197 // poll everything as we go to destroy the class, ie, shut down the logger,
198 // and write it to disk.
199 StopLogging(event_loop_->monotonic_now());
200 }
201}
202
// Starts logging to the files produced by log_namer.  log_start_uuid is an
// opaque caller-provided identifier that gets stamped into the header (see
// MakeHeader).  CHECK-fails if logging is already running.
void Logger::StartLogging(std::unique_ptr<LogNamer> log_namer,
                          std::string_view log_start_uuid) {
  CHECK(!log_namer_) << ": Already logging";
  log_namer_ = std::move(log_namer);
  // Fresh UUID for this particular start/stop cycle.
  log_event_uuid_ = UUID::Random();
  log_start_uuid_ = log_start_uuid;
  VLOG(1) << "Starting logger for " << FlatbufferToJson(event_loop_->node());

  // We want to do as much work as possible before the initial Fetch. Time
  // between that and actually starting to log opens up the possibility of
  // falling off the end of the queue during that time.

  // Create whichever writers each fetcher asked for in the constructor.
  for (FetcherStruct &f : fetchers_) {
    if (f.wants_writer) {
      f.writer = log_namer_->MakeWriter(f.channel);
    }
    if (f.wants_timestamp_writer) {
      f.timestamp_writer = log_namer_->MakeTimestampWriter(f.channel);
    }
    if (f.wants_contents_writer) {
      f.contents_writer = log_namer_->MakeForwardedTimestampWriter(
          f.channel, CHECK_NOTNULL(f.timestamp_node));
    }
  }

  // One NodeState per node (or a single slot for single-node configs).
  CHECK(node_state_.empty());
  node_state_.resize(configuration::MultiNode(configuration_)
                         ? configuration_->nodes()->size()
                         : 1u);

  for (const Node *node : log_namer_->nodes()) {
    const int node_index = configuration::GetNodeIndex(configuration_, node);

    node_state_[node_index].log_file_header = MakeHeader(node);
  }

  // Grab data from each channel right before we declare the log file started
  // so we can capture the latest message on each channel. This lets us have
  // non periodic messages with configuration that now get logged.
  for (FetcherStruct &f : fetchers_) {
    const auto start = event_loop_->monotonic_now();
    const bool got_new = f.fetcher->Fetch();
    const auto end = event_loop_->monotonic_now();
    RecordFetchResult(start, end, got_new, &f);

    // If there is a message, we want to write it.
    // (written == true means "nothing pending", so an empty fetch marks the
    // channel as already written.)
    f.written = f.fetcher->context().data == nullptr;
  }

  // Clear out any old timestamps in case we are re-starting logging.
  for (size_t i = 0; i < node_state_.size(); ++i) {
    SetStartTime(i, monotonic_clock::min_time, realtime_clock::min_time);
  }

  WriteHeader();

  LOG(INFO) << "Logging node as " << FlatbufferToJson(event_loop_->node())
            << " start_time " << last_synchronized_time_;

  // Kick off the periodic polling timer that drives DoLogData.
  timer_handler_->Setup(event_loop_->monotonic_now() + polling_period_,
                        polling_period_);
}
265
Brian Silverman1f345222020-09-24 21:14:48 -0700266std::unique_ptr<LogNamer> Logger::StopLogging(
267 aos::monotonic_clock::time_point end_time) {
268 CHECK(log_namer_) << ": Not logging right now";
269
270 if (end_time != aos::monotonic_clock::min_time) {
271 LogUntil(end_time);
272 }
273 timer_handler_->Disable();
274
275 for (FetcherStruct &f : fetchers_) {
276 f.writer = nullptr;
277 f.timestamp_writer = nullptr;
278 f.contents_writer = nullptr;
279 }
280 node_state_.clear();
281
Brian Silvermanae7c0332020-09-30 16:58:23 -0700282 log_event_uuid_ = UUID::Zero();
283 log_start_uuid_ = std::string();
284
Brian Silverman1f345222020-09-24 21:14:48 -0700285 return std::move(log_namer_);
286}
287
// Writes the log file header for every node the namer covers, picking "now"
// as the log start time and, where possible, compensating remote nodes via
// MaybeUpdateTimestamp.
void Logger::WriteHeader() {
  // ServerStatistics (used for remote clock offsets) only exists multi-node.
  if (configuration::MultiNode(configuration_)) {
    server_statistics_fetcher_.Fetch();
  }

  aos::monotonic_clock::time_point monotonic_start_time =
      event_loop_->monotonic_now();
  aos::realtime_clock::time_point realtime_start_time =
      event_loop_->realtime_now();

  // We need to pick a point in time to declare the log file "started". This
  // starts here. It needs to be after everything is fetched so that the
  // fetchers are all pointed at the most recent message before the start
  // time.
  last_synchronized_time_ = monotonic_start_time;

  for (const Node *node : log_namer_->nodes()) {
    const int node_index = configuration::GetNodeIndex(configuration_, node);
    MaybeUpdateTimestamp(node, node_index, monotonic_start_time,
                         realtime_start_time);
    log_namer_->WriteHeader(&node_state_[node_index].log_file_header, node);
  }
}
Austin Schuh8bd96322020-02-13 21:18:22 -0800311
Austin Schuh2f8fd752020-09-01 22:38:28 -0700312void Logger::WriteMissingTimestamps() {
Austin Schuh0c297012020-09-16 18:41:59 -0700313 if (configuration::MultiNode(configuration_)) {
Austin Schuh2f8fd752020-09-01 22:38:28 -0700314 server_statistics_fetcher_.Fetch();
315 } else {
316 return;
317 }
318
319 if (server_statistics_fetcher_.get() == nullptr) {
320 return;
321 }
322
323 for (const Node *node : log_namer_->nodes()) {
Brian Silvermand90905f2020-09-23 14:42:56 -0700324 const int node_index = configuration::GetNodeIndex(configuration_, node);
Austin Schuh2f8fd752020-09-01 22:38:28 -0700325 if (MaybeUpdateTimestamp(
326 node, node_index,
327 server_statistics_fetcher_.context().monotonic_event_time,
328 server_statistics_fetcher_.context().realtime_event_time)) {
Austin Schuh64fab802020-09-09 22:47:47 -0700329 log_namer_->Rotate(node, &node_state_[node_index].log_file_header);
Austin Schuh2f8fd752020-09-01 22:38:28 -0700330 }
331 }
332}
333
334void Logger::SetStartTime(size_t node_index,
335 aos::monotonic_clock::time_point monotonic_start_time,
336 aos::realtime_clock::time_point realtime_start_time) {
337 node_state_[node_index].monotonic_start_time = monotonic_start_time;
338 node_state_[node_index].realtime_start_time = realtime_start_time;
339 node_state_[node_index]
340 .log_file_header.mutable_message()
341 ->mutate_monotonic_start_time(
342 std::chrono::duration_cast<std::chrono::nanoseconds>(
343 monotonic_start_time.time_since_epoch())
344 .count());
345 if (node_state_[node_index]
346 .log_file_header.mutable_message()
347 ->has_realtime_start_time()) {
348 node_state_[node_index]
349 .log_file_header.mutable_message()
350 ->mutate_realtime_start_time(
351 std::chrono::duration_cast<std::chrono::nanoseconds>(
352 realtime_start_time.time_since_epoch())
353 .count());
354 }
355}
356
// Tries to set the start time for node, returning true on success.  For our
// own node (or single-node configs) this always succeeds.  For a remote node
// it requires a CONNECTED message-bridge connection with a monotonic offset
// so the remote clock can be compensated; otherwise the start time is left
// unset and the caller will retry later (see WriteMissingTimestamps).
bool Logger::MaybeUpdateTimestamp(
    const Node *node, int node_index,
    aos::monotonic_clock::time_point monotonic_start_time,
    aos::realtime_clock::time_point realtime_start_time) {
  // Bail early if the start times are already set.
  if (node_state_[node_index].monotonic_start_time !=
      monotonic_clock::min_time) {
    return false;
  }
  if (configuration::MultiNode(configuration_)) {
    if (event_loop_->node() == node) {
      // There are no offsets to compute for ourself, so always succeed.
      SetStartTime(node_index, monotonic_start_time, realtime_start_time);
      return true;
    } else if (server_statistics_fetcher_.get() != nullptr) {
      // We must be a remote node now. Look for the connection and see if it is
      // connected.

      for (const message_bridge::ServerConnection *connection :
           *server_statistics_fetcher_->connections()) {
        // Skip connections for other nodes.
        if (connection->node()->name()->string_view() !=
            node->name()->string_view()) {
          continue;
        }

        if (connection->state() != message_bridge::State::CONNECTED) {
          VLOG(1) << node->name()->string_view()
                  << " is not connected, can't start it yet.";
          break;
        }

        if (!connection->has_monotonic_offset()) {
          VLOG(1) << "Missing monotonic offset for setting start time for node "
                  << aos::FlatbufferToJson(node);
          break;
        }

        VLOG(1) << "Updating start time for " << aos::FlatbufferToJson(node);

        // Found it and it is connected. Compensate and go.
        monotonic_start_time +=
            std::chrono::nanoseconds(connection->monotonic_offset());

        SetStartTime(node_index, monotonic_start_time, realtime_start_time);
        return true;
      }
    }
  } else {
    // Single-node configuration: nothing to compensate for.
    SetStartTime(node_index, monotonic_start_time, realtime_start_time);
    return true;
  }
  return false;
}
410
// Builds the LogFileHeader flatbuffer for node, containing the configuration,
// the various UUIDs, and placeholder (min_time) start times which get patched
// later by SetStartTime once the real start time is known.
aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> Logger::MakeHeader(
    const Node *node) {
  // Now write the header with this timestamp in it.
  flatbuffers::FlatBufferBuilder fbb;
  fbb.ForceDefaults(true);

  // TODO(austin): Compress this much more efficiently. There are a bunch of
  // duplicated schemas.
  const flatbuffers::Offset<aos::Configuration> configuration_offset =
      CopyFlatBuffer(configuration_, &fbb);

  const flatbuffers::Offset<flatbuffers::String> name_offset =
      fbb.CreateString(name_);

  // StartLogging must have populated this before headers get built.
  CHECK(log_event_uuid_ != UUID::Zero());
  const flatbuffers::Offset<flatbuffers::String> log_event_uuid_offset =
      fbb.CreateString(log_event_uuid_.string_view());

  const flatbuffers::Offset<flatbuffers::String> logger_instance_uuid_offset =
      fbb.CreateString(logger_instance_uuid_.string_view());

  // log_start_uuid is optional; leave the offset null when it wasn't supplied.
  flatbuffers::Offset<flatbuffers::String> log_start_uuid_offset;
  if (!log_start_uuid_.empty()) {
    log_start_uuid_offset = fbb.CreateString(log_start_uuid_);
  }

  const flatbuffers::Offset<flatbuffers::String> boot_uuid_offset =
      fbb.CreateString(boot_uuid_);

  // Fixed placeholder parts UUID; parts_index below is always 0.
  const flatbuffers::Offset<flatbuffers::String> parts_uuid_offset =
      fbb.CreateString("00000000-0000-4000-8000-000000000000");

  flatbuffers::Offset<Node> node_offset;
  flatbuffers::Offset<Node> logger_node_offset;

  if (configuration::MultiNode(configuration_)) {
    // TODO(austin): Reuse the node we just copied in above.
    node_offset = RecursiveCopyFlatBuffer(node, &fbb);
    logger_node_offset = RecursiveCopyFlatBuffer(event_loop_->node(), &fbb);
  }

  aos::logger::LogFileHeader::Builder log_file_header_builder(fbb);

  log_file_header_builder.add_name(name_offset);

  // Only add the node if we are running in a multinode configuration.
  if (node != nullptr) {
    log_file_header_builder.add_node(node_offset);
    log_file_header_builder.add_logger_node(logger_node_offset);
  }

  log_file_header_builder.add_configuration(configuration_offset);
  // The worst case theoretical out of order is the polling period times 2.
  // One message could get logged right after the boundary, but be for right
  // before the next boundary. And the reverse could happen for another
  // message. Report back 3x to be extra safe, and because the cost isn't
  // huge on the read side.
  log_file_header_builder.add_max_out_of_order_duration(
      std::chrono::nanoseconds(3 * polling_period_).count());

  // Start times are written as min_time here and corrected via mutate_* in
  // SetStartTime once known.
  log_file_header_builder.add_monotonic_start_time(
      std::chrono::duration_cast<std::chrono::nanoseconds>(
          monotonic_clock::min_time.time_since_epoch())
          .count());
  // Only our own node's header carries a realtime start time field.
  if (node == event_loop_->node()) {
    log_file_header_builder.add_realtime_start_time(
        std::chrono::duration_cast<std::chrono::nanoseconds>(
            realtime_clock::min_time.time_since_epoch())
            .count());
  }

  log_file_header_builder.add_log_event_uuid(log_event_uuid_offset);
  log_file_header_builder.add_logger_instance_uuid(logger_instance_uuid_offset);
  if (!log_start_uuid_offset.IsNull()) {
    log_file_header_builder.add_log_start_uuid(log_start_uuid_offset);
  }
  log_file_header_builder.add_boot_uuid(boot_uuid_offset);

  log_file_header_builder.add_parts_uuid(parts_uuid_offset);
  log_file_header_builder.add_parts_index(0);

  fbb.FinishSizePrefixed(log_file_header_builder.Finish());
  aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> result(
      fbb.Release());

  // Sanity-check the flatbuffer before anything gets written to disk.
  CHECK(result.Verify()) << ": Built a corrupted header.";

  return result;
}
500
Brian Silvermancb805822020-10-06 17:43:35 -0700501void Logger::ResetStatisics() {
502 max_message_fetch_time_ = std::chrono::nanoseconds::zero();
503 max_message_fetch_time_channel_ = -1;
504 max_message_fetch_time_size_ = -1;
505 total_message_fetch_time_ = std::chrono::nanoseconds::zero();
506 total_message_fetch_count_ = 0;
507 total_message_fetch_bytes_ = 0;
508 total_nop_fetch_time_ = std::chrono::nanoseconds::zero();
509 total_nop_fetch_count_ = 0;
510 max_copy_time_ = std::chrono::nanoseconds::zero();
511 max_copy_time_channel_ = -1;
512 max_copy_time_size_ = -1;
513 total_copy_time_ = std::chrono::nanoseconds::zero();
514 total_copy_count_ = 0;
515 total_copy_bytes_ = 0;
516}
517
Austin Schuh2f8fd752020-09-01 22:38:28 -0700518void Logger::Rotate() {
519 for (const Node *node : log_namer_->nodes()) {
Brian Silvermand90905f2020-09-23 14:42:56 -0700520 const int node_index = configuration::GetNodeIndex(configuration_, node);
Austin Schuh64fab802020-09-09 22:47:47 -0700521 log_namer_->Rotate(node, &node_state_[node_index].log_file_header);
Austin Schuh2f8fd752020-09-01 22:38:28 -0700522 }
523}
524
// Drains every channel up to (but not including) time t, writing data,
// delivery timestamps, and forwarded-timestamp contents to their respective
// writers, then advances last_synchronized_time_ to t.
void Logger::LogUntil(monotonic_clock::time_point t) {
  WriteMissingTimestamps();

  // Write each channel to disk, one at a time.
  for (FetcherStruct &f : fetchers_) {
    while (true) {
      // Only fetch the next message once the previous one has been written.
      if (f.written) {
        const auto start = event_loop_->monotonic_now();
        const bool got_new = f.fetcher->FetchNext();
        const auto end = event_loop_->monotonic_now();
        RecordFetchResult(start, end, got_new, &f);
        if (!got_new) {
          VLOG(2) << "No new data on "
                  << configuration::CleanedChannelToString(
                         f.fetcher->channel());
          break;
        }
        f.written = false;
      }

      // TODO(james): Write tests to exercise this logic.
      // Stop once this channel's pending message is at or past the sync point;
      // it stays pending (written == false) for the next LogUntil call.
      if (f.fetcher->context().monotonic_event_time >= t) {
        break;
      }
      if (f.writer != nullptr) {
        // Write!
        const auto start = event_loop_->monotonic_now();
        // Pre-size the builder with the observed worst-case header overhead.
        flatbuffers::FlatBufferBuilder fbb(f.fetcher->context().size +
                                           max_header_size_);
        fbb.ForceDefaults(true);

        fbb.FinishSizePrefixed(PackMessage(&fbb, f.fetcher->context(),
                                           f.channel_index, f.log_type));
        const auto end = event_loop_->monotonic_now();
        RecordCreateMessageTime(start, end, &f);

        VLOG(2) << "Writing data as node "
                << FlatbufferToJson(event_loop_->node()) << " for channel "
                << configuration::CleanedChannelToString(f.fetcher->channel())
                << " to " << f.writer->filename() << " data "
                << FlatbufferToJson(
                       flatbuffers::GetSizePrefixedRoot<MessageHeader>(
                           fbb.GetBufferPointer()));

        // Track the largest header overhead seen so far for future pre-sizing.
        max_header_size_ = std::max(max_header_size_,
                                    fbb.GetSize() - f.fetcher->context().size);
        f.writer->QueueSizedFlatbuffer(&fbb);
      }

      if (f.timestamp_writer != nullptr) {
        // And now handle timestamps.
        const auto start = event_loop_->monotonic_now();
        flatbuffers::FlatBufferBuilder fbb;
        fbb.ForceDefaults(true);

        fbb.FinishSizePrefixed(PackMessage(&fbb, f.fetcher->context(),
                                           f.channel_index,
                                           LogType::kLogDeliveryTimeOnly));
        const auto end = event_loop_->monotonic_now();
        RecordCreateMessageTime(start, end, &f);

        VLOG(2) << "Writing timestamps as node "
                << FlatbufferToJson(event_loop_->node()) << " for channel "
                << configuration::CleanedChannelToString(f.fetcher->channel())
                << " to " << f.timestamp_writer->filename() << " timestamp "
                << FlatbufferToJson(
                       flatbuffers::GetSizePrefixedRoot<MessageHeader>(
                           fbb.GetBufferPointer()));

        f.timestamp_writer->QueueSizedFlatbuffer(&fbb);
      }

      if (f.contents_writer != nullptr) {
        const auto start = event_loop_->monotonic_now();
        // And now handle the special message contents channel.  Copy the
        // message into a FlatBufferBuilder and save it to disk.
        // TODO(austin): We can be more efficient here when we start to
        // care...
        flatbuffers::FlatBufferBuilder fbb;
        fbb.ForceDefaults(true);

        const MessageHeader *msg =
            flatbuffers::GetRoot<MessageHeader>(f.fetcher->context().data);

        logger::MessageHeader::Builder message_header_builder(fbb);

        // TODO(austin): This needs to check the channel_index and confirm
        // that it should be logged before squirreling away the timestamp to
        // disk.  We don't want to log irrelevant timestamps.

        // Note: this must match the same order as MessageBridgeServer and
        // PackMessage.  We want identical headers to have identical
        // on-the-wire formats to make comparing them easier.

        // Translate from the channel index that the event loop uses to the
        // channel index in the log file.
        message_header_builder.add_channel_index(
            event_loop_to_logged_channel_index_[msg->channel_index()]);

        message_header_builder.add_queue_index(msg->queue_index());
        message_header_builder.add_monotonic_sent_time(
            msg->monotonic_sent_time());
        message_header_builder.add_realtime_sent_time(
            msg->realtime_sent_time());

        message_header_builder.add_monotonic_remote_time(
            msg->monotonic_remote_time());
        message_header_builder.add_realtime_remote_time(
            msg->realtime_remote_time());
        message_header_builder.add_remote_queue_index(
            msg->remote_queue_index());

        fbb.FinishSizePrefixed(message_header_builder.Finish());
        const auto end = event_loop_->monotonic_now();
        RecordCreateMessageTime(start, end, &f);

        f.contents_writer->QueueSizedFlatbuffer(&fbb);
      }

      f.written = true;
    }
  }
  last_synchronized_time_ = t;
}
649
Brian Silverman1f345222020-09-24 21:14:48 -0700650void Logger::DoLogData(const monotonic_clock::time_point end_time) {
651 // We want to guarantee that messages aren't out of order by more than
Austin Schuhe309d2a2019-11-29 13:25:21 -0800652 // max_out_of_order_duration. To do this, we need sync points. Every write
653 // cycle should be a sync point.
Austin Schuhe309d2a2019-11-29 13:25:21 -0800654
655 do {
656 // Move the sync point up by at most polling_period. This forces one sync
657 // per iteration, even if it is small.
Brian Silverman1f345222020-09-24 21:14:48 -0700658 LogUntil(std::min(last_synchronized_time_ + polling_period_, end_time));
659
660 on_logged_period_();
Austin Schuhe309d2a2019-11-29 13:25:21 -0800661
Austin Schuhe309d2a2019-11-29 13:25:21 -0800662 // If we missed cycles, we could be pretty far behind. Spin until we are
663 // caught up.
Brian Silverman1f345222020-09-24 21:14:48 -0700664 } while (last_synchronized_time_ + polling_period_ < end_time);
Austin Schuhe309d2a2019-11-29 13:25:21 -0800665}
666
Brian Silvermancb805822020-10-06 17:43:35 -0700667void Logger::RecordFetchResult(aos::monotonic_clock::time_point start,
668 aos::monotonic_clock::time_point end,
669 bool got_new, FetcherStruct *fetcher) {
670 const auto duration = end - start;
671 if (!got_new) {
672 ++total_nop_fetch_count_;
673 total_nop_fetch_time_ += duration;
674 return;
675 }
676 ++total_message_fetch_count_;
677 total_message_fetch_bytes_ += fetcher->fetcher->context().size;
678 total_message_fetch_time_ += duration;
679 if (duration > max_message_fetch_time_) {
680 max_message_fetch_time_ = duration;
681 max_message_fetch_time_channel_ = fetcher->channel_index;
682 max_message_fetch_time_size_ = fetcher->fetcher->context().size;
683 }
684}
685
686void Logger::RecordCreateMessageTime(aos::monotonic_clock::time_point start,
687 aos::monotonic_clock::time_point end,
688 FetcherStruct *fetcher) {
689 const auto duration = end - start;
690 total_copy_time_ += duration;
691 ++total_copy_count_;
692 total_copy_bytes_ += fetcher->fetcher->context().size;
693 if (duration > max_copy_time_) {
694 max_copy_time_ = duration;
695 max_copy_time_channel_ = fetcher->channel_index;
696 max_copy_time_size_ = fetcher->fetcher->context().size;
697 }
698}
699
Austin Schuh11d43732020-09-21 17:28:30 -0700700std::vector<std::vector<std::string>> ToLogReaderVector(
701 const std::vector<LogFile> &log_files) {
702 std::vector<std::vector<std::string>> result;
703 for (const LogFile &log_file : log_files) {
704 for (const LogParts &log_parts : log_file.parts) {
705 std::vector<std::string> parts;
706 for (const std::string &part : log_parts.parts) {
707 parts.emplace_back(part);
708 }
709 result.emplace_back(std::move(parts));
710 }
Austin Schuh5212cad2020-09-09 23:12:09 -0700711 }
712 return result;
713}
714
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -0800715LogReader::LogReader(std::string_view filename,
716 const Configuration *replay_configuration)
Austin Schuhfa895892020-01-07 20:07:41 -0800717 : LogReader(std::vector<std::string>{std::string(filename)},
718 replay_configuration) {}
719
720LogReader::LogReader(const std::vector<std::string> &filenames,
721 const Configuration *replay_configuration)
Austin Schuh6f3babe2020-01-26 20:34:50 -0800722 : LogReader(std::vector<std::vector<std::string>>{filenames},
723 replay_configuration) {}
724
// TODO(austin): Make this the base and kill the others.  This has much better
// context for sorting.
//
// Constructor taking pre-sorted LogFile structures; flattens them back to the
// filename-list form the general constructor expects.
LogReader::LogReader(const std::vector<LogFile> &log_files,
                     const Configuration *replay_configuration)
    : LogReader(ToLogReaderVector(log_files), replay_configuration) {}
730
// Fully general constructor.  Reads the log header eagerly (so
// logged_configuration() is usable for the rest of construction), builds the
// remapped replay configuration, and sets up per-node State storage.
LogReader::LogReader(const std::vector<std::vector<std::string>> &filenames,
                     const Configuration *replay_configuration)
    : filenames_(filenames),
      log_file_header_(MaybeReadHeaderOrDie(filenames)),
      replay_configuration_(replay_configuration) {
  MakeRemappedConfig();

  // Remap all existing remote timestamp channels.  They will be recreated, and
  // the data logged isn't relevant anymore.
  for (const Node *node : configuration::GetNodes(logged_configuration())) {
    std::vector<const Node *> timestamp_logger_nodes =
        configuration::TimestampNodes(logged_configuration(), node);
    for (const Node *remote_node : timestamp_logger_nodes) {
      const std::string channel = absl::StrCat(
          "/aos/remote_timestamps/", remote_node->name()->string_view());
      // The remote timestamp channel must exist in the logged config, or the
      // log is inconsistent with the timestamp-logging setup.
      CHECK(HasChannel<logger::MessageHeader>(channel, node))
          << ": Failed to find {\"name\": \"" << channel << "\", \"type\": \""
          << logger::MessageHeader::GetFullyQualifiedName() << "\"} for node "
          << node->name()->string_view();
      RemapLoggedChannel<logger::MessageHeader>(channel, node);
    }
  }

  // A replay config, if provided, must agree with the log about whether this
  // is a single-node or multi-node system.
  if (replay_configuration) {
    CHECK_EQ(configuration::MultiNode(configuration()),
             configuration::MultiNode(replay_configuration))
        << ": Log file and replay config need to both be multi or single "
           "node.";
  }

  if (!configuration::MultiNode(configuration())) {
    // Single node: one State wrapping a ChannelMerger over all the files.
    states_.emplace_back(
        std::make_unique<State>(std::make_unique<ChannelMerger>(filenames)));
  } else {
    if (replay_configuration) {
      // For multi-node replay, the node lists must match exactly.
      CHECK_EQ(logged_configuration()->nodes()->size(),
               replay_configuration->nodes()->size())
          << ": Log file and replay config need to have matching nodes "
             "lists.";
      for (const Node *node : *logged_configuration()->nodes()) {
        if (configuration::GetNode(replay_configuration, node) == nullptr) {
          LOG(FATAL) << "Found node " << FlatbufferToJson(node)
                     << " in logged config that is not present in the replay "
                        "config.";
        }
      }
    }
    // States are allocated lazily in Register(); just size the vector here.
    states_.resize(configuration()->nodes()->size());
  }
}
781
LogReader::~LogReader() {
  // If we own the event loop factory, we can safely tear everything down
  // ourselves.  If the caller owns it, they must have called Deregister()
  // already -- otherwise callbacks could fire into a destroyed LogReader.
  if (event_loop_factory_unique_ptr_) {
    Deregister();
  } else if (event_loop_factory_ != nullptr) {
    LOG(FATAL) << "Must call Deregister before the SimulatedEventLoopFactory "
                  "is destroyed";
  }
  // Close the timestamp-offset CSV file if debugging output was enabled.
  if (offset_fp_ != nullptr) {
    fclose(offset_fp_);
  }
  // Zero out some buffers.  It's easy to do use-after-frees on these, so make
  // it more obvious.
  if (remapped_configuration_buffer_) {
    remapped_configuration_buffer_->Wipe();
  }
  log_file_header_.Wipe();
}
Austin Schuhe309d2a2019-11-29 13:25:21 -0800799
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -0800800const Configuration *LogReader::logged_configuration() const {
Austin Schuh6f3babe2020-01-26 20:34:50 -0800801 return log_file_header_.message().configuration();
Austin Schuhe309d2a2019-11-29 13:25:21 -0800802}
803
const Configuration *LogReader::configuration() const {
  // Returns the remapped configuration used for replay (built in
  // MakeRemappedConfig / Register), not the configuration stored in the log.
  return remapped_configuration_;
}
807
// Returns the list of nodes from the remapped (replay) configuration.
std::vector<const Node *> LogReader::Nodes() const {
  // Because the Node pointer will only be valid if it actually points to
  // memory owned by remapped_configuration_, we need to wait for the
  // remapped_configuration_ to be populated before accessing it.
  //
  // Also, note, that when ever a map is changed, the nodes in here are
  // invalidated.
  CHECK(remapped_configuration_ != nullptr)
      << ": Need to call Register before the node() pointer will be valid.";
  return configuration::GetNodes(remapped_configuration_);
}
Austin Schuh15649d62019-12-28 16:36:38 -0800819
Austin Schuh11d43732020-09-21 17:28:30 -0700820monotonic_clock::time_point LogReader::monotonic_start_time(
821 const Node *node) const {
Austin Schuh8bd96322020-02-13 21:18:22 -0800822 State *state =
823 states_[configuration::GetNodeIndex(configuration(), node)].get();
824 CHECK(state != nullptr) << ": Unknown node " << FlatbufferToJson(node);
825
Austin Schuh858c9f32020-08-31 16:56:12 -0700826 return state->monotonic_start_time();
Austin Schuhe309d2a2019-11-29 13:25:21 -0800827}
828
Austin Schuh11d43732020-09-21 17:28:30 -0700829realtime_clock::time_point LogReader::realtime_start_time(
830 const Node *node) const {
Austin Schuh8bd96322020-02-13 21:18:22 -0800831 State *state =
832 states_[configuration::GetNodeIndex(configuration(), node)].get();
833 CHECK(state != nullptr) << ": Unknown node " << FlatbufferToJson(node);
834
Austin Schuh858c9f32020-08-31 16:56:12 -0700835 return state->realtime_start_time();
Austin Schuhe309d2a2019-11-29 13:25:21 -0800836}
837
James Kuszmaul84ff3e52020-01-03 19:48:53 -0800838void LogReader::Register() {
839 event_loop_factory_unique_ptr_ =
Austin Schuhac0771c2020-01-07 18:36:30 -0800840 std::make_unique<SimulatedEventLoopFactory>(configuration());
James Kuszmaul84ff3e52020-01-03 19:48:53 -0800841 Register(event_loop_factory_unique_ptr_.get());
842}
843
// Registers the reader against an externally-owned event loop factory.  Sets
// up per-node States, builds the clock-offset estimation problem, runs the
// simulation forward to the latest per-node start time, and prepares
// forwarding/statistics so replay doesn't duplicate message-bridge traffic.
void LogReader::Register(SimulatedEventLoopFactory *event_loop_factory) {
  event_loop_factory_ = event_loop_factory;
  remapped_configuration_ = event_loop_factory_->configuration();

  // First pass: construct every node's State (each with its own
  // ChannelMerger over all the log files) and attach it to that node's
  // event loop factory.
  for (const Node *node : configuration::GetNodes(configuration())) {
    const size_t node_index =
        configuration::GetNodeIndex(configuration(), node);
    states_[node_index] =
        std::make_unique<State>(std::make_unique<ChannelMerger>(filenames_));
    State *state = states_[node_index].get();
    state->set_event_loop(state->SetNodeEventLoopFactory(
        event_loop_factory_->GetNodeEventLoopFactory(node)));

    state->SetChannelCount(logged_configuration()->channels()->size());
  }

  // Register after making all the State objects so we can build references
  // between them.
  for (const Node *node : configuration::GetNodes(configuration())) {
    const size_t node_index =
        configuration::GetNodeIndex(configuration(), node);
    State *state = states_[node_index].get();

    Register(state->event_loop());
  }

  // live_nodes_ is incremented as nodes with data register; zero means the
  // logs don't cover any node in this config.
  if (live_nodes_ == 0) {
    LOG(FATAL)
        << "Don't have logs from any of the nodes in the replay config--are "
           "you sure that the replay config matches the original config?";
  }

  // We need to now seed our per-node time offsets and get everything set up
  // to run.
  const size_t num_nodes = nodes_count();

  // It is easiest to solve for per node offsets with a matrix rather than
  // trying to solve the equations by hand.  So let's get after it.
  //
  // Now, build up the map matrix.
  //
  // offset_matrix_ = (map_matrix_ + slope_matrix_) * [ta; tb; tc]
  //
  // Row 0 is the "average to the distributed clock" constraint; each
  // subsequent row is one node-pair offset sample.  mpq_class (exact
  // rationals) avoids floating point error in the constraint matrix.
  map_matrix_ = Eigen::Matrix<mpq_class, Eigen::Dynamic, Eigen::Dynamic>::Zero(
      filters_.size() + 1, num_nodes);
  slope_matrix_ =
      Eigen::Matrix<mpq_class, Eigen::Dynamic, Eigen::Dynamic>::Zero(
          filters_.size() + 1, num_nodes);

  offset_matrix_ =
      Eigen::Matrix<mpq_class, Eigen::Dynamic, 1>::Zero(filters_.size() + 1);
  valid_matrix_ =
      Eigen::Matrix<bool, Eigen::Dynamic, 1>::Zero(filters_.size() + 1);
  last_valid_matrix_ =
      Eigen::Matrix<bool, Eigen::Dynamic, 1>::Zero(filters_.size() + 1);

  time_offset_matrix_ = Eigen::VectorXd::Zero(num_nodes);
  time_slope_matrix_ = Eigen::VectorXd::Zero(num_nodes);

  // All times should average out to the distributed clock.
  for (int i = 0; i < map_matrix_.cols(); ++i) {
    // 1/num_nodes.
    map_matrix_(0, i) = mpq_class(1, num_nodes);
  }
  valid_matrix_(0) = true;

  {
    // Now, add the a - b -> sample elements.
    size_t i = 1;
    for (std::pair<const std::tuple<const Node *, const Node *>,
                   std::tuple<message_bridge::NoncausalOffsetEstimator>>
             &filter : filters_) {
      const Node *const node_a = std::get<0>(filter.first);
      const Node *const node_b = std::get<1>(filter.first);

      const size_t node_a_index =
          configuration::GetNodeIndex(configuration(), node_a);
      const size_t node_b_index =
          configuration::GetNodeIndex(configuration(), node_b);

      // -a
      map_matrix_(i, node_a_index) = mpq_class(-1);
      // +b
      map_matrix_(i, node_b_index) = mpq_class(1);

      // -> sample
      // The estimator writes its slope/offset/validity directly into the
      // matrices through these pointers as new timestamps arrive.
      std::get<0>(filter.second)
          .set_slope_pointer(&slope_matrix_(i, node_a_index));
      std::get<0>(filter.second).set_offset_pointer(&offset_matrix_(i, 0));

      valid_matrix_(i) = false;
      std::get<0>(filter.second).set_valid_pointer(&valid_matrix_(i));

      ++i;
    }
  }

  for (std::unique_ptr<State> &state : states_) {
    state->SeedSortedMessages();
  }

  // Rank of the map matrix tells you if all the nodes are in communication
  // with each other, which tells you if the offsets are observable.
  const size_t connected_nodes =
      Eigen::FullPivLU<
          Eigen::Matrix<mpq_class, Eigen::Dynamic, Eigen::Dynamic>>(map_matrix_)
          .rank();

  // We don't need to support isolated nodes until someone has a real use
  // case.
  CHECK_EQ(connected_nodes, num_nodes)
      << ": There is a node which isn't communicating with the rest.";

  // And solve.
  UpdateOffsets();

  // We want to start the log file at the last start time of the log files
  // from all the nodes.  Compute how long each node's simulation needs to run
  // to move time to this point.
  distributed_clock::time_point start_time = distributed_clock::min_time;

  // TODO(austin): We want an "OnStart" callback for each node rather than
  // running until the last node.

  for (std::unique_ptr<State> &state : states_) {
    VLOG(1) << "Start time is " << state->monotonic_start_time() << " for node "
            << MaybeNodeName(state->event_loop()->node()) << "now "
            << state->monotonic_now();
    // And start computing the start time on the distributed clock now that
    // that works.
    start_time = std::max(
        start_time, state->ToDistributedClock(state->monotonic_start_time()));
  }

  CHECK_GE(start_time, distributed_clock::epoch())
      << ": Hmm, we have a node starting before the start of time.  Offset "
         "everything.";

  // Forwarding is tracked per channel.  If it is enabled, we want to turn it
  // off.  Otherwise messages replayed will get forwarded across to the other
  // nodes, and also replayed on the other nodes.  This may not satisfy all
  // our users, but it'll start the discussion.
  if (configuration::MultiNode(event_loop_factory_->configuration())) {
    for (size_t i = 0; i < logged_configuration()->channels()->size(); ++i) {
      const Channel *channel = logged_configuration()->channels()->Get(i);
      const Node *node = configuration::GetNode(
          configuration(), channel->source_node()->string_view());

      State *state =
          states_[configuration::GetNodeIndex(configuration(), node)].get();

      const Channel *remapped_channel =
          RemapChannel(state->event_loop(), channel);

      event_loop_factory_->DisableForwarding(remapped_channel);
    }

    // If we are replaying a log, we don't want a bunch of redundant messages
    // from both the real message bridge and simulated message bridge.
    event_loop_factory_->DisableStatistics();
  }

  // While we are starting the system up, we might be relying on matching data
  // to timestamps on log files where the timestamp log file starts before the
  // data.  In this case, it is reasonable to expect missing data.
  ignore_missing_data_ = true;
  VLOG(1) << "Running until " << start_time << " in Register";
  event_loop_factory_->RunFor(start_time.time_since_epoch());
  VLOG(1) << "At start time";
  // Now that we are running for real, missing data means that the log file is
  // corrupted or went wrong.
  ignore_missing_data_ = false;

  for (std::unique_ptr<State> &state : states_) {
    // Make the RT clock be correct before handing it to the user.
    if (state->realtime_start_time() != realtime_clock::min_time) {
      state->SetRealtimeOffset(state->monotonic_start_time(),
                               state->realtime_start_time());
    }
    VLOG(1) << "Start time is " << state->monotonic_start_time() << " for node "
            << MaybeNodeName(state->event_loop()->node()) << "now "
            << state->monotonic_now();
  }

  // Seed the CSV debug output with the current per-node times so the offset
  // traces have a defined origin.
  if (FLAGS_timestamps_to_csv) {
    for (std::pair<const std::tuple<const Node *, const Node *>,
                   std::tuple<message_bridge::NoncausalOffsetEstimator>>
             &filter : filters_) {
      const Node *const node_a = std::get<0>(filter.first);
      const Node *const node_b = std::get<1>(filter.first);

      std::get<0>(filter.second)
          .SetFirstFwdTime(event_loop_factory_->GetNodeEventLoopFactory(node_a)
                               ->monotonic_now());
      std::get<0>(filter.second)
          .SetFirstRevTime(event_loop_factory_->GetNodeEventLoopFactory(node_b)
                               ->monotonic_now());
    }
  }
}
1043
Austin Schuh2f8fd752020-09-01 22:38:28 -07001044void LogReader::UpdateOffsets() {
1045 VLOG(2) << "Samples are " << offset_matrix_;
1046 VLOG(2) << "Map is " << (map_matrix_ + slope_matrix_);
1047 std::tie(time_slope_matrix_, time_offset_matrix_) = SolveOffsets();
1048 Eigen::IOFormat HeavyFmt(Eigen::FullPrecision, 0, ", ", ";\n", "[", "]", "[",
1049 "]");
1050 VLOG(1) << "First slope " << time_slope_matrix_.transpose().format(HeavyFmt)
1051 << " offset " << time_offset_matrix_.transpose().format(HeavyFmt);
1052
1053 size_t node_index = 0;
1054 for (std::unique_ptr<State> &state : states_) {
1055 state->SetDistributedOffset(offset(node_index), slope(node_index));
1056 VLOG(1) << "Offset for node " << node_index << " "
1057 << MaybeNodeName(state->event_loop()->node()) << "is "
1058 << aos::distributed_clock::time_point(offset(node_index))
1059 << " slope " << std::setprecision(9) << std::fixed
1060 << slope(node_index);
1061 ++node_index;
1062 }
1063
1064 if (VLOG_IS_ON(1)) {
1065 LogFit("Offset is");
1066 }
1067}
1068
// Debug helper: dumps the current clock-fit state for every node pair.  For
// each filter it recomputes the pairwise slope/offset implied by the solved
// per-node fits and compares them against the estimator's own fit, then
// round-trips sample timestamps through the distributed clock as a sanity
// check.  Output is VLOG(1) only; this has no effect on replay state.
void LogReader::LogFit(std::string_view prefix) {
  for (std::unique_ptr<State> &state : states_) {
    VLOG(1) << MaybeNodeName(state->event_loop()->node()) << " now "
            << state->monotonic_now() << " distributed "
            << event_loop_factory_->distributed_now();
  }

  for (std::pair<const std::tuple<const Node *, const Node *>,
                 std::tuple<message_bridge::NoncausalOffsetEstimator>> &filter :
       filters_) {
    message_bridge::NoncausalOffsetEstimator *estimator =
        &std::get<0>(filter.second);

    // Skip pairs with no timestamp samples yet; there is nothing to report.
    if (estimator->a_timestamps().size() == 0 &&
        estimator->b_timestamps().size() == 0) {
      continue;
    }

    if (VLOG_IS_ON(1)) {
      estimator->LogFit(prefix);
    }

    const Node *const node_a = std::get<0>(filter.first);
    const Node *const node_b = std::get<1>(filter.first);

    const size_t node_a_index =
        configuration::GetNodeIndex(configuration(), node_a);
    const size_t node_b_index =
        configuration::GetNodeIndex(configuration(), node_b);

    // Reconstruct the a->b relative slope and offset from the solved
    // per-node fits so they can be compared with the estimator's direct fit.
    const double recovered_slope =
        slope(node_b_index) / slope(node_a_index) - 1.0;
    const int64_t recovered_offset =
        offset(node_b_index).count() - offset(node_a_index).count() *
                                           slope(node_b_index) /
                                           slope(node_a_index);

    VLOG(1) << "Recovered slope " << std::setprecision(20) << recovered_slope
            << " (error " << recovered_slope - estimator->fit().slope() << ") "
            << " offset " << std::setprecision(20) << recovered_offset
            << " (error "
            << recovered_offset - estimator->fit().offset().count() << ")";

    // Map node A's first and second sample times onto the distributed clock.
    const aos::distributed_clock::time_point a0 =
        states_[node_a_index]->ToDistributedClock(
            std::get<0>(estimator->a_timestamps()[0]));
    const aos::distributed_clock::time_point a1 =
        states_[node_a_index]->ToDistributedClock(
            std::get<0>(estimator->a_timestamps()[1]));

    // Round-trip each sample into node B's clock and compare against what
    // the estimator's fit predicts; flag samples that land on the wrong side
    // of "now".
    VLOG(1) << node_a->name()->string_view() << " timestamps()[0] = "
            << std::get<0>(estimator->a_timestamps()[0]) << " -> " << a0
            << " distributed -> " << node_b->name()->string_view() << " "
            << states_[node_b_index]->FromDistributedClock(a0) << " should be "
            << aos::monotonic_clock::time_point(
                   std::chrono::nanoseconds(static_cast<int64_t>(
                       std::get<0>(estimator->a_timestamps()[0])
                           .time_since_epoch()
                           .count() *
                       (1.0 + estimator->fit().slope()))) +
                   estimator->fit().offset())
            << ((a0 <= event_loop_factory_->distributed_now())
                    ? ""
                    : " After now, investigate");
    VLOG(1) << node_a->name()->string_view() << " timestamps()[1] = "
            << std::get<0>(estimator->a_timestamps()[1]) << " -> " << a1
            << " distributed -> " << node_b->name()->string_view() << " "
            << states_[node_b_index]->FromDistributedClock(a1) << " should be "
            << aos::monotonic_clock::time_point(
                   std::chrono::nanoseconds(static_cast<int64_t>(
                       std::get<0>(estimator->a_timestamps()[1])
                           .time_since_epoch()
                           .count() *
                       (1.0 + estimator->fit().slope()))) +
                   estimator->fit().offset())
            << ((event_loop_factory_->distributed_now() <= a1)
                    ? ""
                    : " Before now, investigate");

    // Same check in the reverse direction, starting from node B's samples.
    const aos::distributed_clock::time_point b0 =
        states_[node_b_index]->ToDistributedClock(
            std::get<0>(estimator->b_timestamps()[0]));
    const aos::distributed_clock::time_point b1 =
        states_[node_b_index]->ToDistributedClock(
            std::get<0>(estimator->b_timestamps()[1]));

    VLOG(1) << node_b->name()->string_view() << " timestamps()[0] = "
            << std::get<0>(estimator->b_timestamps()[0]) << " -> " << b0
            << " distributed -> " << node_a->name()->string_view() << " "
            << states_[node_a_index]->FromDistributedClock(b0)
            << ((b0 <= event_loop_factory_->distributed_now())
                    ? ""
                    : " After now, investigate");
    VLOG(1) << node_b->name()->string_view() << " timestamps()[1] = "
            << std::get<0>(estimator->b_timestamps()[1]) << " -> " << b1
            << " distributed -> " << node_a->name()->string_view() << " "
            << states_[node_a_index]->FromDistributedClock(b1)
            << ((event_loop_factory_->distributed_now() <= b1)
                    ? ""
                    : " Before now, investigate");
  }
}
1171
1172message_bridge::NoncausalOffsetEstimator *LogReader::GetFilter(
Austin Schuh8bd96322020-02-13 21:18:22 -08001173 const Node *node_a, const Node *node_b) {
1174 CHECK_NE(node_a, node_b);
1175 CHECK_EQ(configuration::GetNode(configuration(), node_a), node_a);
1176 CHECK_EQ(configuration::GetNode(configuration(), node_b), node_b);
1177
1178 if (node_a > node_b) {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001179 return GetFilter(node_b, node_a);
Austin Schuh8bd96322020-02-13 21:18:22 -08001180 }
1181
1182 auto tuple = std::make_tuple(node_a, node_b);
1183
1184 auto it = filters_.find(tuple);
1185
1186 if (it == filters_.end()) {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001187 auto &x =
1188 filters_
1189 .insert(std::make_pair(
1190 tuple, std::make_tuple(message_bridge::NoncausalOffsetEstimator(
1191 node_a, node_b))))
1192 .first->second;
Austin Schuh8bd96322020-02-13 21:18:22 -08001193 if (FLAGS_timestamps_to_csv) {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001194 std::get<0>(x).SetFwdCsvFileName(absl::StrCat(
1195 "/tmp/timestamp_noncausal_", node_a->name()->string_view(), "_",
1196 node_b->name()->string_view()));
1197 std::get<0>(x).SetRevCsvFileName(absl::StrCat(
1198 "/tmp/timestamp_noncausal_", node_b->name()->string_view(), "_",
1199 node_a->name()->string_view()));
Austin Schuh8bd96322020-02-13 21:18:22 -08001200 }
1201
Austin Schuh2f8fd752020-09-01 22:38:28 -07001202 return &std::get<0>(x);
Austin Schuh8bd96322020-02-13 21:18:22 -08001203 } else {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001204 return &std::get<0>(it->second);
Austin Schuh8bd96322020-02-13 21:18:22 -08001205 }
1206}
1207
Austin Schuhe309d2a2019-11-29 13:25:21 -08001208void LogReader::Register(EventLoop *event_loop) {
Austin Schuh8bd96322020-02-13 21:18:22 -08001209 State *state =
1210 states_[configuration::GetNodeIndex(configuration(), event_loop->node())]
1211 .get();
Austin Schuh6f3babe2020-01-26 20:34:50 -08001212
Austin Schuh858c9f32020-08-31 16:56:12 -07001213 state->set_event_loop(event_loop);
Austin Schuhe309d2a2019-11-29 13:25:21 -08001214
Tyler Chatow67ddb032020-01-12 14:30:04 -08001215 // We don't run timing reports when trying to print out logged data, because
1216 // otherwise we would end up printing out the timing reports themselves...
1217 // This is only really relevant when we are replaying into a simulation.
Austin Schuh6f3babe2020-01-26 20:34:50 -08001218 event_loop->SkipTimingReport();
1219 event_loop->SkipAosLog();
Austin Schuh39788ff2019-12-01 18:22:57 -08001220
Austin Schuh858c9f32020-08-31 16:56:12 -07001221 const bool has_data = state->SetNode();
Austin Schuhe309d2a2019-11-29 13:25:21 -08001222
Austin Schuh8d7e0bb2020-10-02 17:57:00 -07001223 for (size_t logged_channel_index = 0;
1224 logged_channel_index < logged_configuration()->channels()->size();
1225 ++logged_channel_index) {
1226 const Channel *channel = RemapChannel(
1227 event_loop,
1228 logged_configuration()->channels()->Get(logged_channel_index));
Austin Schuh8bd96322020-02-13 21:18:22 -08001229
Austin Schuh2f8fd752020-09-01 22:38:28 -07001230 message_bridge::NoncausalOffsetEstimator *filter = nullptr;
Austin Schuh8d7e0bb2020-10-02 17:57:00 -07001231 aos::Sender<MessageHeader> *remote_timestamp_sender = nullptr;
1232
1233 State *source_state = nullptr;
Austin Schuh8bd96322020-02-13 21:18:22 -08001234
1235 if (!configuration::ChannelIsSendableOnNode(channel, event_loop->node()) &&
1236 configuration::ChannelIsReadableOnNode(channel, event_loop->node())) {
Austin Schuh8d7e0bb2020-10-02 17:57:00 -07001237 // We've got a message which is being forwarded to this node.
1238 const Node *source_node = configuration::GetNode(
Austin Schuh8bd96322020-02-13 21:18:22 -08001239 event_loop->configuration(), channel->source_node()->string_view());
Austin Schuh8d7e0bb2020-10-02 17:57:00 -07001240 filter = GetFilter(event_loop->node(), source_node);
Austin Schuh8bd96322020-02-13 21:18:22 -08001241
Austin Schuh8d7e0bb2020-10-02 17:57:00 -07001242 // Delivery timestamps are supposed to be logged back on the source node.
1243 // Configure remote timestamps to be sent.
1244 const bool delivery_time_is_logged =
1245 configuration::ConnectionDeliveryTimeIsLoggedOnNode(
1246 channel, event_loop->node(), source_node);
1247
1248 source_state =
1249 states_[configuration::GetNodeIndex(configuration(), source_node)]
1250 .get();
1251
1252 if (delivery_time_is_logged) {
1253 remote_timestamp_sender =
1254 source_state->RemoteTimestampSender(event_loop->node());
Austin Schuh8bd96322020-02-13 21:18:22 -08001255 }
1256 }
Austin Schuh858c9f32020-08-31 16:56:12 -07001257
Austin Schuh8d7e0bb2020-10-02 17:57:00 -07001258 state->SetChannel(
1259 logged_channel_index,
1260 configuration::ChannelIndex(event_loop->configuration(), channel),
1261 event_loop->MakeRawSender(channel), filter, remote_timestamp_sender,
1262 source_state);
Austin Schuhe309d2a2019-11-29 13:25:21 -08001263 }
1264
Austin Schuh6aa77be2020-02-22 21:06:40 -08001265 // If we didn't find any log files with data in them, we won't ever get a
1266 // callback or be live. So skip the rest of the setup.
1267 if (!has_data) {
1268 return;
1269 }
1270
Austin Schuh858c9f32020-08-31 16:56:12 -07001271 state->set_timer_handler(event_loop->AddTimer([this, state]() {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001272 VLOG(1) << "Starting sending " << MaybeNodeName(state->event_loop()->node())
1273 << "at " << state->event_loop()->context().monotonic_event_time
1274 << " now " << state->monotonic_now();
Austin Schuh858c9f32020-08-31 16:56:12 -07001275 if (state->OldestMessageTime() == monotonic_clock::max_time) {
Austin Schuh6f3babe2020-01-26 20:34:50 -08001276 --live_nodes_;
Austin Schuh2f8fd752020-09-01 22:38:28 -07001277 VLOG(1) << MaybeNodeName(state->event_loop()->node()) << "Node down!";
Austin Schuh6f3babe2020-01-26 20:34:50 -08001278 if (live_nodes_ == 0) {
1279 event_loop_factory_->Exit();
1280 }
James Kuszmaul314f1672020-01-03 20:02:08 -08001281 return;
1282 }
Austin Schuh6f3babe2020-01-26 20:34:50 -08001283 TimestampMerger::DeliveryTimestamp channel_timestamp;
Austin Schuh05b70472020-01-01 17:11:17 -08001284 int channel_index;
1285 FlatbufferVector<MessageHeader> channel_data =
1286 FlatbufferVector<MessageHeader>::Empty();
1287
Austin Schuh2f8fd752020-09-01 22:38:28 -07001288 if (VLOG_IS_ON(1)) {
1289 LogFit("Offset was");
1290 }
1291
1292 bool update_time;
Austin Schuh05b70472020-01-01 17:11:17 -08001293 std::tie(channel_timestamp, channel_index, channel_data) =
Austin Schuh2f8fd752020-09-01 22:38:28 -07001294 state->PopOldest(&update_time);
Austin Schuh05b70472020-01-01 17:11:17 -08001295
Austin Schuhe309d2a2019-11-29 13:25:21 -08001296 const monotonic_clock::time_point monotonic_now =
Austin Schuh858c9f32020-08-31 16:56:12 -07001297 state->event_loop()->context().monotonic_event_time;
Austin Schuh2f8fd752020-09-01 22:38:28 -07001298 if (!FLAGS_skip_order_validation) {
1299 CHECK(monotonic_now == channel_timestamp.monotonic_event_time)
1300 << ": " << FlatbufferToJson(state->event_loop()->node()) << " Now "
1301 << monotonic_now << " trying to send "
1302 << channel_timestamp.monotonic_event_time << " failure "
1303 << state->DebugString();
1304 } else if (monotonic_now != channel_timestamp.monotonic_event_time) {
1305 LOG(WARNING) << "Check failed: monotonic_now == "
1306 "channel_timestamp.monotonic_event_time) ("
1307 << monotonic_now << " vs. "
1308 << channel_timestamp.monotonic_event_time
1309 << "): " << FlatbufferToJson(state->event_loop()->node())
1310 << " Now " << monotonic_now << " trying to send "
1311 << channel_timestamp.monotonic_event_time << " failure "
1312 << state->DebugString();
1313 }
Austin Schuhe309d2a2019-11-29 13:25:21 -08001314
Austin Schuh6f3babe2020-01-26 20:34:50 -08001315 if (channel_timestamp.monotonic_event_time >
Austin Schuh858c9f32020-08-31 16:56:12 -07001316 state->monotonic_start_time() ||
Austin Schuh15649d62019-12-28 16:36:38 -08001317 event_loop_factory_ != nullptr) {
Austin Schuh8bd96322020-02-13 21:18:22 -08001318 if ((!ignore_missing_data_ && !FLAGS_skip_missing_forwarding_entries &&
Austin Schuh858c9f32020-08-31 16:56:12 -07001319 !state->at_end()) ||
Austin Schuh05b70472020-01-01 17:11:17 -08001320 channel_data.message().data() != nullptr) {
1321 CHECK(channel_data.message().data() != nullptr)
1322 << ": Got a message without data. Forwarding entry which was "
Austin Schuh2f8fd752020-09-01 22:38:28 -07001323 "not matched? Use --skip_missing_forwarding_entries to "
Brian Silverman87ac0402020-09-17 14:47:01 -07001324 "ignore this.";
Austin Schuh92547522019-12-28 14:33:43 -08001325
Austin Schuh2f8fd752020-09-01 22:38:28 -07001326 if (update_time) {
Austin Schuh8bd96322020-02-13 21:18:22 -08001327 // Confirm that the message was sent on the sending node before the
1328 // destination node (this node). As a proxy, do this by making sure
1329 // that time on the source node is past when the message was sent.
Austin Schuh2f8fd752020-09-01 22:38:28 -07001330 if (!FLAGS_skip_order_validation) {
1331 CHECK_LT(channel_timestamp.monotonic_remote_time,
1332 state->monotonic_remote_now(channel_index))
1333 << state->event_loop()->node()->name()->string_view() << " to "
1334 << state->remote_node(channel_index)->name()->string_view()
1335 << " " << state->DebugString();
1336 } else if (channel_timestamp.monotonic_remote_time >=
1337 state->monotonic_remote_now(channel_index)) {
1338 LOG(WARNING)
1339 << "Check failed: channel_timestamp.monotonic_remote_time < "
1340 "state->monotonic_remote_now(channel_index) ("
1341 << channel_timestamp.monotonic_remote_time << " vs. "
1342 << state->monotonic_remote_now(channel_index) << ") "
1343 << state->event_loop()->node()->name()->string_view() << " to "
1344 << state->remote_node(channel_index)->name()->string_view()
1345 << " currently " << channel_timestamp.monotonic_event_time
1346 << " ("
1347 << state->ToDistributedClock(
1348 channel_timestamp.monotonic_event_time)
1349 << ") remote event time "
1350 << channel_timestamp.monotonic_remote_time << " ("
1351 << state->RemoteToDistributedClock(
1352 channel_index, channel_timestamp.monotonic_remote_time)
1353 << ") " << state->DebugString();
1354 }
Austin Schuh8bd96322020-02-13 21:18:22 -08001355
1356 if (FLAGS_timestamps_to_csv) {
1357 if (offset_fp_ == nullptr) {
1358 offset_fp_ = fopen("/tmp/offsets.csv", "w");
1359 fprintf(
1360 offset_fp_,
1361 "# time_since_start, offset node 0, offset node 1, ...\n");
1362 first_time_ = channel_timestamp.realtime_event_time;
1363 }
1364
1365 fprintf(offset_fp_, "%.9f",
1366 std::chrono::duration_cast<std::chrono::duration<double>>(
1367 channel_timestamp.realtime_event_time - first_time_)
1368 .count());
Austin Schuh2f8fd752020-09-01 22:38:28 -07001369 for (int i = 1; i < time_offset_matrix_.rows(); ++i) {
1370 fprintf(offset_fp_, ", %.9f",
1371 time_offset_matrix_(i, 0) +
1372 time_slope_matrix_(i, 0) *
1373 chrono::duration<double>(
1374 event_loop_factory_->distributed_now()
1375 .time_since_epoch())
1376 .count());
Austin Schuh8bd96322020-02-13 21:18:22 -08001377 }
1378 fprintf(offset_fp_, "\n");
1379 }
Austin Schuh8bd96322020-02-13 21:18:22 -08001380 }
1381
Austin Schuh15649d62019-12-28 16:36:38 -08001382 // If we have access to the factory, use it to fix the realtime time.
Austin Schuh858c9f32020-08-31 16:56:12 -07001383 state->SetRealtimeOffset(channel_timestamp.monotonic_event_time,
1384 channel_timestamp.realtime_event_time);
Austin Schuh15649d62019-12-28 16:36:38 -08001385
Austin Schuh2f8fd752020-09-01 22:38:28 -07001386 VLOG(1) << MaybeNodeName(state->event_loop()->node()) << "Sending "
1387 << channel_timestamp.monotonic_event_time;
1388 // TODO(austin): std::move channel_data in and make that efficient in
1389 // simulation.
Austin Schuh858c9f32020-08-31 16:56:12 -07001390 state->Send(channel_index, channel_data.message().data()->Data(),
Austin Schuh8d7e0bb2020-10-02 17:57:00 -07001391 channel_data.message().data()->size(), channel_timestamp);
Austin Schuh2f8fd752020-09-01 22:38:28 -07001392 } else if (state->at_end() && !ignore_missing_data_) {
Austin Schuh8bd96322020-02-13 21:18:22 -08001393 // We are at the end of the log file and found missing data. Finish
Austin Schuh2f8fd752020-09-01 22:38:28 -07001394 // reading the rest of the log file and call it quits. We don't want
1395 // to replay partial data.
Austin Schuh858c9f32020-08-31 16:56:12 -07001396 while (state->OldestMessageTime() != monotonic_clock::max_time) {
1397 bool update_time_dummy;
1398 state->PopOldest(&update_time_dummy);
Austin Schuh8bd96322020-02-13 21:18:22 -08001399 }
Austin Schuh2f8fd752020-09-01 22:38:28 -07001400 } else {
1401 CHECK(channel_data.message().data() == nullptr) << ": Nullptr";
Austin Schuh92547522019-12-28 14:33:43 -08001402 }
Austin Schuhe309d2a2019-11-29 13:25:21 -08001403 } else {
Austin Schuh6f3babe2020-01-26 20:34:50 -08001404 LOG(WARNING)
1405 << "Not sending data from before the start of the log file. "
1406 << channel_timestamp.monotonic_event_time.time_since_epoch().count()
1407 << " start " << monotonic_start_time().time_since_epoch().count()
Austin Schuhd85baf82020-10-19 11:50:12 -07001408 << " "
1409 << FlatbufferToJson(channel_data,
1410 {.multi_line = false, .max_vector_size = 100});
Austin Schuhe309d2a2019-11-29 13:25:21 -08001411 }
1412
Austin Schuh858c9f32020-08-31 16:56:12 -07001413 const monotonic_clock::time_point next_time = state->OldestMessageTime();
Austin Schuh6f3babe2020-01-26 20:34:50 -08001414 if (next_time != monotonic_clock::max_time) {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001415 VLOG(1) << "Scheduling " << MaybeNodeName(state->event_loop()->node())
1416 << "wakeup for " << next_time << "("
1417 << state->ToDistributedClock(next_time)
1418 << " distributed), now is " << state->monotonic_now();
Austin Schuh858c9f32020-08-31 16:56:12 -07001419 state->Setup(next_time);
James Kuszmaul314f1672020-01-03 20:02:08 -08001420 } else {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001421 VLOG(1) << MaybeNodeName(state->event_loop()->node())
1422 << "No next message, scheduling shutdown";
1423 // Set a timer up immediately after now to die. If we don't do this,
1424 // then the senders waiting on the message we just read will never get
1425 // called.
Austin Schuheecb9282020-01-08 17:43:30 -08001426 if (event_loop_factory_ != nullptr) {
Austin Schuh858c9f32020-08-31 16:56:12 -07001427 state->Setup(monotonic_now + event_loop_factory_->send_delay() +
1428 std::chrono::nanoseconds(1));
Austin Schuheecb9282020-01-08 17:43:30 -08001429 }
Austin Schuhe309d2a2019-11-29 13:25:21 -08001430 }
Austin Schuh8bd96322020-02-13 21:18:22 -08001431
Austin Schuh2f8fd752020-09-01 22:38:28 -07001432 // Once we make this call, the current time changes. So do everything
1433 // which involves time before changing it. That especially includes
1434 // sending the message.
1435 if (update_time) {
1436 VLOG(1) << MaybeNodeName(state->event_loop()->node())
1437 << "updating offsets";
1438
1439 std::vector<aos::monotonic_clock::time_point> before_times;
1440 before_times.resize(states_.size());
1441 std::transform(states_.begin(), states_.end(), before_times.begin(),
1442 [](const std::unique_ptr<State> &state) {
1443 return state->monotonic_now();
1444 });
1445
1446 for (size_t i = 0; i < states_.size(); ++i) {
Brian Silvermand90905f2020-09-23 14:42:56 -07001447 VLOG(1) << MaybeNodeName(states_[i]->event_loop()->node()) << "before "
1448 << states_[i]->monotonic_now();
Austin Schuh2f8fd752020-09-01 22:38:28 -07001449 }
1450
Austin Schuh8bd96322020-02-13 21:18:22 -08001451 UpdateOffsets();
Austin Schuh2f8fd752020-09-01 22:38:28 -07001452 VLOG(1) << MaybeNodeName(state->event_loop()->node()) << "Now is now "
1453 << state->monotonic_now();
1454
1455 for (size_t i = 0; i < states_.size(); ++i) {
Brian Silvermand90905f2020-09-23 14:42:56 -07001456 VLOG(1) << MaybeNodeName(states_[i]->event_loop()->node()) << "after "
1457 << states_[i]->monotonic_now();
Austin Schuh2f8fd752020-09-01 22:38:28 -07001458 }
1459
1460 // TODO(austin): We should be perfect.
1461 const std::chrono::nanoseconds kTolerance{3};
1462 if (!FLAGS_skip_order_validation) {
1463 CHECK_GE(next_time, state->monotonic_now())
1464 << ": Time skipped the next event.";
1465
1466 for (size_t i = 0; i < states_.size(); ++i) {
1467 CHECK_GE(states_[i]->monotonic_now(), before_times[i] - kTolerance)
1468 << ": Time changed too much on node "
1469 << MaybeNodeName(states_[i]->event_loop()->node());
1470 CHECK_LE(states_[i]->monotonic_now(), before_times[i] + kTolerance)
1471 << ": Time changed too much on node "
1472 << states_[i]->event_loop()->node()->name()->string_view();
1473 }
1474 } else {
1475 if (next_time < state->monotonic_now()) {
1476 LOG(WARNING) << "Check failed: next_time >= "
1477 "state->monotonic_now() ("
1478 << next_time << " vs. " << state->monotonic_now()
1479 << "): Time skipped the next event.";
1480 }
1481 for (size_t i = 0; i < states_.size(); ++i) {
1482 if (states_[i]->monotonic_now() >= before_times[i] - kTolerance) {
1483 LOG(WARNING) << "Check failed: "
1484 "states_[i]->monotonic_now() "
1485 ">= before_times[i] - kTolerance ("
1486 << states_[i]->monotonic_now() << " vs. "
1487 << before_times[i] - kTolerance
1488 << ") : Time changed too much on node "
1489 << MaybeNodeName(states_[i]->event_loop()->node());
1490 }
1491 if (states_[i]->monotonic_now() <= before_times[i] + kTolerance) {
1492 LOG(WARNING) << "Check failed: "
1493 "states_[i]->monotonic_now() "
1494 "<= before_times[i] + kTolerance ("
1495 << states_[i]->monotonic_now() << " vs. "
1496 << before_times[i] - kTolerance
1497 << ") : Time changed too much on node "
1498 << MaybeNodeName(states_[i]->event_loop()->node());
1499 }
1500 }
1501 }
Austin Schuh8bd96322020-02-13 21:18:22 -08001502 }
Austin Schuh2f8fd752020-09-01 22:38:28 -07001503
1504 VLOG(1) << MaybeNodeName(state->event_loop()->node()) << "Done sending at "
1505 << state->event_loop()->context().monotonic_event_time << " now "
1506 << state->monotonic_now();
Austin Schuh858c9f32020-08-31 16:56:12 -07001507 }));
Austin Schuhe309d2a2019-11-29 13:25:21 -08001508
Austin Schuh6f3babe2020-01-26 20:34:50 -08001509 ++live_nodes_;
1510
Austin Schuh858c9f32020-08-31 16:56:12 -07001511 if (state->OldestMessageTime() != monotonic_clock::max_time) {
1512 event_loop->OnRun([state]() { state->Setup(state->OldestMessageTime()); });
Austin Schuhe309d2a2019-11-29 13:25:21 -08001513 }
1514}
1515
1516void LogReader::Deregister() {
James Kuszmaul84ff3e52020-01-03 19:48:53 -08001517 // Make sure that things get destroyed in the correct order, rather than
1518 // relying on getting the order correct in the class definition.
Austin Schuh8bd96322020-02-13 21:18:22 -08001519 for (std::unique_ptr<State> &state : states_) {
Austin Schuh858c9f32020-08-31 16:56:12 -07001520 state->Deregister();
Austin Schuhe309d2a2019-11-29 13:25:21 -08001521 }
Austin Schuh92547522019-12-28 14:33:43 -08001522
James Kuszmaul84ff3e52020-01-03 19:48:53 -08001523 event_loop_factory_unique_ptr_.reset();
1524 event_loop_factory_ = nullptr;
Austin Schuhe309d2a2019-11-29 13:25:21 -08001525}
1526
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -08001527void LogReader::RemapLoggedChannel(std::string_view name, std::string_view type,
1528 std::string_view add_prefix) {
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -08001529 for (size_t ii = 0; ii < logged_configuration()->channels()->size(); ++ii) {
1530 const Channel *const channel = logged_configuration()->channels()->Get(ii);
1531 if (channel->name()->str() == name &&
1532 channel->type()->string_view() == type) {
1533 CHECK_EQ(0u, remapped_channels_.count(ii))
1534 << "Already remapped channel "
1535 << configuration::CleanedChannelToString(channel);
1536 remapped_channels_[ii] = std::string(add_prefix) + std::string(name);
1537 VLOG(1) << "Remapping channel "
1538 << configuration::CleanedChannelToString(channel)
1539 << " to have name " << remapped_channels_[ii];
Austin Schuh6331ef92020-01-07 18:28:09 -08001540 MakeRemappedConfig();
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -08001541 return;
1542 }
1543 }
1544 LOG(FATAL) << "Unabled to locate channel with name " << name << " and type "
1545 << type;
1546}
1547
// Remaps the logged channel (name, type) as resolved on a specific node, and
// additionally snoops the configuration maps so callers that used the
// pre-map name keep working under the new prefix.  Dies via CHECK if the
// channel cannot be found or was already remapped.
void LogReader::RemapLoggedChannel(std::string_view name, std::string_view type,
                                   const Node *node,
                                   std::string_view add_prefix) {
  VLOG(1) << "Node is " << aos::FlatbufferToJson(node);
  // Resolve (name, type) on the requested node using the logged
  // configuration; this applies any maps already present in the log.
  const Channel *remapped_channel =
      configuration::GetChannel(logged_configuration(), name, type, "", node);
  CHECK(remapped_channel != nullptr) << ": Failed to find {\"name\": \"" << name
                                     << "\", \"type\": \"" << type << "\"}";
  VLOG(1) << "Original {\"name\": \"" << name << "\", \"type\": \"" << type
          << "\"}";
  VLOG(1) << "Remapped "
          << aos::configuration::StrippedChannelToString(remapped_channel);

  // We want to make /spray on node 0 go to /0/spray by snooping the maps. And
  // we want it to degrade if the heuristics fail to just work.
  //
  // The easiest way to do this is going to be incredibly specific and verbose.
  // Look up /spray, to /0/spray.  Then, prefix the result with /original to get
  // /original/0/spray.  Then, create a map from /original/spray to
  // /original/0/spray for just the type we were asked for.
  if (name != remapped_channel->name()->string_view()) {
    // Only needed when the lookup actually changed the name (i.e. a map
    // fired); otherwise there is nothing extra to preserve.
    MapT new_map;
    new_map.match = std::make_unique<ChannelT>();
    new_map.match->name = absl::StrCat(add_prefix, name);
    new_map.match->type = type;
    if (node != nullptr) {
      new_map.match->source_node = node->name()->str();
    }
    new_map.rename = std::make_unique<ChannelT>();
    new_map.rename->name =
        absl::StrCat(add_prefix, remapped_channel->name()->string_view());
    maps_.emplace_back(std::move(new_map));
  }

  const size_t channel_index =
      configuration::ChannelIndex(logged_configuration(), remapped_channel);
  CHECK_EQ(0u, remapped_channels_.count(channel_index))
      << "Already remapped channel "
      << configuration::CleanedChannelToString(remapped_channel);
  remapped_channels_[channel_index] =
      absl::StrCat(add_prefix, remapped_channel->name()->string_view());
  // Rebuild the replay configuration to pick up the rename and the new map.
  MakeRemappedConfig();
}
1591
// Rebuilds remapped_configuration_ from the logged (or replay) configuration
// plus all channel renames in remapped_channels_ and the extra maps_ entries.
// Must be called before any replay event loops exist.
void LogReader::MakeRemappedConfig() {
  // Remapping after senders have been created would leave them pointing at
  // stale channels, so refuse once any event loop is up.
  for (std::unique_ptr<State> &state : states_) {
    if (state) {
      CHECK(!state->event_loop())
          << ": Can't change the mapping after the events are scheduled.";
    }
  }

  // If no remapping occurred and we are using the original config, then there
  // is nothing interesting to do here.
  if (remapped_channels_.empty() && replay_configuration_ == nullptr) {
    remapped_configuration_ = logged_configuration();
    return;
  }
  // Config to copy Channel definitions from. Use the specified
  // replay_configuration_ if it has been provided.
  const Configuration *const base_config = replay_configuration_ == nullptr
                                               ? logged_configuration()
                                               : replay_configuration_;
  // The remapped config will be identical to the base_config, except that it
  // will have a bunch of extra channels in the channel list, which are exact
  // copies of the remapped channels, but with different names.
  // Because the flatbuffers API is a pain to work with, this requires a bit of
  // a song-and-dance to get copied over.
  // The order of operations is to:
  // 1) Make a flatbuffer builder for a config that will just contain a list of
  //    the new channels that we want to add.
  // 2) For each channel that we are remapping:
  //    a) Make a buffer/builder and construct into it a Channel table that only
  //       contains the new name for the channel.
  //    b) Merge the new channel with just the name into the channel that we are
  //       trying to copy, built in the flatbuffer builder made in 1. This gives
  //       us the new channel definition that we need.
  // 3) Using this list of offsets, build the Configuration of just new
  //    Channels.
  // 4) Merge the Configuration with the new Channels into the base_config.
  // 5) Call MergeConfiguration() on that result to give MergeConfiguration a
  //    chance to sanitize the config.

  // This is the builder that we use for the config containing all the new
  // channels.
  flatbuffers::FlatBufferBuilder new_config_fbb;
  new_config_fbb.ForceDefaults(true);
  std::vector<flatbuffers::Offset<Channel>> channel_offsets;
  for (auto &pair : remapped_channels_) {
    // This is the builder that we use for creating the Channel with just the
    // new name.
    flatbuffers::FlatBufferBuilder new_name_fbb;
    new_name_fbb.ForceDefaults(true);
    const flatbuffers::Offset<flatbuffers::String> name_offset =
        new_name_fbb.CreateString(pair.second);
    ChannelBuilder new_name_builder(new_name_fbb);
    new_name_builder.add_name(name_offset);
    new_name_fbb.Finish(new_name_builder.Finish());
    const FlatbufferDetachedBuffer<Channel> new_name = new_name_fbb.Release();
    // Retrieve the channel that we want to copy, confirming that it is
    // actually present in base_config.
    const Channel *const base_channel = CHECK_NOTNULL(configuration::GetChannel(
        base_config, logged_configuration()->channels()->Get(pair.first), "",
        nullptr));
    // Actually create the new channel and put it into the vector of Offsets
    // that we will use to create the new Configuration.
    channel_offsets.emplace_back(MergeFlatBuffers<Channel>(
        reinterpret_cast<const flatbuffers::Table *>(base_channel),
        reinterpret_cast<const flatbuffers::Table *>(&new_name.message()),
        &new_config_fbb));
  }
  // Create the Configuration containing the new channels that we want to add.
  const auto new_channel_vector_offsets =
      new_config_fbb.CreateVector(channel_offsets);

  // Now create the new maps.  These rebuild every entry of maps_ (added by
  // the per-node RemapLoggedChannel) as flatbuffer Map tables.
  std::vector<flatbuffers::Offset<Map>> map_offsets;
  for (const MapT &map : maps_) {
    const flatbuffers::Offset<flatbuffers::String> match_name_offset =
        new_config_fbb.CreateString(map.match->name);
    const flatbuffers::Offset<flatbuffers::String> match_type_offset =
        new_config_fbb.CreateString(map.match->type);
    const flatbuffers::Offset<flatbuffers::String> rename_name_offset =
        new_config_fbb.CreateString(map.rename->name);
    flatbuffers::Offset<flatbuffers::String> match_source_node_offset;
    if (!map.match->source_node.empty()) {
      match_source_node_offset =
          new_config_fbb.CreateString(map.match->source_node);
    }
    Channel::Builder match_builder(new_config_fbb);
    match_builder.add_name(match_name_offset);
    match_builder.add_type(match_type_offset);
    if (!map.match->source_node.empty()) {
      match_builder.add_source_node(match_source_node_offset);
    }
    const flatbuffers::Offset<Channel> match_offset = match_builder.Finish();

    Channel::Builder rename_builder(new_config_fbb);
    rename_builder.add_name(rename_name_offset);
    const flatbuffers::Offset<Channel> rename_offset = rename_builder.Finish();

    Map::Builder map_builder(new_config_fbb);
    map_builder.add_match(match_offset);
    map_builder.add_rename(rename_offset);
    map_offsets.emplace_back(map_builder.Finish());
  }

  const auto new_maps_offsets = new_config_fbb.CreateVector(map_offsets);

  ConfigurationBuilder new_config_builder(new_config_fbb);
  new_config_builder.add_channels(new_channel_vector_offsets);
  new_config_builder.add_maps(new_maps_offsets);
  new_config_fbb.Finish(new_config_builder.Finish());
  const FlatbufferDetachedBuffer<Configuration> new_name_config =
      new_config_fbb.Release();
  // Merge the new channels configuration into the base_config, giving us the
  // remapped configuration.
  remapped_configuration_buffer_ =
      std::make_unique<FlatbufferDetachedBuffer<Configuration>>(
          MergeFlatBuffers<Configuration>(base_config,
                                          &new_name_config.message()));
  // Call MergeConfiguration to deal with sanitizing the config.
  remapped_configuration_buffer_ =
      std::make_unique<FlatbufferDetachedBuffer<Configuration>>(
          configuration::MergeConfiguration(*remapped_configuration_buffer_));

  remapped_configuration_ = &remapped_configuration_buffer_->message();
}
1716
Austin Schuh6f3babe2020-01-26 20:34:50 -08001717const Channel *LogReader::RemapChannel(const EventLoop *event_loop,
1718 const Channel *channel) {
1719 std::string_view channel_name = channel->name()->string_view();
1720 std::string_view channel_type = channel->type()->string_view();
1721 const int channel_index =
1722 configuration::ChannelIndex(logged_configuration(), channel);
1723 // If the channel is remapped, find the correct channel name to use.
1724 if (remapped_channels_.count(channel_index) > 0) {
Austin Schuhee711052020-08-24 16:06:09 -07001725 VLOG(3) << "Got remapped channel on "
Austin Schuh6f3babe2020-01-26 20:34:50 -08001726 << configuration::CleanedChannelToString(channel);
1727 channel_name = remapped_channels_[channel_index];
1728 }
1729
Austin Schuhee711052020-08-24 16:06:09 -07001730 VLOG(2) << "Going to remap channel " << channel_name << " " << channel_type;
Austin Schuh6f3babe2020-01-26 20:34:50 -08001731 const Channel *remapped_channel = configuration::GetChannel(
1732 event_loop->configuration(), channel_name, channel_type,
1733 event_loop->name(), event_loop->node());
1734
1735 CHECK(remapped_channel != nullptr)
1736 << ": Unable to send {\"name\": \"" << channel_name << "\", \"type\": \""
1737 << channel_type << "\"} because it is not in the provided configuration.";
1738
1739 return remapped_channel;
1740}
1741
// Takes ownership of the ChannelMerger that supplies this node's sorted
// stream of logged messages.
LogReader::State::State(std::unique_ptr<ChannelMerger> channel_merger)
    : channel_merger_(std::move(channel_merger)) {}
1744
// Binds this state to a simulated node's factory and creates the event loop
// used to replay messages onto it.  Returns the new event loop, which stays
// owned by this State.
EventLoop *LogReader::State::SetNodeEventLoopFactory(
    NodeEventLoopFactory *node_event_loop_factory) {
  node_event_loop_factory_ = node_event_loop_factory;
  event_loop_unique_ptr_ =
      node_event_loop_factory_->MakeEventLoop("log_reader");
  return event_loop_unique_ptr_.get();
}
1752
1753void LogReader::State::SetChannelCount(size_t count) {
1754 channels_.resize(count);
Austin Schuh8d7e0bb2020-10-02 17:57:00 -07001755 remote_timestamp_senders_.resize(count);
Austin Schuh858c9f32020-08-31 16:56:12 -07001756 filters_.resize(count);
Austin Schuh8d7e0bb2020-10-02 17:57:00 -07001757 channel_source_state_.resize(count);
1758 factory_channel_index_.resize(count);
1759 queue_index_map_.resize(count);
Austin Schuh858c9f32020-08-31 16:56:12 -07001760}
1761
1762void LogReader::State::SetChannel(
Austin Schuh8d7e0bb2020-10-02 17:57:00 -07001763 size_t logged_channel_index, size_t factory_channel_index,
1764 std::unique_ptr<RawSender> sender,
Austin Schuh2f8fd752020-09-01 22:38:28 -07001765 message_bridge::NoncausalOffsetEstimator *filter,
Austin Schuh8d7e0bb2020-10-02 17:57:00 -07001766 aos::Sender<MessageHeader> *remote_timestamp_sender, State *source_state) {
1767 channels_[logged_channel_index] = std::move(sender);
1768 filters_[logged_channel_index] = filter;
1769 remote_timestamp_senders_[logged_channel_index] = remote_timestamp_sender;
1770
1771 if (source_state) {
1772 channel_source_state_[logged_channel_index] = source_state;
1773
1774 if (remote_timestamp_sender != nullptr) {
1775 source_state->queue_index_map_[logged_channel_index] =
1776 std::make_unique<std::vector<State::SentTimestamp>>();
1777 }
1778 }
1779
1780 factory_channel_index_[logged_channel_index] = factory_channel_index;
1781}
1782
// Replays one message on channel_index.  For forwarded channels, first looks
// up the queue index the message got when it was re-sent on the source node,
// so the remote_queue_index of the replayed message matches the replayed (not
// logged) world; then either records this send in queue_index_map_ (if this
// channel is itself a forwarding source) or emits a remote timestamp message.
// Returns false if the underlying sender refused the message.
bool LogReader::State::Send(
    size_t channel_index, const void *data, size_t size,
    const TimestampMerger::DeliveryTimestamp &delivery_timestamp) {
  aos::RawSender *sender = channels_[channel_index].get();
  // 0xffffffff is the sentinel for "no remote queue index known".
  uint32_t remote_queue_index = 0xffffffff;

  if (remote_timestamp_senders_[channel_index] != nullptr) {
    // The source node's state owns the table of its replayed sends.
    std::vector<SentTimestamp> *queue_index_map =
        CHECK_NOTNULL(CHECK_NOTNULL(channel_source_state_[channel_index])
                          ->queue_index_map_[channel_index]
                          .get());

    SentTimestamp search;
    search.monotonic_event_time = delivery_timestamp.monotonic_remote_time;
    search.realtime_event_time = delivery_timestamp.realtime_remote_time;
    search.queue_index = delivery_timestamp.remote_queue_index;

    // Find the sent time if available.  The table is ordered by
    // (monotonic_event_time, queue_index), which this comparator encodes.
    auto element = std::lower_bound(
        queue_index_map->begin(), queue_index_map->end(), search,
        [](SentTimestamp a, SentTimestamp b) {
          if (b.monotonic_event_time < a.monotonic_event_time) {
            return false;
          }
          if (b.monotonic_event_time > a.monotonic_event_time) {
            return true;
          }

          if (b.queue_index < a.queue_index) {
            return false;
          }
          if (b.queue_index > a.queue_index) {
            return true;
          }

          CHECK_EQ(a.realtime_event_time, b.realtime_event_time);
          return false;
        });

    // TODO(austin): Be a bit more principled here, but we will want to do that
    // after the logger rewrite.  We hit this when one node finishes, but the
    // other node isn't done yet.  So there is no send time, but there is a
    // receive time.
    if (element != queue_index_map->end()) {
      CHECK_EQ(element->monotonic_event_time,
               delivery_timestamp.monotonic_remote_time);
      CHECK_EQ(element->realtime_event_time,
               delivery_timestamp.realtime_remote_time);
      CHECK_EQ(element->queue_index, delivery_timestamp.remote_queue_index);

      remote_queue_index = element->actual_queue_index;
    }
  }

  // Send!  Use the replayed queue index here instead of the logged queue index
  // for the remote queue index.  This makes re-logging work.
  const bool sent =
      sender->Send(data, size, delivery_timestamp.monotonic_remote_time,
                   delivery_timestamp.realtime_remote_time, remote_queue_index);
  if (!sent) return false;

  if (queue_index_map_[channel_index]) {
    // This channel is forwarded elsewhere: record the (logged -> replayed)
    // queue index mapping so destination nodes can look it up above.
    SentTimestamp timestamp;
    timestamp.monotonic_event_time = delivery_timestamp.monotonic_event_time;
    timestamp.realtime_event_time = delivery_timestamp.realtime_event_time;
    timestamp.queue_index = delivery_timestamp.queue_index;
    timestamp.actual_queue_index = sender->sent_queue_index();
    queue_index_map_[channel_index]->emplace_back(timestamp);
  } else if (remote_timestamp_senders_[channel_index] != nullptr) {
    // Forwarded delivery: publish a remote timestamp message describing it.
    aos::Sender<MessageHeader>::Builder builder =
        remote_timestamp_senders_[channel_index]->MakeBuilder();

    logger::MessageHeader::Builder message_header_builder =
        builder.MakeBuilder<logger::MessageHeader>();

    message_header_builder.add_channel_index(
        factory_channel_index_[channel_index]);

    // Swap the remote and sent metrics.  They are from the sender's
    // perspective, not the receiver's perspective.
    message_header_builder.add_monotonic_sent_time(
        sender->monotonic_sent_time().time_since_epoch().count());
    message_header_builder.add_realtime_sent_time(
        sender->realtime_sent_time().time_since_epoch().count());
    message_header_builder.add_queue_index(sender->sent_queue_index());

    message_header_builder.add_monotonic_remote_time(
        delivery_timestamp.monotonic_remote_time.time_since_epoch().count());
    message_header_builder.add_realtime_remote_time(
        delivery_timestamp.realtime_remote_time.time_since_epoch().count());

    message_header_builder.add_remote_queue_index(remote_queue_index);

    builder.Send(message_header_builder.Finish());
  }

  return true;
}
1881
1882aos::Sender<MessageHeader> *LogReader::State::RemoteTimestampSender(
1883 const Node *delivered_node) {
1884 auto sender = remote_timestamp_senders_map_.find(delivered_node);
1885
1886 if (sender == remote_timestamp_senders_map_.end()) {
1887 sender = remote_timestamp_senders_map_
1888 .emplace(std::make_pair(
1889 delivered_node,
1890 event_loop()->MakeSender<MessageHeader>(
1891 absl::StrCat("/aos/remote_timestamps/",
1892 delivered_node->name()->string_view()))))
1893 .first;
1894 }
1895
1896 return &(sender->second);
Austin Schuh858c9f32020-08-31 16:56:12 -07001897}
1898
// Pops the oldest queued message for this node, refills the staging queue,
// and reports via *update_time whether the associated offset filter (if any)
// wants the clock offsets recomputed.  Requires at least one queued message.
std::tuple<TimestampMerger::DeliveryTimestamp, int,
           FlatbufferVector<MessageHeader>>
LogReader::State::PopOldest(bool *update_time) {
  CHECK_GT(sorted_messages_.size(), 0u);

  // The queued tuple also carries the filter pointer; it is stripped off
  // before returning to the caller.
  std::tuple<TimestampMerger::DeliveryTimestamp, int,
             FlatbufferVector<MessageHeader>,
             message_bridge::NoncausalOffsetEstimator *>
      result = std::move(sorted_messages_.front());
  VLOG(2) << MaybeNodeName(event_loop_->node()) << "PopOldest Popping "
          << std::get<0>(result).monotonic_event_time;
  sorted_messages_.pop_front();
  // Keep the staging queue topped up after consuming an entry.
  SeedSortedMessages();

  if (std::get<3>(result) != nullptr) {
    *update_time = std::get<3>(result)->Pop(
        event_loop_->node(), std::get<0>(result).monotonic_event_time);
  } else {
    *update_time = false;
  }
  return std::make_tuple(std::get<0>(result), std::get<1>(result),
                         std::move(std::get<2>(result)));
}
1922
1923monotonic_clock::time_point LogReader::State::OldestMessageTime() const {
1924 if (sorted_messages_.size() > 0) {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001925 VLOG(2) << MaybeNodeName(event_loop_->node()) << "oldest message at "
Austin Schuh858c9f32020-08-31 16:56:12 -07001926 << std::get<0>(sorted_messages_.front()).monotonic_event_time;
1927 return std::get<0>(sorted_messages_.front()).monotonic_event_time;
1928 }
1929
1930 return channel_merger_->OldestMessageTime();
1931}
1932
1933void LogReader::State::SeedSortedMessages() {
1934 const aos::monotonic_clock::time_point end_queue_time =
1935 (sorted_messages_.size() > 0
1936 ? std::get<0>(sorted_messages_.front()).monotonic_event_time
1937 : channel_merger_->monotonic_start_time()) +
1938 std::chrono::seconds(2);
1939
1940 while (true) {
1941 if (channel_merger_->OldestMessageTime() == monotonic_clock::max_time) {
1942 return;
1943 }
1944 if (sorted_messages_.size() > 0) {
1945 // Stop placing sorted messages on the list once we have 2 seconds
1946 // queued up (but queue at least until the log starts.
1947 if (end_queue_time <
1948 std::get<0>(sorted_messages_.back()).monotonic_event_time) {
1949 return;
1950 }
1951 }
1952
1953 TimestampMerger::DeliveryTimestamp channel_timestamp;
1954 int channel_index;
1955 FlatbufferVector<MessageHeader> channel_data =
1956 FlatbufferVector<MessageHeader>::Empty();
1957
Austin Schuh2f8fd752020-09-01 22:38:28 -07001958 message_bridge::NoncausalOffsetEstimator *filter = nullptr;
1959
Austin Schuh858c9f32020-08-31 16:56:12 -07001960 std::tie(channel_timestamp, channel_index, channel_data) =
1961 channel_merger_->PopOldest();
1962
Austin Schuh2f8fd752020-09-01 22:38:28 -07001963 // Skip any messages without forwarding information.
1964 if (channel_timestamp.monotonic_remote_time != monotonic_clock::min_time) {
1965 // Got a forwarding timestamp!
1966 filter = filters_[channel_index];
1967
1968 CHECK(filter != nullptr);
1969
1970 // Call the correct method depending on if we are the forward or
1971 // reverse direction here.
1972 filter->Sample(event_loop_->node(),
1973 channel_timestamp.monotonic_event_time,
1974 channel_timestamp.monotonic_remote_time);
1975 }
Austin Schuh858c9f32020-08-31 16:56:12 -07001976 sorted_messages_.emplace_back(channel_timestamp, channel_index,
Austin Schuh2f8fd752020-09-01 22:38:28 -07001977 std::move(channel_data), filter);
Austin Schuh858c9f32020-08-31 16:56:12 -07001978 }
1979}
1980
1981void LogReader::State::Deregister() {
1982 for (size_t i = 0; i < channels_.size(); ++i) {
1983 channels_[i].reset();
1984 }
Austin Schuh8d7e0bb2020-10-02 17:57:00 -07001985 remote_timestamp_senders_map_.clear();
Austin Schuh858c9f32020-08-31 16:56:12 -07001986 event_loop_unique_ptr_.reset();
1987 event_loop_ = nullptr;
1988 timer_handler_ = nullptr;
1989 node_event_loop_factory_ = nullptr;
1990}
1991
Austin Schuhe309d2a2019-11-29 13:25:21 -08001992} // namespace logger
1993} // namespace aos