blob: 977a82faf3f8e6ffaeefbb17b07b3ff20154368d [file] [log] [blame]
James Kuszmaul38735e82019-12-07 16:42:06 -08001#include "aos/events/logging/logger.h"
Austin Schuhe309d2a2019-11-29 13:25:21 -08002
3#include <fcntl.h>
Austin Schuh4c4e0092019-12-22 16:18:03 -08004#include <limits.h>
Austin Schuhe309d2a2019-11-29 13:25:21 -08005#include <sys/stat.h>
6#include <sys/types.h>
7#include <sys/uio.h>
8#include <vector>
9
Austin Schuh8bd96322020-02-13 21:18:22 -080010#include "Eigen/Dense"
Austin Schuh2f8fd752020-09-01 22:38:28 -070011#include "absl/strings/escaping.h"
Austin Schuhe309d2a2019-11-29 13:25:21 -080012#include "absl/types/span.h"
13#include "aos/events/event_loop.h"
Austin Schuhf6f9bf32020-10-11 14:37:43 -070014#include "aos/events/logging/logfile_sorting.h"
James Kuszmaul38735e82019-12-07 16:42:06 -080015#include "aos/events/logging/logger_generated.h"
Austin Schuh64fab802020-09-09 22:47:47 -070016#include "aos/events/logging/uuid.h"
Austin Schuhe309d2a2019-11-29 13:25:21 -080017#include "aos/flatbuffer_merge.h"
Austin Schuh288479d2019-12-18 19:47:52 -080018#include "aos/network/team_number.h"
Austin Schuhe309d2a2019-11-29 13:25:21 -080019#include "aos/time/time.h"
Brian Silvermanae7c0332020-09-30 16:58:23 -070020#include "aos/util/file.h"
Austin Schuhe309d2a2019-11-29 13:25:21 -080021#include "flatbuffers/flatbuffers.h"
Austin Schuh2f8fd752020-09-01 22:38:28 -070022#include "third_party/gmp/gmpxx.h"
Austin Schuhe309d2a2019-11-29 13:25:21 -080023
Austin Schuh15649d62019-12-28 16:36:38 -080024DEFINE_bool(skip_missing_forwarding_entries, false,
25 "If true, drop any forwarding entries with missing data. If "
26 "false, CHECK.");
Austin Schuhe309d2a2019-11-29 13:25:21 -080027
Austin Schuh8bd96322020-02-13 21:18:22 -080028DEFINE_bool(timestamps_to_csv, false,
29 "If true, write all the time synchronization information to a set "
30 "of CSV files in /tmp/. This should only be needed when debugging "
31 "time synchronization.");
32
Austin Schuh2f8fd752020-09-01 22:38:28 -070033DEFINE_bool(skip_order_validation, false,
34 "If true, ignore any out of orderness in replay");
35
Austin Schuhe309d2a2019-11-29 13:25:21 -080036namespace aos {
37namespace logger {
Austin Schuh0afc4d12020-10-19 11:42:04 -070038namespace {
39// Helper to safely read a header, or CHECK.
40FlatbufferVector<LogFileHeader> MaybeReadHeaderOrDie(
41 const std::vector<std::vector<std::string>> &filenames) {
42 CHECK_GE(filenames.size(), 1u) << ": Empty filenames list";
43 CHECK_GE(filenames[0].size(), 1u) << ": Empty filenames list";
Austin Schuh3bd4c402020-11-06 18:19:06 -080044 std::optional<FlatbufferVector<LogFileHeader>> result =
45 ReadHeader(filenames[0][0]);
46 CHECK(result);
47 return result.value();
Austin Schuh0afc4d12020-10-19 11:42:04 -070048}
Austin Schuhe309d2a2019-11-29 13:25:21 -080049namespace chrono = std::chrono;
Austin Schuh0afc4d12020-10-19 11:42:04 -070050} // namespace
Austin Schuhe309d2a2019-11-29 13:25:21 -080051
Brian Silverman1f345222020-09-24 21:14:48 -070052Logger::Logger(EventLoop *event_loop, const Configuration *configuration,
53 std::function<bool(const Channel *)> should_log)
Austin Schuhe309d2a2019-11-29 13:25:21 -080054 : event_loop_(event_loop),
Austin Schuh0c297012020-09-16 18:41:59 -070055 configuration_(configuration),
Brian Silvermanae7c0332020-09-30 16:58:23 -070056 boot_uuid_(
57 util::ReadFileToStringOrDie("/proc/sys/kernel/random/boot_id")),
Austin Schuh0c297012020-09-16 18:41:59 -070058 name_(network::GetHostname()),
Brian Silverman1f345222020-09-24 21:14:48 -070059 timer_handler_(event_loop_->AddTimer(
60 [this]() { DoLogData(event_loop_->monotonic_now()); })),
Austin Schuh2f8fd752020-09-01 22:38:28 -070061 server_statistics_fetcher_(
62 configuration::MultiNode(event_loop_->configuration())
63 ? event_loop_->MakeFetcher<message_bridge::ServerStatistics>(
64 "/aos")
65 : aos::Fetcher<message_bridge::ServerStatistics>()) {
Brian Silverman1f345222020-09-24 21:14:48 -070066 VLOG(1) << "Creating logger for " << FlatbufferToJson(event_loop_->node());
Austin Schuh2f8fd752020-09-01 22:38:28 -070067
Austin Schuh8d7e0bb2020-10-02 17:57:00 -070068 // Find all the nodes which are logging timestamps on our node. This may
69 // over-estimate if should_log is specified.
70 std::vector<const Node *> timestamp_logger_nodes =
71 configuration::TimestampNodes(configuration_, event_loop_->node());
Austin Schuh2f8fd752020-09-01 22:38:28 -070072
73 std::map<const Channel *, const Node *> timestamp_logger_channels;
74
75 // Now that we have all the nodes accumulated, make remote timestamp loggers
76 // for them.
77 for (const Node *node : timestamp_logger_nodes) {
Austin Schuh8d7e0bb2020-10-02 17:57:00 -070078 // Note: since we are doing a find using the event loop channel, we need to
79 // make sure this channel pointer is part of the event loop configuration,
80 // not configuration_. This only matters when configuration_ !=
81 // event_loop->configuration();
Austin Schuh2f8fd752020-09-01 22:38:28 -070082 const Channel *channel = configuration::GetChannel(
Austin Schuh8d7e0bb2020-10-02 17:57:00 -070083 event_loop->configuration(),
Austin Schuh2f8fd752020-09-01 22:38:28 -070084 absl::StrCat("/aos/remote_timestamps/", node->name()->string_view()),
85 logger::MessageHeader::GetFullyQualifiedName(), event_loop_->name(),
86 event_loop_->node());
87
88 CHECK(channel != nullptr)
89 << ": Remote timestamps are logged on "
90 << event_loop_->node()->name()->string_view()
91 << " but can't find channel /aos/remote_timestamps/"
92 << node->name()->string_view();
Brian Silverman1f345222020-09-24 21:14:48 -070093 if (!should_log(channel)) {
94 continue;
95 }
Austin Schuh2f8fd752020-09-01 22:38:28 -070096 timestamp_logger_channels.insert(std::make_pair(channel, node));
97 }
98
Brian Silvermand90905f2020-09-23 14:42:56 -070099 const size_t our_node_index =
100 configuration::GetNodeIndex(configuration_, event_loop_->node());
Austin Schuh2f8fd752020-09-01 22:38:28 -0700101
Brian Silverman1f345222020-09-24 21:14:48 -0700102 for (size_t channel_index = 0;
103 channel_index < configuration_->channels()->size(); ++channel_index) {
104 const Channel *const config_channel =
105 configuration_->channels()->Get(channel_index);
Austin Schuh0c297012020-09-16 18:41:59 -0700106 // The MakeRawFetcher method needs a channel which is in the event loop
107 // configuration() object, not the configuration_ object. Go look that up
108 // from the config.
109 const Channel *channel = aos::configuration::GetChannel(
110 event_loop_->configuration(), config_channel->name()->string_view(),
111 config_channel->type()->string_view(), "", event_loop_->node());
Austin Schuh8d7e0bb2020-10-02 17:57:00 -0700112 CHECK(channel != nullptr)
113 << ": Failed to look up channel "
114 << aos::configuration::CleanedChannelToString(config_channel);
Brian Silverman1f345222020-09-24 21:14:48 -0700115 if (!should_log(channel)) {
116 continue;
117 }
Austin Schuh0c297012020-09-16 18:41:59 -0700118
Austin Schuhe309d2a2019-11-29 13:25:21 -0800119 FetcherStruct fs;
Austin Schuh2f8fd752020-09-01 22:38:28 -0700120 fs.node_index = our_node_index;
Brian Silverman1f345222020-09-24 21:14:48 -0700121 fs.channel_index = channel_index;
122 fs.channel = channel;
123
Austin Schuh6f3babe2020-01-26 20:34:50 -0800124 const bool is_local =
125 configuration::ChannelIsSendableOnNode(channel, event_loop_->node());
126
Austin Schuh15649d62019-12-28 16:36:38 -0800127 const bool is_readable =
128 configuration::ChannelIsReadableOnNode(channel, event_loop_->node());
Brian Silverman1f345222020-09-24 21:14:48 -0700129 const bool is_logged = configuration::ChannelMessageIsLoggedOnNode(
130 channel, event_loop_->node());
131 const bool log_message = is_logged && is_readable;
Austin Schuh15649d62019-12-28 16:36:38 -0800132
Brian Silverman1f345222020-09-24 21:14:48 -0700133 bool log_delivery_times = false;
134 if (event_loop_->node() != nullptr) {
135 log_delivery_times = configuration::ConnectionDeliveryTimeIsLoggedOnNode(
136 channel, event_loop_->node(), event_loop_->node());
137 }
Austin Schuh15649d62019-12-28 16:36:38 -0800138
Austin Schuh2f8fd752020-09-01 22:38:28 -0700139 // Now, detect a MessageHeader timestamp logger where we should just log the
140 // contents to a file directly.
141 const bool log_contents = timestamp_logger_channels.find(channel) !=
142 timestamp_logger_channels.end();
Austin Schuh2f8fd752020-09-01 22:38:28 -0700143
144 if (log_message || log_delivery_times || log_contents) {
Austin Schuh15649d62019-12-28 16:36:38 -0800145 fs.fetcher = event_loop->MakeRawFetcher(channel);
146 VLOG(1) << "Logging channel "
147 << configuration::CleanedChannelToString(channel);
148
149 if (log_delivery_times) {
Austin Schuh6f3babe2020-01-26 20:34:50 -0800150 VLOG(1) << " Delivery times";
Brian Silverman1f345222020-09-24 21:14:48 -0700151 fs.wants_timestamp_writer = true;
Austin Schuh15649d62019-12-28 16:36:38 -0800152 }
Austin Schuh6f3babe2020-01-26 20:34:50 -0800153 if (log_message) {
154 VLOG(1) << " Data";
Brian Silverman1f345222020-09-24 21:14:48 -0700155 fs.wants_writer = true;
Austin Schuh6f3babe2020-01-26 20:34:50 -0800156 if (!is_local) {
157 fs.log_type = LogType::kLogRemoteMessage;
158 }
159 }
Austin Schuh2f8fd752020-09-01 22:38:28 -0700160 if (log_contents) {
161 VLOG(1) << "Timestamp logger channel "
162 << configuration::CleanedChannelToString(channel);
Brian Silverman1f345222020-09-24 21:14:48 -0700163 fs.timestamp_node = timestamp_logger_channels.find(channel)->second;
164 fs.wants_contents_writer = true;
Austin Schuh0c297012020-09-16 18:41:59 -0700165 fs.node_index =
Brian Silverman1f345222020-09-24 21:14:48 -0700166 configuration::GetNodeIndex(configuration_, fs.timestamp_node);
Austin Schuh2f8fd752020-09-01 22:38:28 -0700167 }
Austin Schuh6f3babe2020-01-26 20:34:50 -0800168 fetchers_.emplace_back(std::move(fs));
Austin Schuh15649d62019-12-28 16:36:38 -0800169 }
Brian Silverman1f345222020-09-24 21:14:48 -0700170 }
Austin Schuh8d7e0bb2020-10-02 17:57:00 -0700171
172 // When we are logging remote timestamps, we need to be able to translate from
173 // the channel index that the event loop uses to the channel index in the
174 // config in the log file.
175 event_loop_to_logged_channel_index_.resize(
176 event_loop->configuration()->channels()->size(), -1);
177 for (size_t event_loop_channel_index = 0;
178 event_loop_channel_index <
179 event_loop->configuration()->channels()->size();
180 ++event_loop_channel_index) {
181 const Channel *event_loop_channel =
182 event_loop->configuration()->channels()->Get(event_loop_channel_index);
183
184 const Channel *logged_channel = aos::configuration::GetChannel(
185 configuration_, event_loop_channel->name()->string_view(),
186 event_loop_channel->type()->string_view(), "",
187 configuration::GetNode(configuration_, event_loop_->node()));
188
189 if (logged_channel != nullptr) {
190 event_loop_to_logged_channel_index_[event_loop_channel_index] =
191 configuration::ChannelIndex(configuration_, logged_channel);
192 }
193 }
Brian Silverman1f345222020-09-24 21:14:48 -0700194}
195
196Logger::~Logger() {
197 if (log_namer_) {
198 // If we are replaying a log file, or in simulation, we want to force the
199 // last bit of data to be logged. The easiest way to deal with this is to
200 // poll everything as we go to destroy the class, ie, shut down the logger,
201 // and write it to disk.
202 StopLogging(event_loop_->monotonic_now());
203 }
204}
205
Brian Silvermanae7c0332020-09-30 16:58:23 -0700206void Logger::StartLogging(std::unique_ptr<LogNamer> log_namer,
207 std::string_view log_start_uuid) {
Brian Silverman1f345222020-09-24 21:14:48 -0700208 CHECK(!log_namer_) << ": Already logging";
209 log_namer_ = std::move(log_namer);
Brian Silvermanae7c0332020-09-30 16:58:23 -0700210 log_event_uuid_ = UUID::Random();
211 log_start_uuid_ = log_start_uuid;
Brian Silverman1f345222020-09-24 21:14:48 -0700212 VLOG(1) << "Starting logger for " << FlatbufferToJson(event_loop_->node());
213
214 // We want to do as much work as possible before the initial Fetch. Time
215 // between that and actually starting to log opens up the possibility of
216 // falling off the end of the queue during that time.
217
218 for (FetcherStruct &f : fetchers_) {
219 if (f.wants_writer) {
220 f.writer = log_namer_->MakeWriter(f.channel);
221 }
222 if (f.wants_timestamp_writer) {
223 f.timestamp_writer = log_namer_->MakeTimestampWriter(f.channel);
224 }
225 if (f.wants_contents_writer) {
226 f.contents_writer = log_namer_->MakeForwardedTimestampWriter(
227 f.channel, CHECK_NOTNULL(f.timestamp_node));
228 }
Austin Schuhe309d2a2019-11-29 13:25:21 -0800229 }
230
Brian Silverman1f345222020-09-24 21:14:48 -0700231 CHECK(node_state_.empty());
Austin Schuh0c297012020-09-16 18:41:59 -0700232 node_state_.resize(configuration::MultiNode(configuration_)
233 ? configuration_->nodes()->size()
Austin Schuh2f8fd752020-09-01 22:38:28 -0700234 : 1u);
Austin Schuhe309d2a2019-11-29 13:25:21 -0800235
Austin Schuh2f8fd752020-09-01 22:38:28 -0700236 for (const Node *node : log_namer_->nodes()) {
Brian Silvermand90905f2020-09-23 14:42:56 -0700237 const int node_index = configuration::GetNodeIndex(configuration_, node);
Austin Schuhe309d2a2019-11-29 13:25:21 -0800238
Austin Schuh2f8fd752020-09-01 22:38:28 -0700239 node_state_[node_index].log_file_header = MakeHeader(node);
240 }
Austin Schuhe309d2a2019-11-29 13:25:21 -0800241
Austin Schuh2f8fd752020-09-01 22:38:28 -0700242 // Grab data from each channel right before we declare the log file started
243 // so we can capture the latest message on each channel. This lets us have
244 // non periodic messages with configuration that now get logged.
245 for (FetcherStruct &f : fetchers_) {
Brian Silvermancb805822020-10-06 17:43:35 -0700246 const auto start = event_loop_->monotonic_now();
247 const bool got_new = f.fetcher->Fetch();
248 const auto end = event_loop_->monotonic_now();
249 RecordFetchResult(start, end, got_new, &f);
250
251 // If there is a message, we want to write it.
252 f.written = f.fetcher->context().data == nullptr;
Austin Schuh2f8fd752020-09-01 22:38:28 -0700253 }
254
255 // Clear out any old timestamps in case we are re-starting logging.
256 for (size_t i = 0; i < node_state_.size(); ++i) {
257 SetStartTime(i, monotonic_clock::min_time, realtime_clock::min_time);
258 }
259
260 WriteHeader();
261
262 LOG(INFO) << "Logging node as " << FlatbufferToJson(event_loop_->node())
263 << " start_time " << last_synchronized_time_;
264
265 timer_handler_->Setup(event_loop_->monotonic_now() + polling_period_,
266 polling_period_);
267}
268
Brian Silverman1f345222020-09-24 21:14:48 -0700269std::unique_ptr<LogNamer> Logger::StopLogging(
270 aos::monotonic_clock::time_point end_time) {
271 CHECK(log_namer_) << ": Not logging right now";
272
273 if (end_time != aos::monotonic_clock::min_time) {
274 LogUntil(end_time);
275 }
276 timer_handler_->Disable();
277
278 for (FetcherStruct &f : fetchers_) {
279 f.writer = nullptr;
280 f.timestamp_writer = nullptr;
281 f.contents_writer = nullptr;
282 }
283 node_state_.clear();
284
Brian Silvermanae7c0332020-09-30 16:58:23 -0700285 log_event_uuid_ = UUID::Zero();
286 log_start_uuid_ = std::string();
287
Brian Silverman1f345222020-09-24 21:14:48 -0700288 return std::move(log_namer_);
289}
290
Austin Schuhfa895892020-01-07 20:07:41 -0800291void Logger::WriteHeader() {
Austin Schuh0c297012020-09-16 18:41:59 -0700292 if (configuration::MultiNode(configuration_)) {
Austin Schuh2f8fd752020-09-01 22:38:28 -0700293 server_statistics_fetcher_.Fetch();
294 }
295
296 aos::monotonic_clock::time_point monotonic_start_time =
297 event_loop_->monotonic_now();
298 aos::realtime_clock::time_point realtime_start_time =
299 event_loop_->realtime_now();
300
301 // We need to pick a point in time to declare the log file "started". This
302 // starts here. It needs to be after everything is fetched so that the
303 // fetchers are all pointed at the most recent message before the start
304 // time.
305 last_synchronized_time_ = monotonic_start_time;
306
Austin Schuh6f3babe2020-01-26 20:34:50 -0800307 for (const Node *node : log_namer_->nodes()) {
Brian Silvermand90905f2020-09-23 14:42:56 -0700308 const int node_index = configuration::GetNodeIndex(configuration_, node);
Austin Schuh2f8fd752020-09-01 22:38:28 -0700309 MaybeUpdateTimestamp(node, node_index, monotonic_start_time,
310 realtime_start_time);
Austin Schuh64fab802020-09-09 22:47:47 -0700311 log_namer_->WriteHeader(&node_state_[node_index].log_file_header, node);
Austin Schuh6f3babe2020-01-26 20:34:50 -0800312 }
313}
Austin Schuh8bd96322020-02-13 21:18:22 -0800314
Austin Schuh2f8fd752020-09-01 22:38:28 -0700315void Logger::WriteMissingTimestamps() {
Austin Schuh0c297012020-09-16 18:41:59 -0700316 if (configuration::MultiNode(configuration_)) {
Austin Schuh2f8fd752020-09-01 22:38:28 -0700317 server_statistics_fetcher_.Fetch();
318 } else {
319 return;
320 }
321
322 if (server_statistics_fetcher_.get() == nullptr) {
323 return;
324 }
325
326 for (const Node *node : log_namer_->nodes()) {
Brian Silvermand90905f2020-09-23 14:42:56 -0700327 const int node_index = configuration::GetNodeIndex(configuration_, node);
Austin Schuh2f8fd752020-09-01 22:38:28 -0700328 if (MaybeUpdateTimestamp(
329 node, node_index,
330 server_statistics_fetcher_.context().monotonic_event_time,
331 server_statistics_fetcher_.context().realtime_event_time)) {
Austin Schuh64fab802020-09-09 22:47:47 -0700332 log_namer_->Rotate(node, &node_state_[node_index].log_file_header);
Austin Schuh2f8fd752020-09-01 22:38:28 -0700333 }
334 }
335}
336
337void Logger::SetStartTime(size_t node_index,
338 aos::monotonic_clock::time_point monotonic_start_time,
339 aos::realtime_clock::time_point realtime_start_time) {
340 node_state_[node_index].monotonic_start_time = monotonic_start_time;
341 node_state_[node_index].realtime_start_time = realtime_start_time;
342 node_state_[node_index]
343 .log_file_header.mutable_message()
344 ->mutate_monotonic_start_time(
345 std::chrono::duration_cast<std::chrono::nanoseconds>(
346 monotonic_start_time.time_since_epoch())
347 .count());
348 if (node_state_[node_index]
349 .log_file_header.mutable_message()
350 ->has_realtime_start_time()) {
351 node_state_[node_index]
352 .log_file_header.mutable_message()
353 ->mutate_realtime_start_time(
354 std::chrono::duration_cast<std::chrono::nanoseconds>(
355 realtime_start_time.time_since_epoch())
356 .count());
357 }
358}
359
360bool Logger::MaybeUpdateTimestamp(
361 const Node *node, int node_index,
362 aos::monotonic_clock::time_point monotonic_start_time,
363 aos::realtime_clock::time_point realtime_start_time) {
Brian Silverman87ac0402020-09-17 14:47:01 -0700364 // Bail early if the start times are already set.
Austin Schuh2f8fd752020-09-01 22:38:28 -0700365 if (node_state_[node_index].monotonic_start_time !=
366 monotonic_clock::min_time) {
367 return false;
368 }
Austin Schuh0c297012020-09-16 18:41:59 -0700369 if (configuration::MultiNode(configuration_)) {
Austin Schuh2f8fd752020-09-01 22:38:28 -0700370 if (event_loop_->node() == node) {
371 // There are no offsets to compute for ourself, so always succeed.
372 SetStartTime(node_index, monotonic_start_time, realtime_start_time);
373 return true;
374 } else if (server_statistics_fetcher_.get() != nullptr) {
375 // We must be a remote node now. Look for the connection and see if it is
376 // connected.
377
378 for (const message_bridge::ServerConnection *connection :
379 *server_statistics_fetcher_->connections()) {
380 if (connection->node()->name()->string_view() !=
381 node->name()->string_view()) {
382 continue;
383 }
384
385 if (connection->state() != message_bridge::State::CONNECTED) {
386 VLOG(1) << node->name()->string_view()
387 << " is not connected, can't start it yet.";
388 break;
389 }
390
391 if (!connection->has_monotonic_offset()) {
392 VLOG(1) << "Missing monotonic offset for setting start time for node "
393 << aos::FlatbufferToJson(node);
394 break;
395 }
396
397 VLOG(1) << "Updating start time for " << aos::FlatbufferToJson(node);
398
399 // Found it and it is connected. Compensate and go.
400 monotonic_start_time +=
401 std::chrono::nanoseconds(connection->monotonic_offset());
402
403 SetStartTime(node_index, monotonic_start_time, realtime_start_time);
404 return true;
405 }
406 }
407 } else {
408 SetStartTime(node_index, monotonic_start_time, realtime_start_time);
409 return true;
410 }
411 return false;
412}
413
414aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> Logger::MakeHeader(
415 const Node *node) {
Austin Schuhfa895892020-01-07 20:07:41 -0800416 // Now write the header with this timestamp in it.
417 flatbuffers::FlatBufferBuilder fbb;
Austin Schuhd7b15da2020-02-17 15:06:11 -0800418 fbb.ForceDefaults(true);
Austin Schuhfa895892020-01-07 20:07:41 -0800419
Austin Schuh2f8fd752020-09-01 22:38:28 -0700420 // TODO(austin): Compress this much more efficiently. There are a bunch of
421 // duplicated schemas.
Brian Silvermanae7c0332020-09-30 16:58:23 -0700422 const flatbuffers::Offset<aos::Configuration> configuration_offset =
Austin Schuh0c297012020-09-16 18:41:59 -0700423 CopyFlatBuffer(configuration_, &fbb);
Austin Schuhfa895892020-01-07 20:07:41 -0800424
Brian Silvermanae7c0332020-09-30 16:58:23 -0700425 const flatbuffers::Offset<flatbuffers::String> name_offset =
Austin Schuh0c297012020-09-16 18:41:59 -0700426 fbb.CreateString(name_);
Austin Schuhfa895892020-01-07 20:07:41 -0800427
Brian Silvermanae7c0332020-09-30 16:58:23 -0700428 CHECK(log_event_uuid_ != UUID::Zero());
429 const flatbuffers::Offset<flatbuffers::String> log_event_uuid_offset =
430 fbb.CreateString(log_event_uuid_.string_view());
Austin Schuh64fab802020-09-09 22:47:47 -0700431
Brian Silvermanae7c0332020-09-30 16:58:23 -0700432 const flatbuffers::Offset<flatbuffers::String> logger_instance_uuid_offset =
433 fbb.CreateString(logger_instance_uuid_.string_view());
434
435 flatbuffers::Offset<flatbuffers::String> log_start_uuid_offset;
436 if (!log_start_uuid_.empty()) {
437 log_start_uuid_offset = fbb.CreateString(log_start_uuid_);
438 }
439
440 const flatbuffers::Offset<flatbuffers::String> boot_uuid_offset =
441 fbb.CreateString(boot_uuid_);
442
443 const flatbuffers::Offset<flatbuffers::String> parts_uuid_offset =
Austin Schuh64fab802020-09-09 22:47:47 -0700444 fbb.CreateString("00000000-0000-4000-8000-000000000000");
445
Austin Schuhfa895892020-01-07 20:07:41 -0800446 flatbuffers::Offset<Node> node_offset;
Brian Silverman80993c22020-10-01 15:05:19 -0700447 flatbuffers::Offset<Node> logger_node_offset;
Austin Schuh2f8fd752020-09-01 22:38:28 -0700448
Austin Schuh0c297012020-09-16 18:41:59 -0700449 if (configuration::MultiNode(configuration_)) {
Austin Schuha4fc60f2020-11-01 23:06:47 -0800450 // TODO(austin): Reuse the node we just copied in above.
451 node_offset = RecursiveCopyFlatBuffer(node, &fbb);
452 logger_node_offset = RecursiveCopyFlatBuffer(event_loop_->node(), &fbb);
Austin Schuhfa895892020-01-07 20:07:41 -0800453 }
454
455 aos::logger::LogFileHeader::Builder log_file_header_builder(fbb);
456
Austin Schuh64fab802020-09-09 22:47:47 -0700457 log_file_header_builder.add_name(name_offset);
Austin Schuhfa895892020-01-07 20:07:41 -0800458
459 // Only add the node if we are running in a multinode configuration.
Austin Schuh6f3babe2020-01-26 20:34:50 -0800460 if (node != nullptr) {
Austin Schuhfa895892020-01-07 20:07:41 -0800461 log_file_header_builder.add_node(node_offset);
Brian Silverman80993c22020-10-01 15:05:19 -0700462 log_file_header_builder.add_logger_node(logger_node_offset);
Austin Schuhfa895892020-01-07 20:07:41 -0800463 }
464
465 log_file_header_builder.add_configuration(configuration_offset);
466 // The worst case theoretical out of order is the polling period times 2.
467 // One message could get logged right after the boundary, but be for right
468 // before the next boundary. And the reverse could happen for another
469 // message. Report back 3x to be extra safe, and because the cost isn't
470 // huge on the read side.
471 log_file_header_builder.add_max_out_of_order_duration(
Brian Silverman1f345222020-09-24 21:14:48 -0700472 std::chrono::nanoseconds(3 * polling_period_).count());
Austin Schuhfa895892020-01-07 20:07:41 -0800473
474 log_file_header_builder.add_monotonic_start_time(
475 std::chrono::duration_cast<std::chrono::nanoseconds>(
Austin Schuh2f8fd752020-09-01 22:38:28 -0700476 monotonic_clock::min_time.time_since_epoch())
Austin Schuhfa895892020-01-07 20:07:41 -0800477 .count());
Austin Schuh2f8fd752020-09-01 22:38:28 -0700478 if (node == event_loop_->node()) {
479 log_file_header_builder.add_realtime_start_time(
480 std::chrono::duration_cast<std::chrono::nanoseconds>(
481 realtime_clock::min_time.time_since_epoch())
482 .count());
Austin Schuh6f3babe2020-01-26 20:34:50 -0800483 }
484
Brian Silvermanae7c0332020-09-30 16:58:23 -0700485 log_file_header_builder.add_log_event_uuid(log_event_uuid_offset);
486 log_file_header_builder.add_logger_instance_uuid(logger_instance_uuid_offset);
487 if (!log_start_uuid_offset.IsNull()) {
488 log_file_header_builder.add_log_start_uuid(log_start_uuid_offset);
489 }
490 log_file_header_builder.add_boot_uuid(boot_uuid_offset);
Austin Schuh64fab802020-09-09 22:47:47 -0700491
492 log_file_header_builder.add_parts_uuid(parts_uuid_offset);
493 log_file_header_builder.add_parts_index(0);
494
Austin Schuh2f8fd752020-09-01 22:38:28 -0700495 fbb.FinishSizePrefixed(log_file_header_builder.Finish());
Austin Schuha4fc60f2020-11-01 23:06:47 -0800496 aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> result(
497 fbb.Release());
498
499 CHECK(result.Verify()) << ": Built a corrupted header.";
500
501 return result;
Austin Schuh2f8fd752020-09-01 22:38:28 -0700502}
503
Brian Silvermancb805822020-10-06 17:43:35 -0700504void Logger::ResetStatisics() {
505 max_message_fetch_time_ = std::chrono::nanoseconds::zero();
506 max_message_fetch_time_channel_ = -1;
507 max_message_fetch_time_size_ = -1;
508 total_message_fetch_time_ = std::chrono::nanoseconds::zero();
509 total_message_fetch_count_ = 0;
510 total_message_fetch_bytes_ = 0;
511 total_nop_fetch_time_ = std::chrono::nanoseconds::zero();
512 total_nop_fetch_count_ = 0;
513 max_copy_time_ = std::chrono::nanoseconds::zero();
514 max_copy_time_channel_ = -1;
515 max_copy_time_size_ = -1;
516 total_copy_time_ = std::chrono::nanoseconds::zero();
517 total_copy_count_ = 0;
518 total_copy_bytes_ = 0;
519}
520
Austin Schuh2f8fd752020-09-01 22:38:28 -0700521void Logger::Rotate() {
522 for (const Node *node : log_namer_->nodes()) {
Brian Silvermand90905f2020-09-23 14:42:56 -0700523 const int node_index = configuration::GetNodeIndex(configuration_, node);
Austin Schuh64fab802020-09-09 22:47:47 -0700524 log_namer_->Rotate(node, &node_state_[node_index].log_file_header);
Austin Schuh2f8fd752020-09-01 22:38:28 -0700525 }
526}
527
528void Logger::LogUntil(monotonic_clock::time_point t) {
529 WriteMissingTimestamps();
530
531 // Write each channel to disk, one at a time.
532 for (FetcherStruct &f : fetchers_) {
533 while (true) {
534 if (f.written) {
Brian Silvermancb805822020-10-06 17:43:35 -0700535 const auto start = event_loop_->monotonic_now();
536 const bool got_new = f.fetcher->FetchNext();
537 const auto end = event_loop_->monotonic_now();
538 RecordFetchResult(start, end, got_new, &f);
539 if (!got_new) {
Austin Schuh2f8fd752020-09-01 22:38:28 -0700540 VLOG(2) << "No new data on "
541 << configuration::CleanedChannelToString(
542 f.fetcher->channel());
543 break;
Austin Schuh2f8fd752020-09-01 22:38:28 -0700544 }
Brian Silvermancb805822020-10-06 17:43:35 -0700545 f.written = false;
Austin Schuh2f8fd752020-09-01 22:38:28 -0700546 }
547
Austin Schuh2f8fd752020-09-01 22:38:28 -0700548 // TODO(james): Write tests to exercise this logic.
Brian Silvermancb805822020-10-06 17:43:35 -0700549 if (f.fetcher->context().monotonic_event_time >= t) {
Austin Schuh2f8fd752020-09-01 22:38:28 -0700550 break;
551 }
Brian Silvermancb805822020-10-06 17:43:35 -0700552 if (f.writer != nullptr) {
553 // Write!
554 const auto start = event_loop_->monotonic_now();
555 flatbuffers::FlatBufferBuilder fbb(f.fetcher->context().size +
556 max_header_size_);
557 fbb.ForceDefaults(true);
558
559 fbb.FinishSizePrefixed(PackMessage(&fbb, f.fetcher->context(),
560 f.channel_index, f.log_type));
561 const auto end = event_loop_->monotonic_now();
562 RecordCreateMessageTime(start, end, &f);
563
564 VLOG(2) << "Writing data as node "
565 << FlatbufferToJson(event_loop_->node()) << " for channel "
566 << configuration::CleanedChannelToString(f.fetcher->channel())
567 << " to " << f.writer->filename() << " data "
568 << FlatbufferToJson(
569 flatbuffers::GetSizePrefixedRoot<MessageHeader>(
570 fbb.GetBufferPointer()));
571
572 max_header_size_ = std::max(max_header_size_,
573 fbb.GetSize() - f.fetcher->context().size);
574 f.writer->QueueSizedFlatbuffer(&fbb);
575 }
576
577 if (f.timestamp_writer != nullptr) {
578 // And now handle timestamps.
579 const auto start = event_loop_->monotonic_now();
580 flatbuffers::FlatBufferBuilder fbb;
581 fbb.ForceDefaults(true);
582
583 fbb.FinishSizePrefixed(PackMessage(&fbb, f.fetcher->context(),
584 f.channel_index,
585 LogType::kLogDeliveryTimeOnly));
586 const auto end = event_loop_->monotonic_now();
587 RecordCreateMessageTime(start, end, &f);
588
589 VLOG(2) << "Writing timestamps as node "
590 << FlatbufferToJson(event_loop_->node()) << " for channel "
591 << configuration::CleanedChannelToString(f.fetcher->channel())
592 << " to " << f.timestamp_writer->filename() << " timestamp "
593 << FlatbufferToJson(
594 flatbuffers::GetSizePrefixedRoot<MessageHeader>(
595 fbb.GetBufferPointer()));
596
597 f.timestamp_writer->QueueSizedFlatbuffer(&fbb);
598 }
599
600 if (f.contents_writer != nullptr) {
601 const auto start = event_loop_->monotonic_now();
602 // And now handle the special message contents channel. Copy the
603 // message into a FlatBufferBuilder and save it to disk.
604 // TODO(austin): We can be more efficient here when we start to
605 // care...
606 flatbuffers::FlatBufferBuilder fbb;
607 fbb.ForceDefaults(true);
608
609 const MessageHeader *msg =
610 flatbuffers::GetRoot<MessageHeader>(f.fetcher->context().data);
611
612 logger::MessageHeader::Builder message_header_builder(fbb);
613
614 // TODO(austin): This needs to check the channel_index and confirm
615 // that it should be logged before squirreling away the timestamp to
616 // disk. We don't want to log irrelevant timestamps.
617
618 // Note: this must match the same order as MessageBridgeServer and
619 // PackMessage. We want identical headers to have identical
620 // on-the-wire formats to make comparing them easier.
621
622 // Translate from the channel index that the event loop uses to the
623 // channel index in the log file.
624 message_header_builder.add_channel_index(
625 event_loop_to_logged_channel_index_[msg->channel_index()]);
626
627 message_header_builder.add_queue_index(msg->queue_index());
628 message_header_builder.add_monotonic_sent_time(
629 msg->monotonic_sent_time());
630 message_header_builder.add_realtime_sent_time(
631 msg->realtime_sent_time());
632
633 message_header_builder.add_monotonic_remote_time(
634 msg->monotonic_remote_time());
635 message_header_builder.add_realtime_remote_time(
636 msg->realtime_remote_time());
637 message_header_builder.add_remote_queue_index(
638 msg->remote_queue_index());
639
640 fbb.FinishSizePrefixed(message_header_builder.Finish());
641 const auto end = event_loop_->monotonic_now();
642 RecordCreateMessageTime(start, end, &f);
643
644 f.contents_writer->QueueSizedFlatbuffer(&fbb);
645 }
646
647 f.written = true;
Austin Schuh2f8fd752020-09-01 22:38:28 -0700648 }
649 }
650 last_synchronized_time_ = t;
Austin Schuhfa895892020-01-07 20:07:41 -0800651}
652
Brian Silverman1f345222020-09-24 21:14:48 -0700653void Logger::DoLogData(const monotonic_clock::time_point end_time) {
654 // We want to guarantee that messages aren't out of order by more than
Austin Schuhe309d2a2019-11-29 13:25:21 -0800655 // max_out_of_order_duration. To do this, we need sync points. Every write
656 // cycle should be a sync point.
Austin Schuhe309d2a2019-11-29 13:25:21 -0800657
658 do {
659 // Move the sync point up by at most polling_period. This forces one sync
660 // per iteration, even if it is small.
Brian Silverman1f345222020-09-24 21:14:48 -0700661 LogUntil(std::min(last_synchronized_time_ + polling_period_, end_time));
662
663 on_logged_period_();
Austin Schuhe309d2a2019-11-29 13:25:21 -0800664
Austin Schuhe309d2a2019-11-29 13:25:21 -0800665 // If we missed cycles, we could be pretty far behind. Spin until we are
666 // caught up.
Brian Silverman1f345222020-09-24 21:14:48 -0700667 } while (last_synchronized_time_ + polling_period_ < end_time);
Austin Schuhe309d2a2019-11-29 13:25:21 -0800668}
669
Brian Silvermancb805822020-10-06 17:43:35 -0700670void Logger::RecordFetchResult(aos::monotonic_clock::time_point start,
671 aos::monotonic_clock::time_point end,
672 bool got_new, FetcherStruct *fetcher) {
673 const auto duration = end - start;
674 if (!got_new) {
675 ++total_nop_fetch_count_;
676 total_nop_fetch_time_ += duration;
677 return;
678 }
679 ++total_message_fetch_count_;
680 total_message_fetch_bytes_ += fetcher->fetcher->context().size;
681 total_message_fetch_time_ += duration;
682 if (duration > max_message_fetch_time_) {
683 max_message_fetch_time_ = duration;
684 max_message_fetch_time_channel_ = fetcher->channel_index;
685 max_message_fetch_time_size_ = fetcher->fetcher->context().size;
686 }
687}
688
689void Logger::RecordCreateMessageTime(aos::monotonic_clock::time_point start,
690 aos::monotonic_clock::time_point end,
691 FetcherStruct *fetcher) {
692 const auto duration = end - start;
693 total_copy_time_ += duration;
694 ++total_copy_count_;
695 total_copy_bytes_ += fetcher->fetcher->context().size;
696 if (duration > max_copy_time_) {
697 max_copy_time_ = duration;
698 max_copy_time_channel_ = fetcher->channel_index;
699 max_copy_time_size_ = fetcher->fetcher->context().size;
700 }
701}
702
Austin Schuh11d43732020-09-21 17:28:30 -0700703std::vector<std::vector<std::string>> ToLogReaderVector(
704 const std::vector<LogFile> &log_files) {
705 std::vector<std::vector<std::string>> result;
706 for (const LogFile &log_file : log_files) {
707 for (const LogParts &log_parts : log_file.parts) {
708 std::vector<std::string> parts;
709 for (const std::string &part : log_parts.parts) {
710 parts.emplace_back(part);
711 }
712 result.emplace_back(std::move(parts));
713 }
Austin Schuh5212cad2020-09-09 23:12:09 -0700714 }
715 return result;
716}
717
// Convenience constructor for a single log file; delegates to the
// multi-file constructor with a one-element filename list.
LogReader::LogReader(std::string_view filename,
                     const Configuration *replay_configuration)
    : LogReader(std::vector<std::string>{std::string(filename)},
                replay_configuration) {}
722
// Convenience constructor for one sorted sequence of log parts; delegates to
// the multi-node constructor by wrapping the list in an outer vector.
LogReader::LogReader(const std::vector<std::string> &filenames,
                     const Configuration *replay_configuration)
    : LogReader(std::vector<std::vector<std::string>>{filenames},
                replay_configuration) {}
727
// TODO(austin): Make this the base and kill the others.  This has much better
// context for sorting.
//
// Delegates by flattening the structured LogFile list back into the
// filename-vector form that the primary constructor takes.
LogReader::LogReader(const std::vector<LogFile> &log_files,
                     const Configuration *replay_configuration)
    : LogReader(ToLogReaderVector(log_files), replay_configuration) {}
733
// Primary constructor: reads the log file header, builds the remapped replay
// configuration, and allocates the per-node State slots.  The replay
// configuration (if provided) must match the logged configuration's node
// topology.
LogReader::LogReader(const std::vector<std::vector<std::string>> &filenames,
                     const Configuration *replay_configuration)
    : filenames_(filenames),
      log_file_header_(MaybeReadHeaderOrDie(filenames)),
      replay_configuration_(replay_configuration) {
  // Build remapped_configuration_ before any channel remapping below relies
  // on it.
  MakeRemappedConfig();

  // Remap all existing remote timestamp channels.  They will be recreated, and
  // the data logged isn't relevant anymore.
  for (const Node *node : configuration::GetNodes(logged_configuration())) {
    std::vector<const Node *> timestamp_logger_nodes =
        configuration::TimestampNodes(logged_configuration(), node);
    for (const Node *remote_node : timestamp_logger_nodes) {
      const std::string channel = absl::StrCat(
          "/aos/remote_timestamps/", remote_node->name()->string_view());
      // The remote timestamp channel must exist in the logged config; fail
      // loudly with the exact channel we looked for if it doesn't.
      CHECK(HasChannel<logger::MessageHeader>(channel, node))
          << ": Failed to find {\"name\": \"" << channel << "\", \"type\": \""
          << logger::MessageHeader::GetFullyQualifiedName() << "\"} for node "
          << node->name()->string_view();
      RemapLoggedChannel<logger::MessageHeader>(channel, node);
    }
  }

  // A replay config may only be substituted if it agrees with the log about
  // whether this is a single-node or multi-node system.
  if (replay_configuration) {
    CHECK_EQ(configuration::MultiNode(configuration()),
             configuration::MultiNode(replay_configuration))
        << ": Log file and replay config need to both be multi or single "
           "node.";
  }

  if (!configuration::MultiNode(configuration())) {
    // Single node: one State, fed by a ChannelMerger over all the files.
    states_.emplace_back(
        std::make_unique<State>(std::make_unique<ChannelMerger>(filenames)));
  } else {
    if (replay_configuration) {
      // Multi-node replay configs must enumerate exactly the logged nodes.
      CHECK_EQ(logged_configuration()->nodes()->size(),
               replay_configuration->nodes()->size())
          << ": Log file and replay config need to have matching nodes "
             "lists.";
      for (const Node *node : *logged_configuration()->nodes()) {
        if (configuration::GetNode(replay_configuration, node) == nullptr) {
          LOG(FATAL) << "Found node " << FlatbufferToJson(node)
                     << " in logged config that is not present in the replay "
                        "config.";
        }
      }
    }
    // States are created lazily in Register(); just size the vector here.
    states_.resize(configuration()->nodes()->size());
  }
}
784
// Tears down the reader.  If we own the event loop factory we deregister
// ourselves; if the caller owns it, they must have called Deregister()
// already or we abort.
LogReader::~LogReader() {
  if (event_loop_factory_unique_ptr_) {
    Deregister();
  } else if (event_loop_factory_ != nullptr) {
    LOG(FATAL) << "Must call Deregister before the SimulatedEventLoopFactory "
                  "is destroyed";
  }
  // Close the timestamp-offset CSV debug file if it was opened.
  if (offset_fp_ != nullptr) {
    fclose(offset_fp_);
  }
  // Zero out some buffers.  It's easy to do use-after-frees on these, so make
  // it more obvious.
  if (remapped_configuration_buffer_) {
    remapped_configuration_buffer_->Wipe();
  }
  log_file_header_.Wipe();
}
Austin Schuhe309d2a2019-11-29 13:25:21 -0800802
// Returns the configuration recorded in the log file's header (the config
// the system was running when the log was taken).
const Configuration *LogReader::logged_configuration() const {
  return log_file_header_.message().configuration();
}
806
// Returns the configuration replay actually runs against: the logged config
// with any requested channel remappings applied.
const Configuration *LogReader::configuration() const {
  return remapped_configuration_;
}
810
// Returns the nodes of the remapped configuration.  Only valid after the
// remapped configuration has been built.
std::vector<const Node *> LogReader::Nodes() const {
  // Because the Node pointer will only be valid if it actually points to
  // memory owned by remapped_configuration_, we need to wait for the
  // remapped_configuration_ to be populated before accessing it.
  //
  // Also, note, that when ever a map is changed, the nodes in here are
  // invalidated.
  CHECK(remapped_configuration_ != nullptr)
      << ": Need to call Register before the node() pointer will be valid.";
  return configuration::GetNodes(remapped_configuration_);
}
Austin Schuh15649d62019-12-28 16:36:38 -0800822
Austin Schuh11d43732020-09-21 17:28:30 -0700823monotonic_clock::time_point LogReader::monotonic_start_time(
824 const Node *node) const {
Austin Schuh8bd96322020-02-13 21:18:22 -0800825 State *state =
826 states_[configuration::GetNodeIndex(configuration(), node)].get();
827 CHECK(state != nullptr) << ": Unknown node " << FlatbufferToJson(node);
828
Austin Schuh858c9f32020-08-31 16:56:12 -0700829 return state->monotonic_start_time();
Austin Schuhe309d2a2019-11-29 13:25:21 -0800830}
831
Austin Schuh11d43732020-09-21 17:28:30 -0700832realtime_clock::time_point LogReader::realtime_start_time(
833 const Node *node) const {
Austin Schuh8bd96322020-02-13 21:18:22 -0800834 State *state =
835 states_[configuration::GetNodeIndex(configuration(), node)].get();
836 CHECK(state != nullptr) << ": Unknown node " << FlatbufferToJson(node);
837
Austin Schuh858c9f32020-08-31 16:56:12 -0700838 return state->realtime_start_time();
Austin Schuhe309d2a2019-11-29 13:25:21 -0800839}
840
James Kuszmaul84ff3e52020-01-03 19:48:53 -0800841void LogReader::Register() {
842 event_loop_factory_unique_ptr_ =
Austin Schuhac0771c2020-01-07 18:36:30 -0800843 std::make_unique<SimulatedEventLoopFactory>(configuration());
James Kuszmaul84ff3e52020-01-03 19:48:53 -0800844 Register(event_loop_factory_unique_ptr_.get());
845}
846
// Wires the reader into a SimulatedEventLoopFactory: builds a State per node,
// registers replay callbacks, seeds the inter-node clock-offset solver, and
// runs the simulation forward to the common start time.
void LogReader::Register(SimulatedEventLoopFactory *event_loop_factory) {
  event_loop_factory_ = event_loop_factory;
  remapped_configuration_ = event_loop_factory_->configuration();

  // First pass: create every State and attach it to its node's event loop
  // factory before any of them are registered.
  for (const Node *node : configuration::GetNodes(configuration())) {
    const size_t node_index =
        configuration::GetNodeIndex(configuration(), node);
    states_[node_index] =
        std::make_unique<State>(std::make_unique<ChannelMerger>(filenames_));
    State *state = states_[node_index].get();
    state->set_event_loop(state->SetNodeEventLoopFactory(
        event_loop_factory_->GetNodeEventLoopFactory(node)));

    state->SetChannelCount(logged_configuration()->channels()->size());
  }

  // Register after making all the State objects so we can build references
  // between them.
  for (const Node *node : configuration::GetNodes(configuration())) {
    const size_t node_index =
        configuration::GetNodeIndex(configuration(), node);
    State *state = states_[node_index].get();

    Register(state->event_loop());
  }

  // live_nodes_ is incremented as nodes with data register; zero means the
  // log contributes nothing to this config.
  if (live_nodes_ == 0) {
    LOG(FATAL)
        << "Don't have logs from any of the nodes in the replay config--are "
           "you sure that the replay config matches the original config?";
  }

  // We need to now seed our per-node time offsets and get everything set up
  // to run.
  const size_t num_nodes = nodes_count();

  // It is easiest to solve for per node offsets with a matrix rather than
  // trying to solve the equations by hand.  So let's get after it.
  //
  // Now, build up the map matrix.
  //
  // offset_matrix_ = (map_matrix_ + slope_matrix_) * [ta; tb; tc]
  map_matrix_ = Eigen::Matrix<mpq_class, Eigen::Dynamic, Eigen::Dynamic>::Zero(
      filters_.size() + 1, num_nodes);
  slope_matrix_ =
      Eigen::Matrix<mpq_class, Eigen::Dynamic, Eigen::Dynamic>::Zero(
          filters_.size() + 1, num_nodes);

  offset_matrix_ =
      Eigen::Matrix<mpq_class, Eigen::Dynamic, 1>::Zero(filters_.size() + 1);
  valid_matrix_ =
      Eigen::Matrix<bool, Eigen::Dynamic, 1>::Zero(filters_.size() + 1);
  last_valid_matrix_ =
      Eigen::Matrix<bool, Eigen::Dynamic, 1>::Zero(filters_.size() + 1);

  time_offset_matrix_ = Eigen::VectorXd::Zero(num_nodes);
  time_slope_matrix_ = Eigen::VectorXd::Zero(num_nodes);

  // All times should average out to the distributed clock.  Row 0 encodes
  // that constraint; it is always valid.
  for (int i = 0; i < map_matrix_.cols(); ++i) {
    // 1/num_nodes.
    map_matrix_(0, i) = mpq_class(1, num_nodes);
  }
  valid_matrix_(0) = true;

  {
    // Now, add the a - b -> sample elements.  One row per node-pair filter;
    // the filter writes its slope/offset/validity directly into our matrices
    // through the pointers set here.
    size_t i = 1;
    for (std::pair<const std::tuple<const Node *, const Node *>,
                   std::tuple<message_bridge::NoncausalOffsetEstimator>>
             &filter : filters_) {
      const Node *const node_a = std::get<0>(filter.first);
      const Node *const node_b = std::get<1>(filter.first);

      const size_t node_a_index =
          configuration::GetNodeIndex(configuration(), node_a);
      const size_t node_b_index =
          configuration::GetNodeIndex(configuration(), node_b);

      // -a
      map_matrix_(i, node_a_index) = mpq_class(-1);
      // +b
      map_matrix_(i, node_b_index) = mpq_class(1);

      // -> sample
      std::get<0>(filter.second)
          .set_slope_pointer(&slope_matrix_(i, node_a_index));
      std::get<0>(filter.second).set_offset_pointer(&offset_matrix_(i, 0));

      valid_matrix_(i) = false;
      std::get<0>(filter.second).set_valid_pointer(&valid_matrix_(i));

      ++i;
    }
  }

  // Prime each node's sorted message queue so the first samples exist before
  // solving.
  for (std::unique_ptr<State> &state : states_) {
    state->SeedSortedMessages();
  }

  // Rank of the map matrix tells you if all the nodes are in communication
  // with each other, which tells you if the offsets are observable.
  const size_t connected_nodes =
      Eigen::FullPivLU<
          Eigen::Matrix<mpq_class, Eigen::Dynamic, Eigen::Dynamic>>(map_matrix_)
          .rank();

  // We don't need to support isolated nodes until someone has a real use
  // case.
  CHECK_EQ(connected_nodes, num_nodes)
      << ": There is a node which isn't communicating with the rest.";

  // And solve.
  UpdateOffsets();

  // We want to start the log file at the last start time of the log files
  // from all the nodes.  Compute how long each node's simulation needs to run
  // to move time to this point.
  distributed_clock::time_point start_time = distributed_clock::min_time;

  // TODO(austin): We want an "OnStart" callback for each node rather than
  // running until the last node.

  for (std::unique_ptr<State> &state : states_) {
    VLOG(1) << "Start time is " << state->monotonic_start_time() << " for node "
            << MaybeNodeName(state->event_loop()->node()) << "now "
            << state->monotonic_now();
    // And start computing the start time on the distributed clock now that
    // that works.
    start_time = std::max(
        start_time, state->ToDistributedClock(state->monotonic_start_time()));
  }

  CHECK_GE(start_time, distributed_clock::epoch())
      << ": Hmm, we have a node starting before the start of time.  Offset "
         "everything.";

  // Forwarding is tracked per channel.  If it is enabled, we want to turn it
  // off.  Otherwise messages replayed will get forwarded across to the other
  // nodes, and also replayed on the other nodes.  This may not satisfy all
  // our users, but it'll start the discussion.
  if (configuration::MultiNode(event_loop_factory_->configuration())) {
    for (size_t i = 0; i < logged_configuration()->channels()->size(); ++i) {
      const Channel *channel = logged_configuration()->channels()->Get(i);
      const Node *node = configuration::GetNode(
          configuration(), channel->source_node()->string_view());

      State *state =
          states_[configuration::GetNodeIndex(configuration(), node)].get();

      const Channel *remapped_channel =
          RemapChannel(state->event_loop(), channel);

      event_loop_factory_->DisableForwarding(remapped_channel);
    }

    // If we are replaying a log, we don't want a bunch of redundant messages
    // from both the real message bridge and simulated message bridge.
    event_loop_factory_->DisableStatistics();
  }

  // While we are starting the system up, we might be relying on matching data
  // to timestamps on log files where the timestamp log file starts before the
  // data.  In this case, it is reasonable to expect missing data.
  ignore_missing_data_ = true;
  VLOG(1) << "Running until " << start_time << " in Register";
  event_loop_factory_->RunFor(start_time.time_since_epoch());
  VLOG(1) << "At start time";
  // Now that we are running for real, missing data means that the log file is
  // corrupted or went wrong.
  ignore_missing_data_ = false;

  for (std::unique_ptr<State> &state : states_) {
    // Make the RT clock be correct before handing it to the user.
    if (state->realtime_start_time() != realtime_clock::min_time) {
      state->SetRealtimeOffset(state->monotonic_start_time(),
                               state->realtime_start_time());
    }
    VLOG(1) << "Start time is " << state->monotonic_start_time() << " for node "
            << MaybeNodeName(state->event_loop()->node()) << "now "
            << state->monotonic_now();
  }

  // Seed the CSV debug output with each pair's starting times so the dumps
  // line up with the simulation clocks.
  if (FLAGS_timestamps_to_csv) {
    for (std::pair<const std::tuple<const Node *, const Node *>,
                   std::tuple<message_bridge::NoncausalOffsetEstimator>>
             &filter : filters_) {
      const Node *const node_a = std::get<0>(filter.first);
      const Node *const node_b = std::get<1>(filter.first);

      std::get<0>(filter.second)
          .SetFirstFwdTime(event_loop_factory_->GetNodeEventLoopFactory(node_a)
                               ->monotonic_now());
      std::get<0>(filter.second)
          .SetFirstRevTime(event_loop_factory_->GetNodeEventLoopFactory(node_b)
                               ->monotonic_now());
    }
  }
}
1046
// Re-solves the per-node clock slope/offset estimates from the accumulated
// timestamp samples and pushes the new fit into every node's State.
void LogReader::UpdateOffsets() {
  VLOG(2) << "Samples are " << offset_matrix_;
  VLOG(2) << "Map is " << (map_matrix_ + slope_matrix_);
  std::tie(time_slope_matrix_, time_offset_matrix_) = SolveOffsets();
  // Full-precision Eigen formatting for the debug log below.
  Eigen::IOFormat HeavyFmt(Eigen::FullPrecision, 0, ", ", ";\n", "[", "]", "[",
                           "]");
  VLOG(1) << "First slope " << time_slope_matrix_.transpose().format(HeavyFmt)
          << " offset " << time_offset_matrix_.transpose().format(HeavyFmt);

  // Hand each node its freshly solved distributed-clock mapping.
  size_t node_index = 0;
  for (std::unique_ptr<State> &state : states_) {
    state->SetDistributedOffset(offset(node_index), slope(node_index));
    VLOG(1) << "Offset for node " << node_index << " "
            << MaybeNodeName(state->event_loop()->node()) << "is "
            << aos::distributed_clock::time_point(offset(node_index))
            << " slope " << std::setprecision(9) << std::fixed
            << slope(node_index);
    ++node_index;
  }

  if (VLOG_IS_ON(1)) {
    LogFit("Offset is");
  }
}
1071
// Debug-only: dumps (at VLOG(1)) the current clock fit for every node pair --
// the slope/offset recovered from the solved matrices versus each
// estimator's own fit, plus spot checks mapping two reference timestamps
// from each side through the distributed clock and back.
void LogReader::LogFit(std::string_view prefix) {
  for (std::unique_ptr<State> &state : states_) {
    VLOG(1) << MaybeNodeName(state->event_loop()->node()) << " now "
            << state->monotonic_now() << " distributed "
            << event_loop_factory_->distributed_now();
  }

  for (std::pair<const std::tuple<const Node *, const Node *>,
                 std::tuple<message_bridge::NoncausalOffsetEstimator>> &filter :
       filters_) {
    message_bridge::NoncausalOffsetEstimator *estimator =
        &std::get<0>(filter.second);

    // Skip pairs with no samples yet.
    // NOTE(review): the [1] accesses below assume a non-empty estimator has
    // at least two timestamps per side -- confirm that invariant holds.
    if (estimator->a_timestamps().size() == 0 &&
        estimator->b_timestamps().size() == 0) {
      continue;
    }

    if (VLOG_IS_ON(1)) {
      estimator->LogFit(prefix);
    }

    const Node *const node_a = std::get<0>(filter.first);
    const Node *const node_b = std::get<1>(filter.first);

    const size_t node_a_index =
        configuration::GetNodeIndex(configuration(), node_a);
    const size_t node_b_index =
        configuration::GetNodeIndex(configuration(), node_b);

    // Reconstruct the a->b slope/offset from the per-node solution so it can
    // be compared against the estimator's own fit.
    const double recovered_slope =
        slope(node_b_index) / slope(node_a_index) - 1.0;
    const int64_t recovered_offset =
        offset(node_b_index).count() - offset(node_a_index).count() *
                                           slope(node_b_index) /
                                           slope(node_a_index);

    VLOG(1) << "Recovered slope " << std::setprecision(20) << recovered_slope
            << " (error " << recovered_slope - estimator->fit().slope() << ") "
            << " offset " << std::setprecision(20) << recovered_offset
            << " (error "
            << recovered_offset - estimator->fit().offset().count() << ")";

    // Map node a's two reference timestamps onto the distributed clock.
    const aos::distributed_clock::time_point a0 =
        states_[node_a_index]->ToDistributedClock(
            std::get<0>(estimator->a_timestamps()[0]));
    const aos::distributed_clock::time_point a1 =
        states_[node_a_index]->ToDistributedClock(
            std::get<0>(estimator->a_timestamps()[1]));

    VLOG(1) << node_a->name()->string_view() << " timestamps()[0] = "
            << std::get<0>(estimator->a_timestamps()[0]) << " -> " << a0
            << " distributed -> " << node_b->name()->string_view() << " "
            << states_[node_b_index]->FromDistributedClock(a0) << " should be "
            << aos::monotonic_clock::time_point(
                   std::chrono::nanoseconds(static_cast<int64_t>(
                       std::get<0>(estimator->a_timestamps()[0])
                           .time_since_epoch()
                           .count() *
                       (1.0 + estimator->fit().slope()))) +
                   estimator->fit().offset())
            << ((a0 <= event_loop_factory_->distributed_now())
                    ? ""
                    : " After now, investigate");
    VLOG(1) << node_a->name()->string_view() << " timestamps()[1] = "
            << std::get<0>(estimator->a_timestamps()[1]) << " -> " << a1
            << " distributed -> " << node_b->name()->string_view() << " "
            << states_[node_b_index]->FromDistributedClock(a1) << " should be "
            << aos::monotonic_clock::time_point(
                   std::chrono::nanoseconds(static_cast<int64_t>(
                       std::get<0>(estimator->a_timestamps()[1])
                           .time_since_epoch()
                           .count() *
                       (1.0 + estimator->fit().slope()))) +
                   estimator->fit().offset())
            << ((event_loop_factory_->distributed_now() <= a1)
                    ? ""
                    : " Before now, investigate");

    // And the same spot check from node b's side.
    const aos::distributed_clock::time_point b0 =
        states_[node_b_index]->ToDistributedClock(
            std::get<0>(estimator->b_timestamps()[0]));
    const aos::distributed_clock::time_point b1 =
        states_[node_b_index]->ToDistributedClock(
            std::get<0>(estimator->b_timestamps()[1]));

    VLOG(1) << node_b->name()->string_view() << " timestamps()[0] = "
            << std::get<0>(estimator->b_timestamps()[0]) << " -> " << b0
            << " distributed -> " << node_a->name()->string_view() << " "
            << states_[node_a_index]->FromDistributedClock(b0)
            << ((b0 <= event_loop_factory_->distributed_now())
                    ? ""
                    : " After now, investigate");
    VLOG(1) << node_b->name()->string_view() << " timestamps()[1] = "
            << std::get<0>(estimator->b_timestamps()[1]) << " -> " << b1
            << " distributed -> " << node_a->name()->string_view() << " "
            << states_[node_a_index]->FromDistributedClock(b1)
            << ((event_loop_factory_->distributed_now() <= b1)
                    ? ""
                    : " Before now, investigate");
  }
}
1174
1175message_bridge::NoncausalOffsetEstimator *LogReader::GetFilter(
Austin Schuh8bd96322020-02-13 21:18:22 -08001176 const Node *node_a, const Node *node_b) {
1177 CHECK_NE(node_a, node_b);
1178 CHECK_EQ(configuration::GetNode(configuration(), node_a), node_a);
1179 CHECK_EQ(configuration::GetNode(configuration(), node_b), node_b);
1180
1181 if (node_a > node_b) {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001182 return GetFilter(node_b, node_a);
Austin Schuh8bd96322020-02-13 21:18:22 -08001183 }
1184
1185 auto tuple = std::make_tuple(node_a, node_b);
1186
1187 auto it = filters_.find(tuple);
1188
1189 if (it == filters_.end()) {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001190 auto &x =
1191 filters_
1192 .insert(std::make_pair(
1193 tuple, std::make_tuple(message_bridge::NoncausalOffsetEstimator(
1194 node_a, node_b))))
1195 .first->second;
Austin Schuh8bd96322020-02-13 21:18:22 -08001196 if (FLAGS_timestamps_to_csv) {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001197 std::get<0>(x).SetFwdCsvFileName(absl::StrCat(
1198 "/tmp/timestamp_noncausal_", node_a->name()->string_view(), "_",
1199 node_b->name()->string_view()));
1200 std::get<0>(x).SetRevCsvFileName(absl::StrCat(
1201 "/tmp/timestamp_noncausal_", node_b->name()->string_view(), "_",
1202 node_a->name()->string_view()));
Austin Schuh8bd96322020-02-13 21:18:22 -08001203 }
1204
Austin Schuh2f8fd752020-09-01 22:38:28 -07001205 return &std::get<0>(x);
Austin Schuh8bd96322020-02-13 21:18:22 -08001206 } else {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001207 return &std::get<0>(it->second);
Austin Schuh8bd96322020-02-13 21:18:22 -08001208 }
1209}
1210
Austin Schuhe309d2a2019-11-29 13:25:21 -08001211void LogReader::Register(EventLoop *event_loop) {
Austin Schuh8bd96322020-02-13 21:18:22 -08001212 State *state =
1213 states_[configuration::GetNodeIndex(configuration(), event_loop->node())]
1214 .get();
Austin Schuh6f3babe2020-01-26 20:34:50 -08001215
Austin Schuh858c9f32020-08-31 16:56:12 -07001216 state->set_event_loop(event_loop);
Austin Schuhe309d2a2019-11-29 13:25:21 -08001217
Tyler Chatow67ddb032020-01-12 14:30:04 -08001218 // We don't run timing reports when trying to print out logged data, because
1219 // otherwise we would end up printing out the timing reports themselves...
1220 // This is only really relevant when we are replaying into a simulation.
Austin Schuh6f3babe2020-01-26 20:34:50 -08001221 event_loop->SkipTimingReport();
1222 event_loop->SkipAosLog();
Austin Schuh39788ff2019-12-01 18:22:57 -08001223
Austin Schuh858c9f32020-08-31 16:56:12 -07001224 const bool has_data = state->SetNode();
Austin Schuhe309d2a2019-11-29 13:25:21 -08001225
Austin Schuh8d7e0bb2020-10-02 17:57:00 -07001226 for (size_t logged_channel_index = 0;
1227 logged_channel_index < logged_configuration()->channels()->size();
1228 ++logged_channel_index) {
1229 const Channel *channel = RemapChannel(
1230 event_loop,
1231 logged_configuration()->channels()->Get(logged_channel_index));
Austin Schuh8bd96322020-02-13 21:18:22 -08001232
Austin Schuh2f8fd752020-09-01 22:38:28 -07001233 message_bridge::NoncausalOffsetEstimator *filter = nullptr;
Austin Schuh8d7e0bb2020-10-02 17:57:00 -07001234 aos::Sender<MessageHeader> *remote_timestamp_sender = nullptr;
1235
1236 State *source_state = nullptr;
Austin Schuh8bd96322020-02-13 21:18:22 -08001237
1238 if (!configuration::ChannelIsSendableOnNode(channel, event_loop->node()) &&
1239 configuration::ChannelIsReadableOnNode(channel, event_loop->node())) {
Austin Schuh8d7e0bb2020-10-02 17:57:00 -07001240 // We've got a message which is being forwarded to this node.
1241 const Node *source_node = configuration::GetNode(
Austin Schuh8bd96322020-02-13 21:18:22 -08001242 event_loop->configuration(), channel->source_node()->string_view());
Austin Schuh8d7e0bb2020-10-02 17:57:00 -07001243 filter = GetFilter(event_loop->node(), source_node);
Austin Schuh8bd96322020-02-13 21:18:22 -08001244
Austin Schuh8d7e0bb2020-10-02 17:57:00 -07001245 // Delivery timestamps are supposed to be logged back on the source node.
1246 // Configure remote timestamps to be sent.
1247 const bool delivery_time_is_logged =
1248 configuration::ConnectionDeliveryTimeIsLoggedOnNode(
1249 channel, event_loop->node(), source_node);
1250
1251 source_state =
1252 states_[configuration::GetNodeIndex(configuration(), source_node)]
1253 .get();
1254
1255 if (delivery_time_is_logged) {
1256 remote_timestamp_sender =
1257 source_state->RemoteTimestampSender(event_loop->node());
Austin Schuh8bd96322020-02-13 21:18:22 -08001258 }
1259 }
Austin Schuh858c9f32020-08-31 16:56:12 -07001260
Austin Schuh8d7e0bb2020-10-02 17:57:00 -07001261 state->SetChannel(
1262 logged_channel_index,
1263 configuration::ChannelIndex(event_loop->configuration(), channel),
1264 event_loop->MakeRawSender(channel), filter, remote_timestamp_sender,
1265 source_state);
Austin Schuhe309d2a2019-11-29 13:25:21 -08001266 }
1267
Austin Schuh6aa77be2020-02-22 21:06:40 -08001268 // If we didn't find any log files with data in them, we won't ever get a
1269 // callback or be live. So skip the rest of the setup.
1270 if (!has_data) {
1271 return;
1272 }
1273
Austin Schuh858c9f32020-08-31 16:56:12 -07001274 state->set_timer_handler(event_loop->AddTimer([this, state]() {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001275 VLOG(1) << "Starting sending " << MaybeNodeName(state->event_loop()->node())
1276 << "at " << state->event_loop()->context().monotonic_event_time
1277 << " now " << state->monotonic_now();
Austin Schuh858c9f32020-08-31 16:56:12 -07001278 if (state->OldestMessageTime() == monotonic_clock::max_time) {
Austin Schuh6f3babe2020-01-26 20:34:50 -08001279 --live_nodes_;
Austin Schuh2f8fd752020-09-01 22:38:28 -07001280 VLOG(1) << MaybeNodeName(state->event_loop()->node()) << "Node down!";
Austin Schuh6f3babe2020-01-26 20:34:50 -08001281 if (live_nodes_ == 0) {
1282 event_loop_factory_->Exit();
1283 }
James Kuszmaul314f1672020-01-03 20:02:08 -08001284 return;
1285 }
Austin Schuh6f3babe2020-01-26 20:34:50 -08001286 TimestampMerger::DeliveryTimestamp channel_timestamp;
Austin Schuh05b70472020-01-01 17:11:17 -08001287 int channel_index;
1288 FlatbufferVector<MessageHeader> channel_data =
1289 FlatbufferVector<MessageHeader>::Empty();
1290
Austin Schuh2f8fd752020-09-01 22:38:28 -07001291 if (VLOG_IS_ON(1)) {
1292 LogFit("Offset was");
1293 }
1294
1295 bool update_time;
Austin Schuh05b70472020-01-01 17:11:17 -08001296 std::tie(channel_timestamp, channel_index, channel_data) =
Austin Schuh2f8fd752020-09-01 22:38:28 -07001297 state->PopOldest(&update_time);
Austin Schuh05b70472020-01-01 17:11:17 -08001298
Austin Schuhe309d2a2019-11-29 13:25:21 -08001299 const monotonic_clock::time_point monotonic_now =
Austin Schuh858c9f32020-08-31 16:56:12 -07001300 state->event_loop()->context().monotonic_event_time;
Austin Schuh2f8fd752020-09-01 22:38:28 -07001301 if (!FLAGS_skip_order_validation) {
1302 CHECK(monotonic_now == channel_timestamp.monotonic_event_time)
1303 << ": " << FlatbufferToJson(state->event_loop()->node()) << " Now "
1304 << monotonic_now << " trying to send "
1305 << channel_timestamp.monotonic_event_time << " failure "
1306 << state->DebugString();
1307 } else if (monotonic_now != channel_timestamp.monotonic_event_time) {
1308 LOG(WARNING) << "Check failed: monotonic_now == "
1309 "channel_timestamp.monotonic_event_time) ("
1310 << monotonic_now << " vs. "
1311 << channel_timestamp.monotonic_event_time
1312 << "): " << FlatbufferToJson(state->event_loop()->node())
1313 << " Now " << monotonic_now << " trying to send "
1314 << channel_timestamp.monotonic_event_time << " failure "
1315 << state->DebugString();
1316 }
Austin Schuhe309d2a2019-11-29 13:25:21 -08001317
Austin Schuh6f3babe2020-01-26 20:34:50 -08001318 if (channel_timestamp.monotonic_event_time >
Austin Schuh858c9f32020-08-31 16:56:12 -07001319 state->monotonic_start_time() ||
Austin Schuh15649d62019-12-28 16:36:38 -08001320 event_loop_factory_ != nullptr) {
Austin Schuh8bd96322020-02-13 21:18:22 -08001321 if ((!ignore_missing_data_ && !FLAGS_skip_missing_forwarding_entries &&
Austin Schuh858c9f32020-08-31 16:56:12 -07001322 !state->at_end()) ||
Austin Schuh05b70472020-01-01 17:11:17 -08001323 channel_data.message().data() != nullptr) {
1324 CHECK(channel_data.message().data() != nullptr)
1325 << ": Got a message without data. Forwarding entry which was "
Austin Schuh2f8fd752020-09-01 22:38:28 -07001326 "not matched? Use --skip_missing_forwarding_entries to "
Brian Silverman87ac0402020-09-17 14:47:01 -07001327 "ignore this.";
Austin Schuh92547522019-12-28 14:33:43 -08001328
Austin Schuh2f8fd752020-09-01 22:38:28 -07001329 if (update_time) {
Austin Schuh8bd96322020-02-13 21:18:22 -08001330 // Confirm that the message was sent on the sending node before the
1331 // destination node (this node). As a proxy, do this by making sure
1332 // that time on the source node is past when the message was sent.
Austin Schuh2f8fd752020-09-01 22:38:28 -07001333 if (!FLAGS_skip_order_validation) {
1334 CHECK_LT(channel_timestamp.monotonic_remote_time,
1335 state->monotonic_remote_now(channel_index))
1336 << state->event_loop()->node()->name()->string_view() << " to "
1337 << state->remote_node(channel_index)->name()->string_view()
1338 << " " << state->DebugString();
1339 } else if (channel_timestamp.monotonic_remote_time >=
1340 state->monotonic_remote_now(channel_index)) {
1341 LOG(WARNING)
1342 << "Check failed: channel_timestamp.monotonic_remote_time < "
1343 "state->monotonic_remote_now(channel_index) ("
1344 << channel_timestamp.monotonic_remote_time << " vs. "
1345 << state->monotonic_remote_now(channel_index) << ") "
1346 << state->event_loop()->node()->name()->string_view() << " to "
1347 << state->remote_node(channel_index)->name()->string_view()
1348 << " currently " << channel_timestamp.monotonic_event_time
1349 << " ("
1350 << state->ToDistributedClock(
1351 channel_timestamp.monotonic_event_time)
1352 << ") remote event time "
1353 << channel_timestamp.monotonic_remote_time << " ("
1354 << state->RemoteToDistributedClock(
1355 channel_index, channel_timestamp.monotonic_remote_time)
1356 << ") " << state->DebugString();
1357 }
Austin Schuh8bd96322020-02-13 21:18:22 -08001358
1359 if (FLAGS_timestamps_to_csv) {
1360 if (offset_fp_ == nullptr) {
1361 offset_fp_ = fopen("/tmp/offsets.csv", "w");
1362 fprintf(
1363 offset_fp_,
1364 "# time_since_start, offset node 0, offset node 1, ...\n");
1365 first_time_ = channel_timestamp.realtime_event_time;
1366 }
1367
1368 fprintf(offset_fp_, "%.9f",
1369 std::chrono::duration_cast<std::chrono::duration<double>>(
1370 channel_timestamp.realtime_event_time - first_time_)
1371 .count());
Austin Schuh2f8fd752020-09-01 22:38:28 -07001372 for (int i = 1; i < time_offset_matrix_.rows(); ++i) {
1373 fprintf(offset_fp_, ", %.9f",
1374 time_offset_matrix_(i, 0) +
1375 time_slope_matrix_(i, 0) *
1376 chrono::duration<double>(
1377 event_loop_factory_->distributed_now()
1378 .time_since_epoch())
1379 .count());
Austin Schuh8bd96322020-02-13 21:18:22 -08001380 }
1381 fprintf(offset_fp_, "\n");
1382 }
Austin Schuh8bd96322020-02-13 21:18:22 -08001383 }
1384
Austin Schuh15649d62019-12-28 16:36:38 -08001385 // If we have access to the factory, use it to fix the realtime time.
Austin Schuh858c9f32020-08-31 16:56:12 -07001386 state->SetRealtimeOffset(channel_timestamp.monotonic_event_time,
1387 channel_timestamp.realtime_event_time);
Austin Schuh15649d62019-12-28 16:36:38 -08001388
Austin Schuh2f8fd752020-09-01 22:38:28 -07001389 VLOG(1) << MaybeNodeName(state->event_loop()->node()) << "Sending "
1390 << channel_timestamp.monotonic_event_time;
1391 // TODO(austin): std::move channel_data in and make that efficient in
1392 // simulation.
Austin Schuh858c9f32020-08-31 16:56:12 -07001393 state->Send(channel_index, channel_data.message().data()->Data(),
Austin Schuh8d7e0bb2020-10-02 17:57:00 -07001394 channel_data.message().data()->size(), channel_timestamp);
Austin Schuh2f8fd752020-09-01 22:38:28 -07001395 } else if (state->at_end() && !ignore_missing_data_) {
Austin Schuh8bd96322020-02-13 21:18:22 -08001396 // We are at the end of the log file and found missing data. Finish
Austin Schuh2f8fd752020-09-01 22:38:28 -07001397 // reading the rest of the log file and call it quits. We don't want
1398 // to replay partial data.
Austin Schuh858c9f32020-08-31 16:56:12 -07001399 while (state->OldestMessageTime() != monotonic_clock::max_time) {
1400 bool update_time_dummy;
1401 state->PopOldest(&update_time_dummy);
Austin Schuh8bd96322020-02-13 21:18:22 -08001402 }
Austin Schuh2f8fd752020-09-01 22:38:28 -07001403 } else {
1404 CHECK(channel_data.message().data() == nullptr) << ": Nullptr";
Austin Schuh92547522019-12-28 14:33:43 -08001405 }
Austin Schuhe309d2a2019-11-29 13:25:21 -08001406 } else {
Austin Schuh6f3babe2020-01-26 20:34:50 -08001407 LOG(WARNING)
1408 << "Not sending data from before the start of the log file. "
1409 << channel_timestamp.monotonic_event_time.time_since_epoch().count()
1410 << " start " << monotonic_start_time().time_since_epoch().count()
Austin Schuhd85baf82020-10-19 11:50:12 -07001411 << " "
1412 << FlatbufferToJson(channel_data,
1413 {.multi_line = false, .max_vector_size = 100});
Austin Schuhe309d2a2019-11-29 13:25:21 -08001414 }
1415
Austin Schuh858c9f32020-08-31 16:56:12 -07001416 const monotonic_clock::time_point next_time = state->OldestMessageTime();
Austin Schuh6f3babe2020-01-26 20:34:50 -08001417 if (next_time != monotonic_clock::max_time) {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001418 VLOG(1) << "Scheduling " << MaybeNodeName(state->event_loop()->node())
1419 << "wakeup for " << next_time << "("
1420 << state->ToDistributedClock(next_time)
1421 << " distributed), now is " << state->monotonic_now();
Austin Schuh858c9f32020-08-31 16:56:12 -07001422 state->Setup(next_time);
James Kuszmaul314f1672020-01-03 20:02:08 -08001423 } else {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001424 VLOG(1) << MaybeNodeName(state->event_loop()->node())
1425 << "No next message, scheduling shutdown";
1426 // Set a timer up immediately after now to die. If we don't do this,
1427 // then the senders waiting on the message we just read will never get
1428 // called.
Austin Schuheecb9282020-01-08 17:43:30 -08001429 if (event_loop_factory_ != nullptr) {
Austin Schuh858c9f32020-08-31 16:56:12 -07001430 state->Setup(monotonic_now + event_loop_factory_->send_delay() +
1431 std::chrono::nanoseconds(1));
Austin Schuheecb9282020-01-08 17:43:30 -08001432 }
Austin Schuhe309d2a2019-11-29 13:25:21 -08001433 }
Austin Schuh8bd96322020-02-13 21:18:22 -08001434
Austin Schuh2f8fd752020-09-01 22:38:28 -07001435 // Once we make this call, the current time changes. So do everything
1436 // which involves time before changing it. That especially includes
1437 // sending the message.
1438 if (update_time) {
1439 VLOG(1) << MaybeNodeName(state->event_loop()->node())
1440 << "updating offsets";
1441
1442 std::vector<aos::monotonic_clock::time_point> before_times;
1443 before_times.resize(states_.size());
1444 std::transform(states_.begin(), states_.end(), before_times.begin(),
1445 [](const std::unique_ptr<State> &state) {
1446 return state->monotonic_now();
1447 });
1448
1449 for (size_t i = 0; i < states_.size(); ++i) {
Brian Silvermand90905f2020-09-23 14:42:56 -07001450 VLOG(1) << MaybeNodeName(states_[i]->event_loop()->node()) << "before "
1451 << states_[i]->monotonic_now();
Austin Schuh2f8fd752020-09-01 22:38:28 -07001452 }
1453
Austin Schuh8bd96322020-02-13 21:18:22 -08001454 UpdateOffsets();
Austin Schuh2f8fd752020-09-01 22:38:28 -07001455 VLOG(1) << MaybeNodeName(state->event_loop()->node()) << "Now is now "
1456 << state->monotonic_now();
1457
1458 for (size_t i = 0; i < states_.size(); ++i) {
Brian Silvermand90905f2020-09-23 14:42:56 -07001459 VLOG(1) << MaybeNodeName(states_[i]->event_loop()->node()) << "after "
1460 << states_[i]->monotonic_now();
Austin Schuh2f8fd752020-09-01 22:38:28 -07001461 }
1462
1463 // TODO(austin): We should be perfect.
1464 const std::chrono::nanoseconds kTolerance{3};
1465 if (!FLAGS_skip_order_validation) {
1466 CHECK_GE(next_time, state->monotonic_now())
1467 << ": Time skipped the next event.";
1468
1469 for (size_t i = 0; i < states_.size(); ++i) {
1470 CHECK_GE(states_[i]->monotonic_now(), before_times[i] - kTolerance)
1471 << ": Time changed too much on node "
1472 << MaybeNodeName(states_[i]->event_loop()->node());
1473 CHECK_LE(states_[i]->monotonic_now(), before_times[i] + kTolerance)
1474 << ": Time changed too much on node "
1475 << states_[i]->event_loop()->node()->name()->string_view();
1476 }
1477 } else {
1478 if (next_time < state->monotonic_now()) {
1479 LOG(WARNING) << "Check failed: next_time >= "
1480 "state->monotonic_now() ("
1481 << next_time << " vs. " << state->monotonic_now()
1482 << "): Time skipped the next event.";
1483 }
1484 for (size_t i = 0; i < states_.size(); ++i) {
1485 if (states_[i]->monotonic_now() >= before_times[i] - kTolerance) {
1486 LOG(WARNING) << "Check failed: "
1487 "states_[i]->monotonic_now() "
1488 ">= before_times[i] - kTolerance ("
1489 << states_[i]->monotonic_now() << " vs. "
1490 << before_times[i] - kTolerance
1491 << ") : Time changed too much on node "
1492 << MaybeNodeName(states_[i]->event_loop()->node());
1493 }
1494 if (states_[i]->monotonic_now() <= before_times[i] + kTolerance) {
1495 LOG(WARNING) << "Check failed: "
1496 "states_[i]->monotonic_now() "
1497 "<= before_times[i] + kTolerance ("
1498 << states_[i]->monotonic_now() << " vs. "
1499 << before_times[i] - kTolerance
1500 << ") : Time changed too much on node "
1501 << MaybeNodeName(states_[i]->event_loop()->node());
1502 }
1503 }
1504 }
Austin Schuh8bd96322020-02-13 21:18:22 -08001505 }
Austin Schuh2f8fd752020-09-01 22:38:28 -07001506
1507 VLOG(1) << MaybeNodeName(state->event_loop()->node()) << "Done sending at "
1508 << state->event_loop()->context().monotonic_event_time << " now "
1509 << state->monotonic_now();
Austin Schuh858c9f32020-08-31 16:56:12 -07001510 }));
Austin Schuhe309d2a2019-11-29 13:25:21 -08001511
Austin Schuh6f3babe2020-01-26 20:34:50 -08001512 ++live_nodes_;
1513
Austin Schuh858c9f32020-08-31 16:56:12 -07001514 if (state->OldestMessageTime() != monotonic_clock::max_time) {
1515 event_loop->OnRun([state]() { state->Setup(state->OldestMessageTime()); });
Austin Schuhe309d2a2019-11-29 13:25:21 -08001516 }
1517}
1518
1519void LogReader::Deregister() {
James Kuszmaul84ff3e52020-01-03 19:48:53 -08001520 // Make sure that things get destroyed in the correct order, rather than
1521 // relying on getting the order correct in the class definition.
Austin Schuh8bd96322020-02-13 21:18:22 -08001522 for (std::unique_ptr<State> &state : states_) {
Austin Schuh858c9f32020-08-31 16:56:12 -07001523 state->Deregister();
Austin Schuhe309d2a2019-11-29 13:25:21 -08001524 }
Austin Schuh92547522019-12-28 14:33:43 -08001525
James Kuszmaul84ff3e52020-01-03 19:48:53 -08001526 event_loop_factory_unique_ptr_.reset();
1527 event_loop_factory_ = nullptr;
Austin Schuhe309d2a2019-11-29 13:25:21 -08001528}
1529
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -08001530void LogReader::RemapLoggedChannel(std::string_view name, std::string_view type,
1531 std::string_view add_prefix) {
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -08001532 for (size_t ii = 0; ii < logged_configuration()->channels()->size(); ++ii) {
1533 const Channel *const channel = logged_configuration()->channels()->Get(ii);
1534 if (channel->name()->str() == name &&
1535 channel->type()->string_view() == type) {
1536 CHECK_EQ(0u, remapped_channels_.count(ii))
1537 << "Already remapped channel "
1538 << configuration::CleanedChannelToString(channel);
1539 remapped_channels_[ii] = std::string(add_prefix) + std::string(name);
1540 VLOG(1) << "Remapping channel "
1541 << configuration::CleanedChannelToString(channel)
1542 << " to have name " << remapped_channels_[ii];
Austin Schuh6331ef92020-01-07 18:28:09 -08001543 MakeRemappedConfig();
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -08001544 return;
1545 }
1546 }
1547 LOG(FATAL) << "Unabled to locate channel with name " << name << " and type "
1548 << type;
1549}
1550
Austin Schuh01b4c352020-09-21 23:09:39 -07001551void LogReader::RemapLoggedChannel(std::string_view name, std::string_view type,
1552 const Node *node,
1553 std::string_view add_prefix) {
1554 VLOG(1) << "Node is " << aos::FlatbufferToJson(node);
1555 const Channel *remapped_channel =
1556 configuration::GetChannel(logged_configuration(), name, type, "", node);
1557 CHECK(remapped_channel != nullptr) << ": Failed to find {\"name\": \"" << name
1558 << "\", \"type\": \"" << type << "\"}";
1559 VLOG(1) << "Original {\"name\": \"" << name << "\", \"type\": \"" << type
1560 << "\"}";
1561 VLOG(1) << "Remapped "
1562 << aos::configuration::StrippedChannelToString(remapped_channel);
1563
1564 // We want to make /spray on node 0 go to /0/spray by snooping the maps. And
1565 // we want it to degrade if the heuristics fail to just work.
1566 //
1567 // The easiest way to do this is going to be incredibly specific and verbose.
1568 // Look up /spray, to /0/spray. Then, prefix the result with /original to get
1569 // /original/0/spray. Then, create a map from /original/spray to
1570 // /original/0/spray for just the type we were asked for.
1571 if (name != remapped_channel->name()->string_view()) {
1572 MapT new_map;
1573 new_map.match = std::make_unique<ChannelT>();
1574 new_map.match->name = absl::StrCat(add_prefix, name);
1575 new_map.match->type = type;
1576 if (node != nullptr) {
1577 new_map.match->source_node = node->name()->str();
1578 }
1579 new_map.rename = std::make_unique<ChannelT>();
1580 new_map.rename->name =
1581 absl::StrCat(add_prefix, remapped_channel->name()->string_view());
1582 maps_.emplace_back(std::move(new_map));
1583 }
1584
1585 const size_t channel_index =
1586 configuration::ChannelIndex(logged_configuration(), remapped_channel);
1587 CHECK_EQ(0u, remapped_channels_.count(channel_index))
1588 << "Already remapped channel "
1589 << configuration::CleanedChannelToString(remapped_channel);
1590 remapped_channels_[channel_index] =
1591 absl::StrCat(add_prefix, remapped_channel->name()->string_view());
1592 MakeRemappedConfig();
1593}
1594
// Rebuilds remapped_configuration_ from the logged (or replay) configuration
// plus the accumulated remapped_channels_ and maps_. Must run before any
// per-node event loops are created.
void LogReader::MakeRemappedConfig() {
  // It is only safe to change the configuration before replay starts; once a
  // state has an event loop, its senders were built against the old config.
  for (std::unique_ptr<State> &state : states_) {
    if (state) {
      CHECK(!state->event_loop())
          << ": Can't change the mapping after the events are scheduled.";
    }
  }

  // If no remapping occurred and we are using the original config, then there
  // is nothing interesting to do here.
  if (remapped_channels_.empty() && replay_configuration_ == nullptr) {
    remapped_configuration_ = logged_configuration();
    return;
  }
  // Config to copy Channel definitions from. Use the specified
  // replay_configuration_ if it has been provided.
  const Configuration *const base_config = replay_configuration_ == nullptr
                                               ? logged_configuration()
                                               : replay_configuration_;
  // The remapped config will be identical to the base_config, except that it
  // will have a bunch of extra channels in the channel list, which are exact
  // copies of the remapped channels, but with different names.
  // Because the flatbuffers API is a pain to work with, this requires a bit of
  // a song-and-dance to get copied over.
  // The order of operations is to:
  // 1) Make a flatbuffer builder for a config that will just contain a list of
  //    the new channels that we want to add.
  // 2) For each channel that we are remapping:
  //    a) Make a buffer/builder and construct into it a Channel table that only
  //       contains the new name for the channel.
  //    b) Merge the new channel with just the name into the channel that we are
  //       trying to copy, built in the flatbuffer builder made in 1. This gives
  //       us the new channel definition that we need.
  // 3) Using this list of offsets, build the Configuration of just new
  //    Channels.
  // 4) Merge the Configuration with the new Channels into the base_config.
  // 5) Call MergeConfiguration() on that result to give MergeConfiguration a
  //    chance to sanitize the config.

  // This is the builder that we use for the config containing all the new
  // channels.
  flatbuffers::FlatBufferBuilder new_config_fbb;
  new_config_fbb.ForceDefaults(true);
  std::vector<flatbuffers::Offset<Channel>> channel_offsets;
  for (auto &pair : remapped_channels_) {
    // This is the builder that we use for creating the Channel with just the
    // new name.
    flatbuffers::FlatBufferBuilder new_name_fbb;
    new_name_fbb.ForceDefaults(true);
    const flatbuffers::Offset<flatbuffers::String> name_offset =
        new_name_fbb.CreateString(pair.second);
    ChannelBuilder new_name_builder(new_name_fbb);
    new_name_builder.add_name(name_offset);
    new_name_fbb.Finish(new_name_builder.Finish());
    const FlatbufferDetachedBuffer<Channel> new_name = new_name_fbb.Release();
    // Retrieve the channel that we want to copy, confirming that it is
    // actually present in base_config.
    const Channel *const base_channel = CHECK_NOTNULL(configuration::GetChannel(
        base_config, logged_configuration()->channels()->Get(pair.first), "",
        nullptr));
    // Actually create the new channel and put it into the vector of Offsets
    // that we will use to create the new Configuration.
    channel_offsets.emplace_back(MergeFlatBuffers<Channel>(
        reinterpret_cast<const flatbuffers::Table *>(base_channel),
        reinterpret_cast<const flatbuffers::Table *>(&new_name.message()),
        &new_config_fbb));
  }
  // Create the Configuration containing the new channels that we want to add.
  const auto new_channel_vector_offsets =
      new_config_fbb.CreateVector(channel_offsets);

  // Now create the new maps.  These redirect the original channel names to
  // their renamed versions (built up by RemapLoggedChannel).
  std::vector<flatbuffers::Offset<Map>> map_offsets;
  for (const MapT &map : maps_) {
    // All string offsets must be created before any table builder is started
    // on the same FlatBufferBuilder (flatbuffer nesting restriction).
    const flatbuffers::Offset<flatbuffers::String> match_name_offset =
        new_config_fbb.CreateString(map.match->name);
    const flatbuffers::Offset<flatbuffers::String> match_type_offset =
        new_config_fbb.CreateString(map.match->type);
    const flatbuffers::Offset<flatbuffers::String> rename_name_offset =
        new_config_fbb.CreateString(map.rename->name);
    flatbuffers::Offset<flatbuffers::String> match_source_node_offset;
    if (!map.match->source_node.empty()) {
      match_source_node_offset =
          new_config_fbb.CreateString(map.match->source_node);
    }
    Channel::Builder match_builder(new_config_fbb);
    match_builder.add_name(match_name_offset);
    match_builder.add_type(match_type_offset);
    if (!map.match->source_node.empty()) {
      match_builder.add_source_node(match_source_node_offset);
    }
    const flatbuffers::Offset<Channel> match_offset = match_builder.Finish();

    Channel::Builder rename_builder(new_config_fbb);
    rename_builder.add_name(rename_name_offset);
    const flatbuffers::Offset<Channel> rename_offset = rename_builder.Finish();

    Map::Builder map_builder(new_config_fbb);
    map_builder.add_match(match_offset);
    map_builder.add_rename(rename_offset);
    map_offsets.emplace_back(map_builder.Finish());
  }

  const auto new_maps_offsets = new_config_fbb.CreateVector(map_offsets);

  ConfigurationBuilder new_config_builder(new_config_fbb);
  new_config_builder.add_channels(new_channel_vector_offsets);
  new_config_builder.add_maps(new_maps_offsets);
  new_config_fbb.Finish(new_config_builder.Finish());
  const FlatbufferDetachedBuffer<Configuration> new_name_config =
      new_config_fbb.Release();
  // Merge the new channels configuration into the base_config, giving us the
  // remapped configuration.
  remapped_configuration_buffer_ =
      std::make_unique<FlatbufferDetachedBuffer<Configuration>>(
          MergeFlatBuffers<Configuration>(base_config,
                                          &new_name_config.message()));
  // Call MergeConfiguration to deal with sanitizing the config.
  remapped_configuration_buffer_ =
      std::make_unique<FlatbufferDetachedBuffer<Configuration>>(
          configuration::MergeConfiguration(*remapped_configuration_buffer_));

  remapped_configuration_ = &remapped_configuration_buffer_->message();
}
1719
Austin Schuh6f3babe2020-01-26 20:34:50 -08001720const Channel *LogReader::RemapChannel(const EventLoop *event_loop,
1721 const Channel *channel) {
1722 std::string_view channel_name = channel->name()->string_view();
1723 std::string_view channel_type = channel->type()->string_view();
1724 const int channel_index =
1725 configuration::ChannelIndex(logged_configuration(), channel);
1726 // If the channel is remapped, find the correct channel name to use.
1727 if (remapped_channels_.count(channel_index) > 0) {
Austin Schuhee711052020-08-24 16:06:09 -07001728 VLOG(3) << "Got remapped channel on "
Austin Schuh6f3babe2020-01-26 20:34:50 -08001729 << configuration::CleanedChannelToString(channel);
1730 channel_name = remapped_channels_[channel_index];
1731 }
1732
Austin Schuhee711052020-08-24 16:06:09 -07001733 VLOG(2) << "Going to remap channel " << channel_name << " " << channel_type;
Austin Schuh6f3babe2020-01-26 20:34:50 -08001734 const Channel *remapped_channel = configuration::GetChannel(
1735 event_loop->configuration(), channel_name, channel_type,
1736 event_loop->name(), event_loop->node());
1737
1738 CHECK(remapped_channel != nullptr)
1739 << ": Unable to send {\"name\": \"" << channel_name << "\", \"type\": \""
1740 << channel_type << "\"} because it is not in the provided configuration.";
1741
1742 return remapped_channel;
1743}
1744
// Takes ownership of the ChannelMerger that supplies this node's sorted log
// messages.
LogReader::State::State(std::unique_ptr<ChannelMerger> channel_merger)
    : channel_merger_(std::move(channel_merger)) {}
1747
1748EventLoop *LogReader::State::SetNodeEventLoopFactory(
1749 NodeEventLoopFactory *node_event_loop_factory) {
1750 node_event_loop_factory_ = node_event_loop_factory;
1751 event_loop_unique_ptr_ =
1752 node_event_loop_factory_->MakeEventLoop("log_reader");
1753 return event_loop_unique_ptr_.get();
1754}
1755
1756void LogReader::State::SetChannelCount(size_t count) {
1757 channels_.resize(count);
Austin Schuh8d7e0bb2020-10-02 17:57:00 -07001758 remote_timestamp_senders_.resize(count);
Austin Schuh858c9f32020-08-31 16:56:12 -07001759 filters_.resize(count);
Austin Schuh8d7e0bb2020-10-02 17:57:00 -07001760 channel_source_state_.resize(count);
1761 factory_channel_index_.resize(count);
1762 queue_index_map_.resize(count);
Austin Schuh858c9f32020-08-31 16:56:12 -07001763}
1764
1765void LogReader::State::SetChannel(
Austin Schuh8d7e0bb2020-10-02 17:57:00 -07001766 size_t logged_channel_index, size_t factory_channel_index,
1767 std::unique_ptr<RawSender> sender,
Austin Schuh2f8fd752020-09-01 22:38:28 -07001768 message_bridge::NoncausalOffsetEstimator *filter,
Austin Schuh8d7e0bb2020-10-02 17:57:00 -07001769 aos::Sender<MessageHeader> *remote_timestamp_sender, State *source_state) {
1770 channels_[logged_channel_index] = std::move(sender);
1771 filters_[logged_channel_index] = filter;
1772 remote_timestamp_senders_[logged_channel_index] = remote_timestamp_sender;
1773
1774 if (source_state) {
1775 channel_source_state_[logged_channel_index] = source_state;
1776
1777 if (remote_timestamp_sender != nullptr) {
1778 source_state->queue_index_map_[logged_channel_index] =
1779 std::make_unique<std::vector<State::SentTimestamp>>();
1780 }
1781 }
1782
1783 factory_channel_index_[logged_channel_index] = factory_channel_index;
1784}
1785
// Replays one message on the given logged channel. Looks up the queue index
// the message was re-sent with on the source node (when available), sends the
// payload with the original remote timestamps, records this send for remote
// nodes to look up, and re-publishes a remote timestamp header when this
// channel's delivery timestamps were logged on the source node.
// Returns false if the underlying sender rejected the message.
bool LogReader::State::Send(
    size_t channel_index, const void *data, size_t size,
    const TimestampMerger::DeliveryTimestamp &delivery_timestamp) {
  aos::RawSender *sender = channels_[channel_index].get();
  // 0xffffffff acts as the "unknown remote queue index" sentinel when no
  // matching sent record is found below.
  uint32_t remote_queue_index = 0xffffffff;

  if (remote_timestamp_senders_[channel_index] != nullptr) {
    // The source node's record of when it re-sent this message during replay.
    std::vector<SentTimestamp> *queue_index_map =
        CHECK_NOTNULL(CHECK_NOTNULL(channel_source_state_[channel_index])
                          ->queue_index_map_[channel_index]
                          .get());

    SentTimestamp search;
    search.monotonic_event_time = delivery_timestamp.monotonic_remote_time;
    search.realtime_event_time = delivery_timestamp.realtime_remote_time;
    search.queue_index = delivery_timestamp.remote_queue_index;

    // Find the sent time if available.  Entries are ordered by
    // (monotonic_event_time, queue_index); realtime time is only
    // cross-checked, never used for ordering.
    auto element = std::lower_bound(
        queue_index_map->begin(), queue_index_map->end(), search,
        [](SentTimestamp a, SentTimestamp b) {
          if (b.monotonic_event_time < a.monotonic_event_time) {
            return false;
          }
          if (b.monotonic_event_time > a.monotonic_event_time) {
            return true;
          }

          if (b.queue_index < a.queue_index) {
            return false;
          }
          if (b.queue_index > a.queue_index) {
            return true;
          }

          CHECK_EQ(a.realtime_event_time, b.realtime_event_time);
          return false;
        });

    // TODO(austin): Be a bit more principled here, but we will want to do that
    // after the logger rewrite.  We hit this when one node finishes, but the
    // other node isn't done yet.  So there is no send time, but there is a
    // receive time.
    if (element != queue_index_map->end()) {
      // Sanity-check that lower_bound landed on the exact record we searched
      // for before trusting its replayed queue index.
      CHECK_EQ(element->monotonic_event_time,
               delivery_timestamp.monotonic_remote_time);
      CHECK_EQ(element->realtime_event_time,
               delivery_timestamp.realtime_remote_time);
      CHECK_EQ(element->queue_index, delivery_timestamp.remote_queue_index);

      remote_queue_index = element->actual_queue_index;
    }
  }

  // Send!  Use the replayed queue index here instead of the logged queue index
  // for the remote queue index.  This makes re-logging work.
  const bool sent =
      sender->Send(data, size, delivery_timestamp.monotonic_remote_time,
                   delivery_timestamp.realtime_remote_time, remote_queue_index);
  if (!sent) return false;

  if (queue_index_map_[channel_index]) {
    // This channel is forwarded somewhere with logged delivery timestamps:
    // record the logged event times against the queue index the message was
    // actually sent with, so the destination node can find it above.
    SentTimestamp timestamp;
    timestamp.monotonic_event_time = delivery_timestamp.monotonic_event_time;
    timestamp.realtime_event_time = delivery_timestamp.realtime_event_time;
    timestamp.queue_index = delivery_timestamp.queue_index;
    timestamp.actual_queue_index = sender->sent_queue_index();
    queue_index_map_[channel_index]->emplace_back(timestamp);
  } else if (remote_timestamp_senders_[channel_index] != nullptr) {
    // Re-publish the delivery timestamp back toward the source node.
    aos::Sender<MessageHeader>::Builder builder =
        remote_timestamp_senders_[channel_index]->MakeBuilder();

    logger::MessageHeader::Builder message_header_builder =
        builder.MakeBuilder<logger::MessageHeader>();

    message_header_builder.add_channel_index(
        factory_channel_index_[channel_index]);

    // Swap the remote and sent metrics.  They are from the sender's
    // perspective, not the receiver's perspective.
    message_header_builder.add_monotonic_sent_time(
        sender->monotonic_sent_time().time_since_epoch().count());
    message_header_builder.add_realtime_sent_time(
        sender->realtime_sent_time().time_since_epoch().count());
    message_header_builder.add_queue_index(sender->sent_queue_index());

    message_header_builder.add_monotonic_remote_time(
        delivery_timestamp.monotonic_remote_time.time_since_epoch().count());
    message_header_builder.add_realtime_remote_time(
        delivery_timestamp.realtime_remote_time.time_since_epoch().count());

    message_header_builder.add_remote_queue_index(remote_queue_index);

    builder.Send(message_header_builder.Finish());
  }

  return true;
}
1884
1885aos::Sender<MessageHeader> *LogReader::State::RemoteTimestampSender(
1886 const Node *delivered_node) {
1887 auto sender = remote_timestamp_senders_map_.find(delivered_node);
1888
1889 if (sender == remote_timestamp_senders_map_.end()) {
1890 sender = remote_timestamp_senders_map_
1891 .emplace(std::make_pair(
1892 delivered_node,
1893 event_loop()->MakeSender<MessageHeader>(
1894 absl::StrCat("/aos/remote_timestamps/",
1895 delivered_node->name()->string_view()))))
1896 .first;
1897 }
1898
1899 return &(sender->second);
Austin Schuh858c9f32020-08-31 16:56:12 -07001900}
1901
1902std::tuple<TimestampMerger::DeliveryTimestamp, int,
1903 FlatbufferVector<MessageHeader>>
1904LogReader::State::PopOldest(bool *update_time) {
1905 CHECK_GT(sorted_messages_.size(), 0u);
1906
1907 std::tuple<TimestampMerger::DeliveryTimestamp, int,
Austin Schuh2f8fd752020-09-01 22:38:28 -07001908 FlatbufferVector<MessageHeader>,
1909 message_bridge::NoncausalOffsetEstimator *>
Austin Schuh858c9f32020-08-31 16:56:12 -07001910 result = std::move(sorted_messages_.front());
Austin Schuh2f8fd752020-09-01 22:38:28 -07001911 VLOG(2) << MaybeNodeName(event_loop_->node()) << "PopOldest Popping "
Austin Schuh858c9f32020-08-31 16:56:12 -07001912 << std::get<0>(result).monotonic_event_time;
1913 sorted_messages_.pop_front();
1914 SeedSortedMessages();
1915
Austin Schuh2f8fd752020-09-01 22:38:28 -07001916 if (std::get<3>(result) != nullptr) {
1917 *update_time = std::get<3>(result)->Pop(
1918 event_loop_->node(), std::get<0>(result).monotonic_event_time);
1919 } else {
1920 *update_time = false;
1921 }
Austin Schuh858c9f32020-08-31 16:56:12 -07001922 return std::make_tuple(std::get<0>(result), std::get<1>(result),
1923 std::move(std::get<2>(result)));
1924}
1925
1926monotonic_clock::time_point LogReader::State::OldestMessageTime() const {
1927 if (sorted_messages_.size() > 0) {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001928 VLOG(2) << MaybeNodeName(event_loop_->node()) << "oldest message at "
Austin Schuh858c9f32020-08-31 16:56:12 -07001929 << std::get<0>(sorted_messages_.front()).monotonic_event_time;
1930 return std::get<0>(sorted_messages_.front()).monotonic_event_time;
1931 }
1932
1933 return channel_merger_->OldestMessageTime();
1934}
1935
// Pre-loads sorted_messages_ from the channel merger so that roughly two
// seconds of messages are queued ahead of the replay cursor.  Each message
// that carries forwarding (remote-time) information is also fed into its
// channel's clock-offset estimator as it is queued.
void LogReader::State::SeedSortedMessages() {
  // Queue up to 2 seconds past the current front message — or, when the
  // queue is empty, 2 seconds past the start of the log.
  const aos::monotonic_clock::time_point end_queue_time =
      (sorted_messages_.size() > 0
           ? std::get<0>(sorted_messages_.front()).monotonic_event_time
           : channel_merger_->monotonic_start_time()) +
      std::chrono::seconds(2);

  while (true) {
    // max_time means the merger has no messages left at all.
    if (channel_merger_->OldestMessageTime() == monotonic_clock::max_time) {
      return;
    }
    if (sorted_messages_.size() > 0) {
      // Stop placing sorted messages on the list once we have 2 seconds
      // queued up (but queue at least until the log starts).
      if (end_queue_time <
          std::get<0>(sorted_messages_.back()).monotonic_event_time) {
        return;
      }
    }

    TimestampMerger::DeliveryTimestamp channel_timestamp;
    int channel_index;
    FlatbufferVector<MessageHeader> channel_data =
        FlatbufferVector<MessageHeader>::Empty();

    message_bridge::NoncausalOffsetEstimator *filter = nullptr;

    std::tie(channel_timestamp, channel_index, channel_data) =
        channel_merger_->PopOldest();

    // Skip any messages without forwarding information.
    // min_time is the sentinel for "no remote time recorded".
    if (channel_timestamp.monotonic_remote_time != monotonic_clock::min_time) {
      // Got a forwarding timestamp!
      filter = filters_[channel_index];

      CHECK(filter != nullptr);

      // Call the correct method depending on if we are the forward or
      // reverse direction here.
      filter->Sample(event_loop_->node(),
                     channel_timestamp.monotonic_event_time,
                     channel_timestamp.monotonic_remote_time);
    }
    // The filter pointer rides along with the message so PopOldest() can
    // pop the matching sample when the message is consumed.
    sorted_messages_.emplace_back(channel_timestamp, channel_index,
                                  std::move(channel_data), filter);
  }
}
1983
1984void LogReader::State::Deregister() {
1985 for (size_t i = 0; i < channels_.size(); ++i) {
1986 channels_[i].reset();
1987 }
Austin Schuh8d7e0bb2020-10-02 17:57:00 -07001988 remote_timestamp_senders_map_.clear();
Austin Schuh858c9f32020-08-31 16:56:12 -07001989 event_loop_unique_ptr_.reset();
1990 event_loop_ = nullptr;
1991 timer_handler_ = nullptr;
1992 node_event_loop_factory_ = nullptr;
1993}
1994
Austin Schuhe309d2a2019-11-29 13:25:21 -08001995} // namespace logger
1996} // namespace aos