#include "aos/events/logging/logger.h"

#include <fcntl.h>
#include <limits.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <vector>

#include "Eigen/Dense"
#include "absl/strings/escaping.h"
#include "absl/types/span.h"
#include "aos/events/event_loop.h"
#include "aos/events/logging/logger_generated.h"
#include "aos/events/logging/uuid.h"
#include "aos/flatbuffer_merge.h"
#include "aos/network/team_number.h"
#include "aos/time/time.h"
#include "aos/util/file.h"
#include "flatbuffers/flatbuffers.h"
#include "third_party/gmp/gmpxx.h"

DEFINE_bool(skip_missing_forwarding_entries, false,
            "If true, drop any forwarding entries with missing data. If "
            "false, CHECK.");

DEFINE_bool(timestamps_to_csv, false,
            "If true, write all the time synchronization information to a set "
            "of CSV files in /tmp/. This should only be needed when debugging "
            "time synchronization.");

DEFINE_bool(skip_order_validation, false,
            "If true, ignore any out of orderness in replay");

namespace aos {
namespace logger {
namespace chrono = std::chrono;

Logger::Logger(EventLoop *event_loop, const Configuration *configuration,
               std::function<bool(const Channel *)> should_log)
    : event_loop_(event_loop),
      configuration_(configuration),
      boot_uuid_(
          util::ReadFileToStringOrDie("/proc/sys/kernel/random/boot_id")),
      name_(network::GetHostname()),
      timer_handler_(event_loop_->AddTimer(
          [this]() { DoLogData(event_loop_->monotonic_now()); })),
      server_statistics_fetcher_(
          configuration::MultiNode(event_loop_->configuration())
              ? event_loop_->MakeFetcher<message_bridge::ServerStatistics>(
                    "/aos")
              : aos::Fetcher<message_bridge::ServerStatistics>()) {
  VLOG(1) << "Creating logger for " << FlatbufferToJson(event_loop_->node());

  // Find all the nodes which are logging timestamps on our node.
  std::set<const Node *> timestamp_logger_nodes;
  for (const Channel *channel : *configuration_->channels()) {
    if (!configuration::ChannelIsSendableOnNode(channel, event_loop_->node())) {
      continue;
    }
    if (!channel->has_destination_nodes()) {
      continue;
    }
    if (!should_log(channel)) {
      continue;
    }
    for (const Connection *connection : *channel->destination_nodes()) {
      const Node *other_node = configuration::GetNode(
          configuration_, connection->name()->string_view());

      if (configuration::ConnectionDeliveryTimeIsLoggedOnNode(
              connection, event_loop_->node())) {
        VLOG(1) << "Timestamps are logged from "
                << FlatbufferToJson(other_node);
        timestamp_logger_nodes.insert(other_node);
      }
    }
  }

  std::map<const Channel *, const Node *> timestamp_logger_channels;

  // Now that we have all the nodes accumulated, make remote timestamp loggers
  // for them.
  for (const Node *node : timestamp_logger_nodes) {
    const Channel *channel = configuration::GetChannel(
        configuration_,
        absl::StrCat("/aos/remote_timestamps/", node->name()->string_view()),
        logger::MessageHeader::GetFullyQualifiedName(), event_loop_->name(),
        event_loop_->node());

    CHECK(channel != nullptr)
        << ": Remote timestamps are logged on "
        << event_loop_->node()->name()->string_view()
        << " but can't find channel /aos/remote_timestamps/"
        << node->name()->string_view();
    if (!should_log(channel)) {
      continue;
    }
    timestamp_logger_channels.insert(std::make_pair(channel, node));
  }

  const size_t our_node_index =
      configuration::GetNodeIndex(configuration_, event_loop_->node());

  for (size_t channel_index = 0;
       channel_index < configuration_->channels()->size(); ++channel_index) {
    const Channel *const config_channel =
        configuration_->channels()->Get(channel_index);
    // The MakeRawFetcher method needs a channel which is in the event loop
    // configuration() object, not the configuration_ object. Go look that up
    // from the config.
    const Channel *channel = aos::configuration::GetChannel(
        event_loop_->configuration(), config_channel->name()->string_view(),
        config_channel->type()->string_view(), "", event_loop_->node());
    if (!should_log(channel)) {
      continue;
    }

    FetcherStruct fs;
    fs.node_index = our_node_index;
    fs.channel_index = channel_index;
    fs.channel = channel;

    const bool is_local =
        configuration::ChannelIsSendableOnNode(channel, event_loop_->node());

    const bool is_readable =
        configuration::ChannelIsReadableOnNode(channel, event_loop_->node());
    const bool is_logged = configuration::ChannelMessageIsLoggedOnNode(
        channel, event_loop_->node());
    const bool log_message = is_logged && is_readable;

    bool log_delivery_times = false;
    if (event_loop_->node() != nullptr) {
      log_delivery_times = configuration::ConnectionDeliveryTimeIsLoggedOnNode(
          channel, event_loop_->node(), event_loop_->node());
    }

    // Now, detect a MessageHeader timestamp logger where we should just log
    // the contents to a file directly.
    const bool log_contents = timestamp_logger_channels.find(channel) !=
                              timestamp_logger_channels.end();

    if (log_message || log_delivery_times || log_contents) {
      fs.fetcher = event_loop->MakeRawFetcher(channel);
      VLOG(1) << "Logging channel "
              << configuration::CleanedChannelToString(channel);

      if (log_delivery_times) {
        VLOG(1) << " Delivery times";
        fs.wants_timestamp_writer = true;
      }
      if (log_message) {
        VLOG(1) << " Data";
        fs.wants_writer = true;
        if (!is_local) {
          fs.log_type = LogType::kLogRemoteMessage;
        }
      }
      if (log_contents) {
        VLOG(1) << "Timestamp logger channel "
                << configuration::CleanedChannelToString(channel);
        fs.timestamp_node = timestamp_logger_channels.find(channel)->second;
        fs.wants_contents_writer = true;
        fs.node_index =
            configuration::GetNodeIndex(configuration_, fs.timestamp_node);
      }
      fetchers_.emplace_back(std::move(fs));
    }
  }
}

Logger::~Logger() {
  if (log_namer_) {
    // If we are replaying a log file, or in simulation, we want to force the
    // last bit of data to be logged. The easiest way to deal with this is to
    // poll everything as we go to destroy the class, ie, shut down the logger,
    // and write it to disk.
    StopLogging(event_loop_->monotonic_now());
  }
}

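// Illustrative sketch of the intended lifecycle (not code from this file):
// construct a Logger, hand StartLogging() a LogNamer that decides where the
// parts land, then collect the namer back from StopLogging(). The
// LocalLogNamer name below is an assumption about the available LogNamer
// subclasses; the Logger calls themselves match the definitions in this file.
//
//   aos::logger::Logger logger(event_loop, event_loop->configuration(),
//                              [](const aos::Channel *) { return true; });
//   logger.StartLogging(
//       std::make_unique<aos::logger::LocalLogNamer>("/tmp/log",
//                                                    event_loop->node()),
//       /*log_start_uuid=*/"");
//   // ... run the event loop; the polling timer drives DoLogData() ...
//   std::unique_ptr<aos::logger::LogNamer> namer =
//       logger.StopLogging(event_loop->monotonic_now());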
void Logger::StartLogging(std::unique_ptr<LogNamer> log_namer,
                          std::string_view log_start_uuid) {
  CHECK(!log_namer_) << ": Already logging";
  log_namer_ = std::move(log_namer);
  log_event_uuid_ = UUID::Random();
  log_start_uuid_ = log_start_uuid;
  VLOG(1) << "Starting logger for " << FlatbufferToJson(event_loop_->node());

  // We want to do as much work as possible before the initial Fetch. Time
  // between that and actually starting to log opens up the possibility of
  // falling off the end of the queue during that time.

  for (FetcherStruct &f : fetchers_) {
    if (f.wants_writer) {
      f.writer = log_namer_->MakeWriter(f.channel);
    }
    if (f.wants_timestamp_writer) {
      f.timestamp_writer = log_namer_->MakeTimestampWriter(f.channel);
    }
    if (f.wants_contents_writer) {
      f.contents_writer = log_namer_->MakeForwardedTimestampWriter(
          f.channel, CHECK_NOTNULL(f.timestamp_node));
    }
  }

  CHECK(node_state_.empty());
  node_state_.resize(configuration::MultiNode(configuration_)
                         ? configuration_->nodes()->size()
                         : 1u);

  for (const Node *node : log_namer_->nodes()) {
    const int node_index = configuration::GetNodeIndex(configuration_, node);

    node_state_[node_index].log_file_header = MakeHeader(node);
  }

  // Grab data from each channel right before we declare the log file started
  // so we can capture the latest message on each channel. This lets us have
  // non periodic messages with configuration that now get logged.
  for (FetcherStruct &f : fetchers_) {
    f.written = !f.fetcher->Fetch();
  }

  // Clear out any old timestamps in case we are re-starting logging.
  for (size_t i = 0; i < node_state_.size(); ++i) {
    SetStartTime(i, monotonic_clock::min_time, realtime_clock::min_time);
  }

  WriteHeader();

  LOG(INFO) << "Logging node as " << FlatbufferToJson(event_loop_->node())
            << " start_time " << last_synchronized_time_;

  timer_handler_->Setup(event_loop_->monotonic_now() + polling_period_,
                        polling_period_);
}

std::unique_ptr<LogNamer> Logger::StopLogging(
    aos::monotonic_clock::time_point end_time) {
  CHECK(log_namer_) << ": Not logging right now";

  if (end_time != aos::monotonic_clock::min_time) {
    LogUntil(end_time);
  }
  timer_handler_->Disable();

  for (FetcherStruct &f : fetchers_) {
    f.writer = nullptr;
    f.timestamp_writer = nullptr;
    f.contents_writer = nullptr;
  }
  node_state_.clear();

  log_event_uuid_ = UUID::Zero();
  log_start_uuid_ = std::string();
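
  // The UUIDs are cleared here so that a subsequent StartLogging() call gets
  // fresh ones; the LogNamer is handed back to the caller below rather than
  // destroyed, so it can be dropped or reused for a later StartLogging().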
  return std::move(log_namer_);
}

void Logger::WriteHeader() {
  if (configuration::MultiNode(configuration_)) {
    server_statistics_fetcher_.Fetch();
  }

  aos::monotonic_clock::time_point monotonic_start_time =
      event_loop_->monotonic_now();
  aos::realtime_clock::time_point realtime_start_time =
      event_loop_->realtime_now();

  // We need to pick a point in time to declare the log file "started". This
  // starts here. It needs to be after everything is fetched so that the
  // fetchers are all pointed at the most recent message before the start
  // time.
  last_synchronized_time_ = monotonic_start_time;

  for (const Node *node : log_namer_->nodes()) {
    const int node_index = configuration::GetNodeIndex(configuration_, node);
    MaybeUpdateTimestamp(node, node_index, monotonic_start_time,
                         realtime_start_time);
    log_namer_->WriteHeader(&node_state_[node_index].log_file_header, node);
  }
}

void Logger::WriteMissingTimestamps() {
  if (configuration::MultiNode(configuration_)) {
    server_statistics_fetcher_.Fetch();
  } else {
    return;
  }

  if (server_statistics_fetcher_.get() == nullptr) {
    return;
  }

  for (const Node *node : log_namer_->nodes()) {
    const int node_index = configuration::GetNodeIndex(configuration_, node);
    if (MaybeUpdateTimestamp(
            node, node_index,
            server_statistics_fetcher_.context().monotonic_event_time,
            server_statistics_fetcher_.context().realtime_event_time)) {
      log_namer_->Rotate(node, &node_state_[node_index].log_file_header);
    }
  }
}

void Logger::SetStartTime(size_t node_index,
                          aos::monotonic_clock::time_point monotonic_start_time,
                          aos::realtime_clock::time_point realtime_start_time) {
  node_state_[node_index].monotonic_start_time = monotonic_start_time;
  node_state_[node_index].realtime_start_time = realtime_start_time;
  node_state_[node_index]
      .log_file_header.mutable_message()
      ->mutate_monotonic_start_time(
          std::chrono::duration_cast<std::chrono::nanoseconds>(
              monotonic_start_time.time_since_epoch())
              .count());
  if (node_state_[node_index]
          .log_file_header.mutable_message()
          ->has_realtime_start_time()) {
    node_state_[node_index]
        .log_file_header.mutable_message()
        ->mutate_realtime_start_time(
            std::chrono::duration_cast<std::chrono::nanoseconds>(
                realtime_start_time.time_since_epoch())
                .count());
  }
}

bool Logger::MaybeUpdateTimestamp(
    const Node *node, int node_index,
    aos::monotonic_clock::time_point monotonic_start_time,
    aos::realtime_clock::time_point realtime_start_time) {
  // Bail early if the start times are already set.
  if (node_state_[node_index].monotonic_start_time !=
      monotonic_clock::min_time) {
    return false;
  }
  if (configuration::MultiNode(configuration_)) {
    if (event_loop_->node() == node) {
      // There are no offsets to compute for ourself, so always succeed.
      SetStartTime(node_index, monotonic_start_time, realtime_start_time);
      return true;
    } else if (server_statistics_fetcher_.get() != nullptr) {
      // We must be a remote node now. Look for the connection and see if it is
      // connected.

      for (const message_bridge::ServerConnection *connection :
           *server_statistics_fetcher_->connections()) {
        if (connection->node()->name()->string_view() !=
            node->name()->string_view()) {
          continue;
        }

        if (connection->state() != message_bridge::State::CONNECTED) {
          VLOG(1) << node->name()->string_view()
                  << " is not connected, can't start it yet.";
          break;
        }

        if (!connection->has_monotonic_offset()) {
          VLOG(1) << "Missing monotonic offset for setting start time for node "
                  << aos::FlatbufferToJson(node);
          break;
        }

        VLOG(1) << "Updating start time for " << aos::FlatbufferToJson(node);

        // Found it and it is connected. Compensate and go.
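        // For example (illustrative numbers only): if the connection reports a
        // monotonic_offset() of +5ms, the start time recorded for this remote
        // node lands 5ms later than our local monotonic_start_time, i.e. it is
        // expressed in the remote node's monotonic clock rather than ours.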
        monotonic_start_time +=
            std::chrono::nanoseconds(connection->monotonic_offset());

        SetStartTime(node_index, monotonic_start_time, realtime_start_time);
        return true;
      }
    }
  } else {
    SetStartTime(node_index, monotonic_start_time, realtime_start_time);
    return true;
  }
  return false;
}

aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> Logger::MakeHeader(
    const Node *node) {
  // Now write the header with this timestamp in it.
  flatbuffers::FlatBufferBuilder fbb;
  fbb.ForceDefaults(true);

  // TODO(austin): Compress this much more efficiently. There are a bunch of
  // duplicated schemas.
  const flatbuffers::Offset<aos::Configuration> configuration_offset =
      CopyFlatBuffer(configuration_, &fbb);

  const flatbuffers::Offset<flatbuffers::String> name_offset =
      fbb.CreateString(name_);

  CHECK(log_event_uuid_ != UUID::Zero());
  const flatbuffers::Offset<flatbuffers::String> log_event_uuid_offset =
      fbb.CreateString(log_event_uuid_.string_view());

  const flatbuffers::Offset<flatbuffers::String> logger_instance_uuid_offset =
      fbb.CreateString(logger_instance_uuid_.string_view());

  flatbuffers::Offset<flatbuffers::String> log_start_uuid_offset;
  if (!log_start_uuid_.empty()) {
    log_start_uuid_offset = fbb.CreateString(log_start_uuid_);
  }

  const flatbuffers::Offset<flatbuffers::String> boot_uuid_offset =
      fbb.CreateString(boot_uuid_);

  const flatbuffers::Offset<flatbuffers::String> parts_uuid_offset =
      fbb.CreateString("00000000-0000-4000-8000-000000000000");

  flatbuffers::Offset<Node> node_offset;
  flatbuffers::Offset<Node> logger_node_offset;

  if (configuration::MultiNode(configuration_)) {
    node_offset = CopyFlatBuffer(node, &fbb);
    logger_node_offset = CopyFlatBuffer(event_loop_->node(), &fbb);
  }

  aos::logger::LogFileHeader::Builder log_file_header_builder(fbb);

  log_file_header_builder.add_name(name_offset);

  // Only add the node if we are running in a multinode configuration.
  if (node != nullptr) {
    log_file_header_builder.add_node(node_offset);
    log_file_header_builder.add_logger_node(logger_node_offset);
  }

  log_file_header_builder.add_configuration(configuration_offset);
  // The worst case theoretical out of order is the polling period times 2.
  // One message could get logged right after the boundary, but be for right
  // before the next boundary. And the reverse could happen for another
  // message. Report back 3x to be extra safe, and because the cost isn't
  // huge on the read side.
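  // Concretely (illustrative numbers, assuming the default polling period):
  // with polling_period_ = 100ms the header reports 300ms here, even though
  // the theoretical worst case described above is 200ms.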
  log_file_header_builder.add_max_out_of_order_duration(
      std::chrono::nanoseconds(3 * polling_period_).count());

  log_file_header_builder.add_monotonic_start_time(
      std::chrono::duration_cast<std::chrono::nanoseconds>(
          monotonic_clock::min_time.time_since_epoch())
          .count());
  if (node == event_loop_->node()) {
    log_file_header_builder.add_realtime_start_time(
        std::chrono::duration_cast<std::chrono::nanoseconds>(
            realtime_clock::min_time.time_since_epoch())
            .count());
  }

  log_file_header_builder.add_log_event_uuid(log_event_uuid_offset);
  log_file_header_builder.add_logger_instance_uuid(logger_instance_uuid_offset);
  if (!log_start_uuid_offset.IsNull()) {
    log_file_header_builder.add_log_start_uuid(log_start_uuid_offset);
  }
  log_file_header_builder.add_boot_uuid(boot_uuid_offset);

  log_file_header_builder.add_parts_uuid(parts_uuid_offset);
  log_file_header_builder.add_parts_index(0);

  fbb.FinishSizePrefixed(log_file_header_builder.Finish());
  return fbb.Release();
}

void Logger::Rotate() {
  for (const Node *node : log_namer_->nodes()) {
    const int node_index = configuration::GetNodeIndex(configuration_, node);
    log_namer_->Rotate(node, &node_state_[node_index].log_file_header);
  }
}

void Logger::LogUntil(monotonic_clock::time_point t) {
  WriteMissingTimestamps();

  // Write each channel to disk, one at a time.
  for (FetcherStruct &f : fetchers_) {
    while (true) {
      if (f.written) {
        if (!f.fetcher->FetchNext()) {
          VLOG(2) << "No new data on "
                  << configuration::CleanedChannelToString(
                         f.fetcher->channel());
          break;
        } else {
          f.written = false;
        }
      }

      CHECK(!f.written);

      // TODO(james): Write tests to exercise this logic.
      if (f.fetcher->context().monotonic_event_time < t) {
        if (f.writer != nullptr) {
          // Write!
          flatbuffers::FlatBufferBuilder fbb(f.fetcher->context().size +
                                             max_header_size_);
          fbb.ForceDefaults(true);

          fbb.FinishSizePrefixed(PackMessage(&fbb, f.fetcher->context(),
                                             f.channel_index, f.log_type));

          VLOG(2) << "Writing data as node "
                  << FlatbufferToJson(event_loop_->node()) << " for channel "
                  << configuration::CleanedChannelToString(f.fetcher->channel())
                  << " to " << f.writer->filename() << " data "
                  << FlatbufferToJson(
                         flatbuffers::GetSizePrefixedRoot<MessageHeader>(
                             fbb.GetBufferPointer()));

          max_header_size_ = std::max(
              max_header_size_, fbb.GetSize() - f.fetcher->context().size);
          f.writer->QueueSizedFlatbuffer(&fbb);
        }

        if (f.timestamp_writer != nullptr) {
          // And now handle timestamps.
          flatbuffers::FlatBufferBuilder fbb;
          fbb.ForceDefaults(true);

          fbb.FinishSizePrefixed(PackMessage(&fbb, f.fetcher->context(),
                                             f.channel_index,
                                             LogType::kLogDeliveryTimeOnly));

          VLOG(2) << "Writing timestamps as node "
                  << FlatbufferToJson(event_loop_->node()) << " for channel "
                  << configuration::CleanedChannelToString(f.fetcher->channel())
                  << " to " << f.timestamp_writer->filename() << " timestamp "
                  << FlatbufferToJson(
                         flatbuffers::GetSizePrefixedRoot<MessageHeader>(
                             fbb.GetBufferPointer()));

          f.timestamp_writer->QueueSizedFlatbuffer(&fbb);
        }

        if (f.contents_writer != nullptr) {
          // And now handle the special message contents channel. Copy the
          // message into a FlatBufferBuilder and save it to disk.
          // TODO(austin): We can be more efficient here when we start to
          // care...
          flatbuffers::FlatBufferBuilder fbb;
          fbb.ForceDefaults(true);

          const MessageHeader *msg =
              flatbuffers::GetRoot<MessageHeader>(f.fetcher->context().data);

          logger::MessageHeader::Builder message_header_builder(fbb);

          // Note: this must match the same order as MessageBridgeServer and
          // PackMessage. We want identical headers to have identical
          // on-the-wire formats to make comparing them easier.
          message_header_builder.add_channel_index(msg->channel_index());

          message_header_builder.add_queue_index(msg->queue_index());
          message_header_builder.add_monotonic_sent_time(
              msg->monotonic_sent_time());
          message_header_builder.add_realtime_sent_time(
              msg->realtime_sent_time());

          message_header_builder.add_monotonic_remote_time(
              msg->monotonic_remote_time());
          message_header_builder.add_realtime_remote_time(
              msg->realtime_remote_time());
          message_header_builder.add_remote_queue_index(
              msg->remote_queue_index());

          fbb.FinishSizePrefixed(message_header_builder.Finish());

          f.contents_writer->QueueSizedFlatbuffer(&fbb);
        }

        f.written = true;
      } else {
        break;
      }
    }
  }
  last_synchronized_time_ = t;
}

void Logger::DoLogData(const monotonic_clock::time_point end_time) {
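  // The loop below advances last_synchronized_time_ in steps of at most
  // polling_period_. For example (illustrative numbers only): if end_time is
  // 250ms past last_synchronized_time_ and polling_period_ is 100ms, LogUntil()
  // runs twice, moving the sync point forward 100ms each time; the remaining
  // 50ms is picked up by a later DoLogData() call once more time has passed.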
  // We want to guarantee that messages aren't out of order by more than
  // max_out_of_order_duration. To do this, we need sync points. Every write
  // cycle should be a sync point.

  do {
    // Move the sync point up by at most polling_period. This forces one sync
    // per iteration, even if it is small.
    LogUntil(std::min(last_synchronized_time_ + polling_period_, end_time));

    on_logged_period_();

    // If we missed cycles, we could be pretty far behind. Spin until we are
    // caught up.
  } while (last_synchronized_time_ + polling_period_ < end_time);
}

std::vector<LogFile> SortParts(const std::vector<std::string> &parts) {
  // Start by grouping all parts by UUID, and extracting the part index.
  // Datastructure to hold all the info extracted from a set of parts which go
  // together so we can sort them afterwards.
  struct UnsortedLogParts {
    // Start times.
    aos::monotonic_clock::time_point monotonic_start_time;
    aos::realtime_clock::time_point realtime_start_time;

    // Node to save.
    std::string node;

    // Pairs of the filename and the part index for sorting.
    std::vector<std::pair<std::string, int>> parts;
  };

  // Map holding the log_event_uuid -> second map. The second map holds the
  // parts_uuid -> list of parts for sorting.
  std::map<std::string, std::map<std::string, UnsortedLogParts>> parts_list;

  // Sort part files without UUIDs and part indexes as well. Extract everything
  // useful from the log in the first pass, then sort later.
  struct UnsortedOldParts {
    // Part information with everything but the list of parts.
    LogParts parts;

    // Tuple of time for the data and filename needed for sorting after
    // extracting.
    std::vector<std::pair<monotonic_clock::time_point, std::string>>
        unsorted_parts;
  };

  // A list of all the old parts which we don't know how to sort using uuids.
  // There are enough of these in the wild that this is worth supporting.
  std::vector<UnsortedOldParts> old_parts;

  // Now extract everything into our datastructures above for sorting.
  for (const std::string &part : parts) {
    FlatbufferVector<LogFileHeader> log_header = ReadHeader(part);

    const monotonic_clock::time_point monotonic_start_time(
        chrono::nanoseconds(log_header.message().monotonic_start_time()));
    const realtime_clock::time_point realtime_start_time(
        chrono::nanoseconds(log_header.message().realtime_start_time()));

    const std::string_view node =
        log_header.message().has_node()
            ? log_header.message().node()->name()->string_view()
            : "";

    // Looks like an old log. No UUID, index, and also single node. We have
    // little to no multi-node log files in the wild without part UUIDs and
    // indexes which we care much about.
    if (!log_header.message().has_parts_uuid() &&
        !log_header.message().has_parts_index() &&
        !log_header.message().has_node()) {
      FlatbufferVector<MessageHeader> first_message = ReadNthMessage(part, 0);
      const monotonic_clock::time_point first_message_time(
          chrono::nanoseconds(first_message.message().monotonic_sent_time()));

      // Find anything with a matching start time. They all go together.
      auto result = std::find_if(
          old_parts.begin(), old_parts.end(),
          [&](const UnsortedOldParts &parts) {
            return parts.parts.monotonic_start_time == monotonic_start_time &&
                   parts.parts.realtime_start_time == realtime_start_time;
          });

      if (result == old_parts.end()) {
        old_parts.emplace_back();
        old_parts.back().parts.monotonic_start_time = monotonic_start_time;
        old_parts.back().parts.realtime_start_time = realtime_start_time;
        old_parts.back().unsorted_parts.emplace_back(
            std::make_pair(first_message_time, part));
      } else {
        result->unsorted_parts.emplace_back(
            std::make_pair(first_message_time, part));
      }
      continue;
    }

    CHECK(log_header.message().has_log_event_uuid());
    CHECK(log_header.message().has_parts_uuid());
    CHECK(log_header.message().has_parts_index());

    const std::string log_event_uuid =
        log_header.message().log_event_uuid()->str();
    const std::string parts_uuid = log_header.message().parts_uuid()->str();
    int32_t parts_index = log_header.message().parts_index();

    auto log_it = parts_list.find(log_event_uuid);
    if (log_it == parts_list.end()) {
      log_it =
          parts_list
              .insert(std::make_pair(log_event_uuid,
                                     std::map<std::string, UnsortedLogParts>()))
              .first;
    }

    auto it = log_it->second.find(parts_uuid);
    if (it == log_it->second.end()) {
      it = log_it->second.insert(std::make_pair(parts_uuid, UnsortedLogParts()))
               .first;
      it->second.monotonic_start_time = monotonic_start_time;
      it->second.realtime_start_time = realtime_start_time;
      it->second.node = std::string(node);
    }

    // First part might be min_time. If it is, try to put a better time on it.
    if (it->second.monotonic_start_time == monotonic_clock::min_time) {
      it->second.monotonic_start_time = monotonic_start_time;
    } else if (monotonic_start_time != monotonic_clock::min_time) {
      CHECK_EQ(it->second.monotonic_start_time, monotonic_start_time);
    }
    if (it->second.realtime_start_time == realtime_clock::min_time) {
      it->second.realtime_start_time = realtime_start_time;
    } else if (realtime_start_time != realtime_clock::min_time) {
      CHECK_EQ(it->second.realtime_start_time, realtime_start_time);
    }

    it->second.parts.emplace_back(std::make_pair(part, parts_index));
  }

  CHECK_NE(old_parts.empty(), parts_list.empty())
      << ": Can't have a mix of old and new parts.";

  // Now reformat old_parts to be in the right datastructure to report.
  if (!old_parts.empty()) {
    std::vector<LogFile> result;
    for (UnsortedOldParts &p : old_parts) {
      // Sort by the oldest message in each file.
      std::sort(
          p.unsorted_parts.begin(), p.unsorted_parts.end(),
          [](const std::pair<monotonic_clock::time_point, std::string> &a,
             const std::pair<monotonic_clock::time_point, std::string> &b) {
            return a.first < b.first;
          });
      LogFile log_file;
      for (std::pair<monotonic_clock::time_point, std::string> &f :
           p.unsorted_parts) {
        p.parts.parts.emplace_back(std::move(f.second));
      }
      log_file.parts.emplace_back(std::move(p.parts));
      result.emplace_back(std::move(log_file));
    }

    return result;
  }

  // Now, sort them and produce the final vector form.
  std::vector<LogFile> result;
  result.reserve(parts_list.size());
  for (std::pair<const std::string, std::map<std::string, UnsortedLogParts>>
           &logs : parts_list) {
    LogFile new_file;
    new_file.log_event_uuid = logs.first;
    for (std::pair<const std::string, UnsortedLogParts> &parts : logs.second) {
      LogParts new_parts;
      new_parts.monotonic_start_time = parts.second.monotonic_start_time;
      new_parts.realtime_start_time = parts.second.realtime_start_time;
      new_parts.log_event_uuid = logs.first;
      new_parts.parts_uuid = parts.first;
      new_parts.node = std::move(parts.second.node);

      std::sort(parts.second.parts.begin(), parts.second.parts.end(),
                [](const std::pair<std::string, int> &a,
                   const std::pair<std::string, int> &b) {
                  return a.second < b.second;
                });
      new_parts.parts.reserve(parts.second.parts.size());
      for (std::pair<std::string, int> &p : parts.second.parts) {
        new_parts.parts.emplace_back(std::move(p.first));
      }
      new_file.parts.emplace_back(std::move(new_parts));
    }
    result.emplace_back(std::move(new_file));
  }
  return result;
}

std::ostream &operator<<(std::ostream &stream, const LogFile &file) {
  stream << "{";
  if (!file.log_event_uuid.empty()) {
    stream << "\"log_event_uuid\": \"" << file.log_event_uuid << "\", ";
  }
  stream << "\"parts\": [";
  for (size_t i = 0; i < file.parts.size(); ++i) {
    if (i != 0u) {
      stream << ", ";
    }
    stream << file.parts[i];
  }
  stream << "]}";
  return stream;
}
std::ostream &operator<<(std::ostream &stream, const LogParts &parts) {
  stream << "{";
  if (!parts.log_event_uuid.empty()) {
    stream << "\"log_event_uuid\": \"" << parts.log_event_uuid << "\", ";
  }
  if (!parts.parts_uuid.empty()) {
    stream << "\"parts_uuid\": \"" << parts.parts_uuid << "\", ";
  }
  if (!parts.node.empty()) {
    stream << "\"node\": \"" << parts.node << "\", ";
  }
  stream << "\"monotonic_start_time\": " << parts.monotonic_start_time
         << ", \"realtime_start_time\": " << parts.realtime_start_time << ", [";

  for (size_t i = 0; i < parts.parts.size(); ++i) {
    if (i != 0u) {
      stream << ", ";
    }
    stream << parts.parts[i];
  }

  stream << "]}";
  return stream;
}

std::vector<std::vector<std::string>> ToLogReaderVector(
    const std::vector<LogFile> &log_files) {
  std::vector<std::vector<std::string>> result;
  for (const LogFile &log_file : log_files) {
    for (const LogParts &log_parts : log_file.parts) {
      std::vector<std::string> parts;
      for (const std::string &part : log_parts.parts) {
        parts.emplace_back(part);
      }
      result.emplace_back(std::move(parts));
    }
  }
  return result;
}

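// Illustrative sketch of how these free functions are typically composed (the
// file names and the replay loop are assumptions, not code from this file):
// SortParts() groups raw part files into LogFiles, which can be fed straight
// into the LogReader below or flattened with ToLogReaderVector().
//
//   const std::vector<aos::logger::LogFile> log_files =
//       aos::logger::SortParts({"/tmp/log_part0.bfbs", "/tmp/log_part1.bfbs"});
//   aos::logger::LogReader reader(log_files);
//   reader.Register();
//   // ... run the SimulatedEventLoopFactory owned by the reader to replay ...
//   reader.Deregister();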
LogReader::LogReader(std::string_view filename,
                     const Configuration *replay_configuration)
    : LogReader(std::vector<std::string>{std::string(filename)},
                replay_configuration) {}

LogReader::LogReader(const std::vector<std::string> &filenames,
                     const Configuration *replay_configuration)
    : LogReader(std::vector<std::vector<std::string>>{filenames},
                replay_configuration) {}

// TODO(austin): Make this the base and kill the others. This has much better
// context for sorting.
LogReader::LogReader(const std::vector<LogFile> &log_files,
                     const Configuration *replay_configuration)
    : LogReader(ToLogReaderVector(log_files), replay_configuration) {}

LogReader::LogReader(const std::vector<std::vector<std::string>> &filenames,
                     const Configuration *replay_configuration)
    : filenames_(filenames),
      log_file_header_(ReadHeader(filenames[0][0])),
      replay_configuration_(replay_configuration) {
  MakeRemappedConfig();

  if (replay_configuration) {
    CHECK_EQ(configuration::MultiNode(configuration()),
             configuration::MultiNode(replay_configuration))
        << ": Log file and replay config need to both be multi or single "
           "node.";
  }

  if (!configuration::MultiNode(configuration())) {
    states_.emplace_back(
        std::make_unique<State>(std::make_unique<ChannelMerger>(filenames)));
  } else {
    if (replay_configuration) {
      CHECK_EQ(logged_configuration()->nodes()->size(),
               replay_configuration->nodes()->size())
          << ": Log file and replay config need to have matching nodes "
             "lists.";
      for (const Node *node : *logged_configuration()->nodes()) {
        if (configuration::GetNode(replay_configuration, node) == nullptr) {
          LOG(FATAL) << "Found node " << FlatbufferToJson(node)
                     << " in logged config that is not present in the replay "
                        "config.";
        }
      }
    }
    states_.resize(configuration()->nodes()->size());
  }
}

LogReader::~LogReader() {
  if (event_loop_factory_unique_ptr_) {
    Deregister();
  } else if (event_loop_factory_ != nullptr) {
    LOG(FATAL) << "Must call Deregister before the SimulatedEventLoopFactory "
                  "is destroyed";
  }
  if (offset_fp_ != nullptr) {
    fclose(offset_fp_);
  }
  // Zero out some buffers. It's easy to do use-after-frees on these, so make
  // it more obvious.
  if (remapped_configuration_buffer_) {
    remapped_configuration_buffer_->Wipe();
  }
  log_file_header_.Wipe();
}

const Configuration *LogReader::logged_configuration() const {
  return log_file_header_.message().configuration();
}

const Configuration *LogReader::configuration() const {
  return remapped_configuration_;
}

std::vector<const Node *> LogReader::Nodes() const {
  // Because the Node pointer will only be valid if it actually points to
  // memory owned by remapped_configuration_, we need to wait for the
  // remapped_configuration_ to be populated before accessing it.
  //
  // Also, note that whenever a map is changed, the nodes in here are
  // invalidated.
  CHECK(remapped_configuration_ != nullptr)
      << ": Need to call Register before the node() pointer will be valid.";
  return configuration::GetNodes(remapped_configuration_);
}

monotonic_clock::time_point LogReader::monotonic_start_time(
    const Node *node) const {
  State *state =
      states_[configuration::GetNodeIndex(configuration(), node)].get();
  CHECK(state != nullptr) << ": Unknown node " << FlatbufferToJson(node);

  return state->monotonic_start_time();
}

realtime_clock::time_point LogReader::realtime_start_time(
    const Node *node) const {
  State *state =
      states_[configuration::GetNodeIndex(configuration(), node)].get();
  CHECK(state != nullptr) << ": Unknown node " << FlatbufferToJson(node);

  return state->realtime_start_time();
}

void LogReader::Register() {
  event_loop_factory_unique_ptr_ =
      std::make_unique<SimulatedEventLoopFactory>(configuration());
  Register(event_loop_factory_unique_ptr_.get());
}

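// Note on ownership: the zero-argument Register() above creates and owns its
// own SimulatedEventLoopFactory, so ~LogReader() can Deregister() automatically.
// With the overload below the caller owns the factory and must call
// Deregister() explicitly before destroying it (see the LOG(FATAL) in
// ~LogReader()).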
void LogReader::Register(SimulatedEventLoopFactory *event_loop_factory) {
  event_loop_factory_ = event_loop_factory;
  remapped_configuration_ = event_loop_factory_->configuration();

  for (const Node *node : configuration::GetNodes(configuration())) {
    const size_t node_index =
        configuration::GetNodeIndex(configuration(), node);
    states_[node_index] =
        std::make_unique<State>(std::make_unique<ChannelMerger>(filenames_));
    State *state = states_[node_index].get();

    Register(state->SetNodeEventLoopFactory(
        event_loop_factory_->GetNodeEventLoopFactory(node)));
  }
  if (live_nodes_ == 0) {
    LOG(FATAL)
        << "Don't have logs from any of the nodes in the replay config--are "
           "you sure that the replay config matches the original config?";
  }

  // We need to now seed our per-node time offsets and get everything set up
  // to run.
  const size_t num_nodes = nodes_count();

  // It is easiest to solve for per node offsets with a matrix rather than
  // trying to solve the equations by hand. So let's get after it.
  //
  // Now, build up the map matrix.
  //
  // offset_matrix_ = (map_matrix_ + slope_matrix_) * [ta; tb; tc]
  map_matrix_ = Eigen::Matrix<mpq_class, Eigen::Dynamic, Eigen::Dynamic>::Zero(
      filters_.size() + 1, num_nodes);
  slope_matrix_ =
      Eigen::Matrix<mpq_class, Eigen::Dynamic, Eigen::Dynamic>::Zero(
          filters_.size() + 1, num_nodes);

  offset_matrix_ =
      Eigen::Matrix<mpq_class, Eigen::Dynamic, 1>::Zero(filters_.size() + 1);
  valid_matrix_ =
      Eigen::Matrix<bool, Eigen::Dynamic, 1>::Zero(filters_.size() + 1);
  last_valid_matrix_ =
      Eigen::Matrix<bool, Eigen::Dynamic, 1>::Zero(filters_.size() + 1);

  time_offset_matrix_ = Eigen::VectorXd::Zero(num_nodes);
  time_slope_matrix_ = Eigen::VectorXd::Zero(num_nodes);

  // All times should average out to the distributed clock.
  for (int i = 0; i < map_matrix_.cols(); ++i) {
    // 1/num_nodes.
    map_matrix_(0, i) = mpq_class(1, num_nodes);
  }
  valid_matrix_(0) = true;

  {
    // Now, add the a - b -> sample elements.
    size_t i = 1;
    for (std::pair<const std::tuple<const Node *, const Node *>,
                   std::tuple<message_bridge::NoncausalOffsetEstimator>>
             &filter : filters_) {
      const Node *const node_a = std::get<0>(filter.first);
      const Node *const node_b = std::get<1>(filter.first);

      const size_t node_a_index =
          configuration::GetNodeIndex(configuration(), node_a);
      const size_t node_b_index =
          configuration::GetNodeIndex(configuration(), node_b);

      // -a
      map_matrix_(i, node_a_index) = mpq_class(-1);
      // +b
      map_matrix_(i, node_b_index) = mpq_class(1);

      // -> sample
      std::get<0>(filter.second)
          .set_slope_pointer(&slope_matrix_(i, node_a_index));
      std::get<0>(filter.second).set_offset_pointer(&offset_matrix_(i, 0));

      valid_matrix_(i) = false;
      std::get<0>(filter.second).set_valid_pointer(&valid_matrix_(i));

      ++i;
    }
  }

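  // To make the system above concrete (illustrative only): with two nodes and
  // a single filter between node 0 and node 1, the rows are
  //
  //   map_matrix_ = [ 1/2  1/2 ]   (row 0: all nodes average to the
  //                 [  -1    1 ]    distributed clock; row 1: tb - ta)
  //
  // and that filter writes its measured offset into offset_matrix_(1, 0) and
  // its slope correction into slope_matrix_ through the pointers registered
  // above.
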
  for (std::unique_ptr<State> &state : states_) {
    state->SeedSortedMessages();
  }

  // Rank of the map matrix tells you if all the nodes are in communication
  // with each other, which tells you if the offsets are observable.
  const size_t connected_nodes =
      Eigen::FullPivLU<
          Eigen::Matrix<mpq_class, Eigen::Dynamic, Eigen::Dynamic>>(map_matrix_)
          .rank();

  // We don't need to support isolated nodes until someone has a real use
  // case.
  CHECK_EQ(connected_nodes, num_nodes)
      << ": There is a node which isn't communicating with the rest.";

  // And solve.
  UpdateOffsets();

  // We want to start the log file at the last start time of the log files
  // from all the nodes. Compute how long each node's simulation needs to run
  // to move time to this point.
  distributed_clock::time_point start_time = distributed_clock::min_time;

  // TODO(austin): We want an "OnStart" callback for each node rather than
  // running until the last node.

  for (std::unique_ptr<State> &state : states_) {
    VLOG(1) << "Start time is " << state->monotonic_start_time() << " for node "
            << MaybeNodeName(state->event_loop()->node()) << "now "
            << state->monotonic_now();
    // And start computing the start time on the distributed clock now that
    // that works.
    start_time = std::max(
        start_time, state->ToDistributedClock(state->monotonic_start_time()));
  }

  CHECK_GE(start_time, distributed_clock::epoch())
      << ": Hmm, we have a node starting before the start of time. Offset "
         "everything.";

  // Forwarding is tracked per channel. If it is enabled, we want to turn it
  // off. Otherwise messages replayed will get forwarded across to the other
  // nodes, and also replayed on the other nodes. This may not satisfy all
  // our users, but it'll start the discussion.
  if (configuration::MultiNode(event_loop_factory_->configuration())) {
    for (size_t i = 0; i < logged_configuration()->channels()->size(); ++i) {
      const Channel *channel = logged_configuration()->channels()->Get(i);
      const Node *node = configuration::GetNode(
          configuration(), channel->source_node()->string_view());

      State *state =
          states_[configuration::GetNodeIndex(configuration(), node)].get();

      const Channel *remapped_channel =
          RemapChannel(state->event_loop(), channel);

      event_loop_factory_->DisableForwarding(remapped_channel);
    }

    // If we are replaying a log, we don't want a bunch of redundant messages
    // from both the real message bridge and simulated message bridge.
    event_loop_factory_->DisableStatistics();
  }

  // While we are starting the system up, we might be relying on matching data
  // to timestamps on log files where the timestamp log file starts before the
  // data. In this case, it is reasonable to expect missing data.
  ignore_missing_data_ = true;
  VLOG(1) << "Running until " << start_time << " in Register";
  event_loop_factory_->RunFor(start_time.time_since_epoch());
  VLOG(1) << "At start time";
  // Now that we are running for real, missing data means that the log file is
  // corrupted or went wrong.
  ignore_missing_data_ = false;

  for (std::unique_ptr<State> &state : states_) {
    // Make the RT clock be correct before handing it to the user.
    if (state->realtime_start_time() != realtime_clock::min_time) {
      state->SetRealtimeOffset(state->monotonic_start_time(),
                               state->realtime_start_time());
    }
    VLOG(1) << "Start time is " << state->monotonic_start_time() << " for node "
            << MaybeNodeName(state->event_loop()->node()) << "now "
            << state->monotonic_now();
  }

  if (FLAGS_timestamps_to_csv) {
    for (std::pair<const std::tuple<const Node *, const Node *>,
                   std::tuple<message_bridge::NoncausalOffsetEstimator>>
             &filter : filters_) {
      const Node *const node_a = std::get<0>(filter.first);
      const Node *const node_b = std::get<1>(filter.first);

      std::get<0>(filter.second)
          .SetFirstFwdTime(event_loop_factory_->GetNodeEventLoopFactory(node_a)
                               ->monotonic_now());
      std::get<0>(filter.second)
          .SetFirstRevTime(event_loop_factory_->GetNodeEventLoopFactory(node_b)
                               ->monotonic_now());
    }
  }
}

void LogReader::UpdateOffsets() {
  VLOG(2) << "Samples are " << offset_matrix_;
  VLOG(2) << "Map is " << (map_matrix_ + slope_matrix_);
  std::tie(time_slope_matrix_, time_offset_matrix_) = SolveOffsets();
  Eigen::IOFormat HeavyFmt(Eigen::FullPrecision, 0, ", ", ";\n", "[", "]", "[",
                           "]");
  VLOG(1) << "First slope " << time_slope_matrix_.transpose().format(HeavyFmt)
          << " offset " << time_offset_matrix_.transpose().format(HeavyFmt);

  size_t node_index = 0;
  for (std::unique_ptr<State> &state : states_) {
    state->SetDistributedOffset(offset(node_index), slope(node_index));
    VLOG(1) << "Offset for node " << node_index << " "
            << MaybeNodeName(state->event_loop()->node()) << "is "
            << aos::distributed_clock::time_point(offset(node_index))
            << " slope " << std::setprecision(9) << std::fixed
            << slope(node_index);
    ++node_index;
  }

  if (VLOG_IS_ON(1)) {
    LogFit("Offset is");
  }
}

void LogReader::LogFit(std::string_view prefix) {
  for (std::unique_ptr<State> &state : states_) {
    VLOG(1) << MaybeNodeName(state->event_loop()->node()) << " now "
            << state->monotonic_now() << " distributed "
            << event_loop_factory_->distributed_now();
  }

  for (std::pair<const std::tuple<const Node *, const Node *>,
                 std::tuple<message_bridge::NoncausalOffsetEstimator>> &filter :
       filters_) {
    message_bridge::NoncausalOffsetEstimator *estimator =
        &std::get<0>(filter.second);

    if (estimator->a_timestamps().size() == 0 &&
        estimator->b_timestamps().size() == 0) {
      continue;
    }

    if (VLOG_IS_ON(1)) {
      estimator->LogFit(prefix);
    }

    const Node *const node_a = std::get<0>(filter.first);
    const Node *const node_b = std::get<1>(filter.first);

    const size_t node_a_index =
        configuration::GetNodeIndex(configuration(), node_a);
    const size_t node_b_index =
        configuration::GetNodeIndex(configuration(), node_b);

    const double recovered_slope =
        slope(node_b_index) / slope(node_a_index) - 1.0;
    const int64_t recovered_offset =
        offset(node_b_index).count() - offset(node_a_index).count() *
                                           slope(node_b_index) /
                                           slope(node_a_index);

    VLOG(1) << "Recovered slope " << std::setprecision(20) << recovered_slope
            << " (error " << recovered_slope - estimator->fit().slope() << ") "
            << " offset " << std::setprecision(20) << recovered_offset
            << " (error "
            << recovered_offset - estimator->fit().offset().count() << ")";

    const aos::distributed_clock::time_point a0 =
        states_[node_a_index]->ToDistributedClock(
            std::get<0>(estimator->a_timestamps()[0]));
    const aos::distributed_clock::time_point a1 =
        states_[node_a_index]->ToDistributedClock(
            std::get<0>(estimator->a_timestamps()[1]));

    VLOG(1) << node_a->name()->string_view() << " timestamps()[0] = "
            << std::get<0>(estimator->a_timestamps()[0]) << " -> " << a0
            << " distributed -> " << node_b->name()->string_view() << " "
            << states_[node_b_index]->FromDistributedClock(a0) << " should be "
            << aos::monotonic_clock::time_point(
                   std::chrono::nanoseconds(static_cast<int64_t>(
                       std::get<0>(estimator->a_timestamps()[0])
                           .time_since_epoch()
                           .count() *
                       (1.0 + estimator->fit().slope()))) +
                   estimator->fit().offset())
            << ((a0 <= event_loop_factory_->distributed_now())
                    ? ""
                    : " After now, investigate");
    VLOG(1) << node_a->name()->string_view() << " timestamps()[1] = "
            << std::get<0>(estimator->a_timestamps()[1]) << " -> " << a1
            << " distributed -> " << node_b->name()->string_view() << " "
            << states_[node_b_index]->FromDistributedClock(a1) << " should be "
            << aos::monotonic_clock::time_point(
                   std::chrono::nanoseconds(static_cast<int64_t>(
                       std::get<0>(estimator->a_timestamps()[1])
                           .time_since_epoch()
                           .count() *
                       (1.0 + estimator->fit().slope()))) +
                   estimator->fit().offset())
1237 << ((event_loop_factory_->distributed_now() <= a1)
1238 ? ""
1239 : " Before now, investigate");
1240
1241 const aos::distributed_clock::time_point b0 =
1242 states_[node_b_index]->ToDistributedClock(
1243 std::get<0>(estimator->b_timestamps()[0]));
1244 const aos::distributed_clock::time_point b1 =
1245 states_[node_b_index]->ToDistributedClock(
1246 std::get<0>(estimator->b_timestamps()[1]));
1247
1248 VLOG(1) << node_b->name()->string_view() << " timestamps()[0] = "
1249 << std::get<0>(estimator->b_timestamps()[0]) << " -> " << b0
1250 << " distributed -> " << node_a->name()->string_view() << " "
1251 << states_[node_a_index]->FromDistributedClock(b0)
1252 << ((b0 <= event_loop_factory_->distributed_now())
1253 ? ""
1254 : " After now, investigate");
1255 VLOG(1) << node_b->name()->string_view() << " timestamps()[1] = "
1256 << std::get<0>(estimator->b_timestamps()[1]) << " -> " << b1
1257 << " distributed -> " << node_a->name()->string_view() << " "
1258 << states_[node_a_index]->FromDistributedClock(b1)
1259 << ((event_loop_factory_->distributed_now() <= b1)
1260 ? ""
1261 : " Before now, investigate");
1262 }
1263}
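
// The recovered_slope / recovered_offset algebra above can be checked by hand.
// A standalone numeric sketch (compiled on its own), assuming each node's fit
// has the affine form monotonic = distributed * slope + offset; the numbers
// below are made up:
#include <cassert>
#include <cmath>

int main() {
  // Hypothetical per-node fits: monotonic_x = distributed * slope_x + offset_x.
  const double slope_a = 1.0 + 2e-6, offset_a = 1000.0;
  const double slope_b = 1.0 - 3e-6, offset_b = -500.0;

  // Same identities as LogFit(): eliminating the distributed clock gives the
  // direct a -> b map, whose slope (relative to 1.0) is slope_b / slope_a - 1
  // and whose offset is offset_b - offset_a * slope_b / slope_a.
  const double recovered_slope = slope_b / slope_a - 1.0;
  const double recovered_offset = offset_b - offset_a * slope_b / slope_a;

  // Check on an arbitrary distributed time: mapping node a's monotonic time
  // through the recovered fit lands on node b's monotonic time.
  const double distributed = 123456.789;
  const double monotonic_a = distributed * slope_a + offset_a;
  const double monotonic_b = distributed * slope_b + offset_b;
  assert(std::abs(monotonic_a * (1.0 + recovered_slope) + recovered_offset -
                  monotonic_b) < 1e-6);
  return 0;
}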
1264
1265message_bridge::NoncausalOffsetEstimator *LogReader::GetFilter(
Austin Schuh8bd96322020-02-13 21:18:22 -08001266 const Node *node_a, const Node *node_b) {
1267 CHECK_NE(node_a, node_b);
1268 CHECK_EQ(configuration::GetNode(configuration(), node_a), node_a);
1269 CHECK_EQ(configuration::GetNode(configuration(), node_b), node_b);
1270
1271 if (node_a > node_b) {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001272 return GetFilter(node_b, node_a);
Austin Schuh8bd96322020-02-13 21:18:22 -08001273 }
1274
1275 auto tuple = std::make_tuple(node_a, node_b);
1276
1277 auto it = filters_.find(tuple);
1278
1279 if (it == filters_.end()) {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001280 auto &x =
1281 filters_
1282 .insert(std::make_pair(
1283 tuple, std::make_tuple(message_bridge::NoncausalOffsetEstimator(
1284 node_a, node_b))))
1285 .first->second;
Austin Schuh8bd96322020-02-13 21:18:22 -08001286 if (FLAGS_timestamps_to_csv) {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001287 std::get<0>(x).SetFwdCsvFileName(absl::StrCat(
1288 "/tmp/timestamp_noncausal_", node_a->name()->string_view(), "_",
1289 node_b->name()->string_view()));
1290 std::get<0>(x).SetRevCsvFileName(absl::StrCat(
1291 "/tmp/timestamp_noncausal_", node_b->name()->string_view(), "_",
1292 node_a->name()->string_view()));
Austin Schuh8bd96322020-02-13 21:18:22 -08001293 }
1294
Austin Schuh2f8fd752020-09-01 22:38:28 -07001295 return &std::get<0>(x);
Austin Schuh8bd96322020-02-13 21:18:22 -08001296 } else {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001297 return &std::get<0>(it->second);
Austin Schuh8bd96322020-02-13 21:18:22 -08001298 }
1299}
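
// The lookup above keys every unordered node pair on its pointer-ordered form
// so that both directions share a single estimator.  A minimal standalone
// sketch of the same pattern (the types here are stand-ins, not the real
// message_bridge classes):
#include <map>
#include <tuple>

struct ExampleNode {};
struct ExampleEstimator {
  ExampleEstimator(const ExampleNode *, const ExampleNode *) {}
};

std::map<std::tuple<const ExampleNode *, const ExampleNode *>, ExampleEstimator>
    example_filters;

ExampleEstimator *GetExampleFilter(const ExampleNode *a, const ExampleNode *b) {
  if (a > b) {
    // Canonicalize so (a, b) and (b, a) resolve to the same map entry.
    return GetExampleFilter(b, a);
  }
  auto it = example_filters.find(std::make_tuple(a, b));
  if (it == example_filters.end()) {
    it = example_filters
             .insert(std::make_pair(std::make_tuple(a, b),
                                    ExampleEstimator(a, b)))
             .first;
  }
  return &it->second;
}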
1300
Austin Schuhe309d2a2019-11-29 13:25:21 -08001301void LogReader::Register(EventLoop *event_loop) {
Austin Schuh8bd96322020-02-13 21:18:22 -08001302 State *state =
1303 states_[configuration::GetNodeIndex(configuration(), event_loop->node())]
1304 .get();
Austin Schuh6f3babe2020-01-26 20:34:50 -08001305
Austin Schuh858c9f32020-08-31 16:56:12 -07001306 state->set_event_loop(event_loop);
Austin Schuhe309d2a2019-11-29 13:25:21 -08001307
Tyler Chatow67ddb032020-01-12 14:30:04 -08001308 // We don't run timing reports when trying to print out logged data, because
1309 // otherwise we would end up printing out the timing reports themselves...
1310 // This is only really relevant when we are replaying into a simulation.
Austin Schuh6f3babe2020-01-26 20:34:50 -08001311 event_loop->SkipTimingReport();
1312 event_loop->SkipAosLog();
Austin Schuh39788ff2019-12-01 18:22:57 -08001313
Austin Schuh858c9f32020-08-31 16:56:12 -07001314 const bool has_data = state->SetNode();
Austin Schuhe309d2a2019-11-29 13:25:21 -08001315
Austin Schuh858c9f32020-08-31 16:56:12 -07001316 state->SetChannelCount(logged_configuration()->channels()->size());
Austin Schuh8bd96322020-02-13 21:18:22 -08001317
Austin Schuh858c9f32020-08-31 16:56:12 -07001318 for (size_t i = 0; i < logged_configuration()->channels()->size(); ++i) {
Austin Schuh6f3babe2020-01-26 20:34:50 -08001319 const Channel *channel =
1320 RemapChannel(event_loop, logged_configuration()->channels()->Get(i));
Austin Schuh6331ef92020-01-07 18:28:09 -08001321
Austin Schuh858c9f32020-08-31 16:56:12 -07001322 NodeEventLoopFactory *channel_target_event_loop_factory = nullptr;
Austin Schuh2f8fd752020-09-01 22:38:28 -07001323 message_bridge::NoncausalOffsetEstimator *filter = nullptr;
Austin Schuh8bd96322020-02-13 21:18:22 -08001324
1325 if (!configuration::ChannelIsSendableOnNode(channel, event_loop->node()) &&
1326 configuration::ChannelIsReadableOnNode(channel, event_loop->node())) {
1327 const Node *target_node = configuration::GetNode(
1328 event_loop->configuration(), channel->source_node()->string_view());
Austin Schuh858c9f32020-08-31 16:56:12 -07001329 filter = GetFilter(event_loop->node(), target_node);
Austin Schuh8bd96322020-02-13 21:18:22 -08001330
1331 if (event_loop_factory_ != nullptr) {
Austin Schuh858c9f32020-08-31 16:56:12 -07001332 channel_target_event_loop_factory =
Austin Schuh8bd96322020-02-13 21:18:22 -08001333 event_loop_factory_->GetNodeEventLoopFactory(target_node);
1334 }
1335 }
Austin Schuh858c9f32020-08-31 16:56:12 -07001336
1337 state->SetChannel(i, event_loop->MakeRawSender(channel), filter,
1338 channel_target_event_loop_factory);
Austin Schuhe309d2a2019-11-29 13:25:21 -08001339 }
1340
Austin Schuh6aa77be2020-02-22 21:06:40 -08001341 // If we didn't find any log files with data in them, we won't ever get a
1342 // callback or be live. So skip the rest of the setup.
1343 if (!has_data) {
1344 return;
1345 }
1346
Austin Schuh858c9f32020-08-31 16:56:12 -07001347 state->set_timer_handler(event_loop->AddTimer([this, state]() {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001348 VLOG(1) << "Starting sending " << MaybeNodeName(state->event_loop()->node())
1349 << "at " << state->event_loop()->context().monotonic_event_time
1350 << " now " << state->monotonic_now();
Austin Schuh858c9f32020-08-31 16:56:12 -07001351 if (state->OldestMessageTime() == monotonic_clock::max_time) {
Austin Schuh6f3babe2020-01-26 20:34:50 -08001352 --live_nodes_;
Austin Schuh2f8fd752020-09-01 22:38:28 -07001353 VLOG(1) << MaybeNodeName(state->event_loop()->node()) << "Node down!";
Austin Schuh6f3babe2020-01-26 20:34:50 -08001354 if (live_nodes_ == 0) {
1355 event_loop_factory_->Exit();
1356 }
James Kuszmaul314f1672020-01-03 20:02:08 -08001357 return;
1358 }
Austin Schuh6f3babe2020-01-26 20:34:50 -08001359 TimestampMerger::DeliveryTimestamp channel_timestamp;
Austin Schuh05b70472020-01-01 17:11:17 -08001360 int channel_index;
1361 FlatbufferVector<MessageHeader> channel_data =
1362 FlatbufferVector<MessageHeader>::Empty();
1363
Austin Schuh2f8fd752020-09-01 22:38:28 -07001364 if (VLOG_IS_ON(1)) {
1365 LogFit("Offset was");
1366 }
1367
1368 bool update_time;
Austin Schuh05b70472020-01-01 17:11:17 -08001369 std::tie(channel_timestamp, channel_index, channel_data) =
Austin Schuh2f8fd752020-09-01 22:38:28 -07001370 state->PopOldest(&update_time);
Austin Schuh05b70472020-01-01 17:11:17 -08001371
Austin Schuhe309d2a2019-11-29 13:25:21 -08001372 const monotonic_clock::time_point monotonic_now =
Austin Schuh858c9f32020-08-31 16:56:12 -07001373 state->event_loop()->context().monotonic_event_time;
Austin Schuh2f8fd752020-09-01 22:38:28 -07001374 if (!FLAGS_skip_order_validation) {
1375 CHECK(monotonic_now == channel_timestamp.monotonic_event_time)
1376 << ": " << FlatbufferToJson(state->event_loop()->node()) << " Now "
1377 << monotonic_now << " trying to send "
1378 << channel_timestamp.monotonic_event_time << " failure "
1379 << state->DebugString();
1380 } else if (monotonic_now != channel_timestamp.monotonic_event_time) {
1381 LOG(WARNING) << "Check failed: monotonic_now == "
1382 "channel_timestamp.monotonic_event_time) ("
1383 << monotonic_now << " vs. "
1384 << channel_timestamp.monotonic_event_time
1385 << "): " << FlatbufferToJson(state->event_loop()->node())
1386 << " Now " << monotonic_now << " trying to send "
1387 << channel_timestamp.monotonic_event_time << " failure "
1388 << state->DebugString();
1389 }
Austin Schuhe309d2a2019-11-29 13:25:21 -08001390
Austin Schuh6f3babe2020-01-26 20:34:50 -08001391 if (channel_timestamp.monotonic_event_time >
Austin Schuh858c9f32020-08-31 16:56:12 -07001392 state->monotonic_start_time() ||
Austin Schuh15649d62019-12-28 16:36:38 -08001393 event_loop_factory_ != nullptr) {
Austin Schuh8bd96322020-02-13 21:18:22 -08001394 if ((!ignore_missing_data_ && !FLAGS_skip_missing_forwarding_entries &&
Austin Schuh858c9f32020-08-31 16:56:12 -07001395 !state->at_end()) ||
Austin Schuh05b70472020-01-01 17:11:17 -08001396 channel_data.message().data() != nullptr) {
1397 CHECK(channel_data.message().data() != nullptr)
1398 << ": Got a message without data. Forwarding entry which was "
Austin Schuh2f8fd752020-09-01 22:38:28 -07001399 "not matched? Use --skip_missing_forwarding_entries to "
Brian Silverman87ac0402020-09-17 14:47:01 -07001400 "ignore this.";
Austin Schuh92547522019-12-28 14:33:43 -08001401
Austin Schuh2f8fd752020-09-01 22:38:28 -07001402 if (update_time) {
Austin Schuh8bd96322020-02-13 21:18:22 -08001403 // Confirm that the message was sent on the sending node before the
1404 // destination node (this node). As a proxy, do this by making sure
1405 // that time on the source node is past when the message was sent.
Austin Schuh2f8fd752020-09-01 22:38:28 -07001406 if (!FLAGS_skip_order_validation) {
1407 CHECK_LT(channel_timestamp.monotonic_remote_time,
1408 state->monotonic_remote_now(channel_index))
1409 << state->event_loop()->node()->name()->string_view() << " to "
1410 << state->remote_node(channel_index)->name()->string_view()
1411 << " " << state->DebugString();
1412 } else if (channel_timestamp.monotonic_remote_time >=
1413 state->monotonic_remote_now(channel_index)) {
1414 LOG(WARNING)
1415 << "Check failed: channel_timestamp.monotonic_remote_time < "
1416 "state->monotonic_remote_now(channel_index) ("
1417 << channel_timestamp.monotonic_remote_time << " vs. "
1418 << state->monotonic_remote_now(channel_index) << ") "
1419 << state->event_loop()->node()->name()->string_view() << " to "
1420 << state->remote_node(channel_index)->name()->string_view()
1421 << " currently " << channel_timestamp.monotonic_event_time
1422 << " ("
1423 << state->ToDistributedClock(
1424 channel_timestamp.monotonic_event_time)
1425 << ") remote event time "
1426 << channel_timestamp.monotonic_remote_time << " ("
1427 << state->RemoteToDistributedClock(
1428 channel_index, channel_timestamp.monotonic_remote_time)
1429 << ") " << state->DebugString();
1430 }
Austin Schuh8bd96322020-02-13 21:18:22 -08001431
1432 if (FLAGS_timestamps_to_csv) {
1433 if (offset_fp_ == nullptr) {
1434 offset_fp_ = fopen("/tmp/offsets.csv", "w");
1435 fprintf(
1436 offset_fp_,
1437 "# time_since_start, offset node 0, offset node 1, ...\n");
1438 first_time_ = channel_timestamp.realtime_event_time;
1439 }
1440
1441 fprintf(offset_fp_, "%.9f",
1442 std::chrono::duration_cast<std::chrono::duration<double>>(
1443 channel_timestamp.realtime_event_time - first_time_)
1444 .count());
Austin Schuh2f8fd752020-09-01 22:38:28 -07001445 for (int i = 1; i < time_offset_matrix_.rows(); ++i) {
1446 fprintf(offset_fp_, ", %.9f",
1447 time_offset_matrix_(i, 0) +
1448 time_slope_matrix_(i, 0) *
1449 chrono::duration<double>(
1450 event_loop_factory_->distributed_now()
1451 .time_since_epoch())
1452 .count());
Austin Schuh8bd96322020-02-13 21:18:22 -08001453 }
1454 fprintf(offset_fp_, "\n");
1455 }
Austin Schuh8bd96322020-02-13 21:18:22 -08001456 }
1457
Austin Schuh15649d62019-12-28 16:36:38 -08001458 // If we have access to the factory, use it to fix the realtime time.
Austin Schuh858c9f32020-08-31 16:56:12 -07001459 state->SetRealtimeOffset(channel_timestamp.monotonic_event_time,
1460 channel_timestamp.realtime_event_time);
Austin Schuh15649d62019-12-28 16:36:38 -08001461
Austin Schuh2f8fd752020-09-01 22:38:28 -07001462 VLOG(1) << MaybeNodeName(state->event_loop()->node()) << "Sending "
1463 << channel_timestamp.monotonic_event_time;
1464 // TODO(austin): std::move channel_data in and make that efficient in
1465 // simulation.
Austin Schuh858c9f32020-08-31 16:56:12 -07001466 state->Send(channel_index, channel_data.message().data()->Data(),
1467 channel_data.message().data()->size(),
1468 channel_timestamp.monotonic_remote_time,
1469 channel_timestamp.realtime_remote_time,
1470 channel_timestamp.remote_queue_index);
Austin Schuh2f8fd752020-09-01 22:38:28 -07001471 } else if (state->at_end() && !ignore_missing_data_) {
Austin Schuh8bd96322020-02-13 21:18:22 -08001472 // We are at the end of the log file and found missing data. Finish
Austin Schuh2f8fd752020-09-01 22:38:28 -07001473 // reading the rest of the log file and call it quits. We don't want
1474 // to replay partial data.
Austin Schuh858c9f32020-08-31 16:56:12 -07001475 while (state->OldestMessageTime() != monotonic_clock::max_time) {
1476 bool update_time_dummy;
1477 state->PopOldest(&update_time_dummy);
Austin Schuh8bd96322020-02-13 21:18:22 -08001478 }
Austin Schuh2f8fd752020-09-01 22:38:28 -07001479 } else {
1480 CHECK(channel_data.message().data() == nullptr) << ": Nullptr";
Austin Schuh92547522019-12-28 14:33:43 -08001481 }
Austin Schuhe309d2a2019-11-29 13:25:21 -08001482 } else {
Austin Schuh6f3babe2020-01-26 20:34:50 -08001483 LOG(WARNING)
1484 << "Not sending data from before the start of the log file. "
1485 << channel_timestamp.monotonic_event_time.time_since_epoch().count()
1486 << " start " << monotonic_start_time().time_since_epoch().count()
1487 << " " << FlatbufferToJson(channel_data);
Austin Schuhe309d2a2019-11-29 13:25:21 -08001488 }
1489
Austin Schuh858c9f32020-08-31 16:56:12 -07001490 const monotonic_clock::time_point next_time = state->OldestMessageTime();
Austin Schuh6f3babe2020-01-26 20:34:50 -08001491 if (next_time != monotonic_clock::max_time) {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001492 VLOG(1) << "Scheduling " << MaybeNodeName(state->event_loop()->node())
1493 << "wakeup for " << next_time << "("
1494 << state->ToDistributedClock(next_time)
1495 << " distributed), now is " << state->monotonic_now();
Austin Schuh858c9f32020-08-31 16:56:12 -07001496 state->Setup(next_time);
James Kuszmaul314f1672020-01-03 20:02:08 -08001497 } else {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001498 VLOG(1) << MaybeNodeName(state->event_loop()->node())
1499 << "No next message, scheduling shutdown";
1500 // Set a timer up immediately after now to die. If we don't do this,
1501 // then the senders waiting on the message we just read will never get
1502 // called.
Austin Schuheecb9282020-01-08 17:43:30 -08001503 if (event_loop_factory_ != nullptr) {
Austin Schuh858c9f32020-08-31 16:56:12 -07001504 state->Setup(monotonic_now + event_loop_factory_->send_delay() +
1505 std::chrono::nanoseconds(1));
Austin Schuheecb9282020-01-08 17:43:30 -08001506 }
Austin Schuhe309d2a2019-11-29 13:25:21 -08001507 }
Austin Schuh8bd96322020-02-13 21:18:22 -08001508
Austin Schuh2f8fd752020-09-01 22:38:28 -07001509 // Once we make this call, the current time changes. So do everything
1510 // which involves time before changing it. That especially includes
1511 // sending the message.
1512 if (update_time) {
1513 VLOG(1) << MaybeNodeName(state->event_loop()->node())
1514 << "updating offsets";
1515
1516 std::vector<aos::monotonic_clock::time_point> before_times;
1517 before_times.resize(states_.size());
1518 std::transform(states_.begin(), states_.end(), before_times.begin(),
1519 [](const std::unique_ptr<State> &state) {
1520 return state->monotonic_now();
1521 });
1522
1523 for (size_t i = 0; i < states_.size(); ++i) {
Brian Silvermand90905f2020-09-23 14:42:56 -07001524 VLOG(1) << MaybeNodeName(states_[i]->event_loop()->node()) << "before "
1525 << states_[i]->monotonic_now();
Austin Schuh2f8fd752020-09-01 22:38:28 -07001526 }
1527
Austin Schuh8bd96322020-02-13 21:18:22 -08001528 UpdateOffsets();
Austin Schuh2f8fd752020-09-01 22:38:28 -07001529 VLOG(1) << MaybeNodeName(state->event_loop()->node()) << "Now is now "
1530 << state->monotonic_now();
1531
1532 for (size_t i = 0; i < states_.size(); ++i) {
Brian Silvermand90905f2020-09-23 14:42:56 -07001533 VLOG(1) << MaybeNodeName(states_[i]->event_loop()->node()) << "after "
1534 << states_[i]->monotonic_now();
Austin Schuh2f8fd752020-09-01 22:38:28 -07001535 }
1536
1537 // TODO(austin): We should be perfect.
1538 const std::chrono::nanoseconds kTolerance{3};
1539 if (!FLAGS_skip_order_validation) {
1540 CHECK_GE(next_time, state->monotonic_now())
1541 << ": Time skipped the next event.";
1542
1543 for (size_t i = 0; i < states_.size(); ++i) {
1544 CHECK_GE(states_[i]->monotonic_now(), before_times[i] - kTolerance)
1545 << ": Time changed too much on node "
1546 << MaybeNodeName(states_[i]->event_loop()->node());
1547 CHECK_LE(states_[i]->monotonic_now(), before_times[i] + kTolerance)
1548 << ": Time changed too much on node "
1549 << states_[i]->event_loop()->node()->name()->string_view();
1550 }
1551 } else {
1552 if (next_time < state->monotonic_now()) {
1553 LOG(WARNING) << "Check failed: next_time >= "
1554 "state->monotonic_now() ("
1555 << next_time << " vs. " << state->monotonic_now()
1556 << "): Time skipped the next event.";
1557 }
1558 for (size_t i = 0; i < states_.size(); ++i) {
1559 if (states_[i]->monotonic_now() < before_times[i] - kTolerance) {
1560 LOG(WARNING) << "Check failed: "
1561 "states_[i]->monotonic_now() "
1562 ">= before_times[i] - kTolerance ("
1563 << states_[i]->monotonic_now() << " vs. "
1564 << before_times[i] - kTolerance
1565 << ") : Time changed too much on node "
1566 << MaybeNodeName(states_[i]->event_loop()->node());
1567 }
1568 if (states_[i]->monotonic_now() > before_times[i] + kTolerance) {
1569 LOG(WARNING) << "Check failed: "
1570 "states_[i]->monotonic_now() "
1571 "<= before_times[i] + kTolerance ("
1572 << states_[i]->monotonic_now() << " vs. "
1573 << before_times[i] + kTolerance
1574 << ") : Time changed too much on node "
1575 << MaybeNodeName(states_[i]->event_loop()->node());
1576 }
1577 }
1578 }
Austin Schuh8bd96322020-02-13 21:18:22 -08001579 }
Austin Schuh2f8fd752020-09-01 22:38:28 -07001580
1581 VLOG(1) << MaybeNodeName(state->event_loop()->node()) << "Done sending at "
1582 << state->event_loop()->context().monotonic_event_time << " now "
1583 << state->monotonic_now();
Austin Schuh858c9f32020-08-31 16:56:12 -07001584 }));
Austin Schuhe309d2a2019-11-29 13:25:21 -08001585
Austin Schuh6f3babe2020-01-26 20:34:50 -08001586 ++live_nodes_;
1587
Austin Schuh858c9f32020-08-31 16:56:12 -07001588 if (state->OldestMessageTime() != monotonic_clock::max_time) {
1589 event_loop->OnRun([state]() { state->Setup(state->OldestMessageTime()); });
Austin Schuhe309d2a2019-11-29 13:25:21 -08001590 }
1591}
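
// When --timestamps_to_csv is set, the timer handler above appends one row per
// replayed message to /tmp/offsets.csv.  A hypothetical excerpt (the values
// are made up; after the first column the loop writes one fitted offset per
// node starting at index 1, node 0 being the reference):
//
//   # time_since_start, offset node 0, offset node 1, ...
//   0.000000000, -0.000012345
//   0.020000000, -0.000012348
//   0.040000000, -0.000012352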
1592
1593void LogReader::Deregister() {
James Kuszmaul84ff3e52020-01-03 19:48:53 -08001594 // Make sure that things get destroyed in the correct order, rather than
1595 // relying on getting the order correct in the class definition.
Austin Schuh8bd96322020-02-13 21:18:22 -08001596 for (std::unique_ptr<State> &state : states_) {
Austin Schuh858c9f32020-08-31 16:56:12 -07001597 state->Deregister();
Austin Schuhe309d2a2019-11-29 13:25:21 -08001598 }
Austin Schuh92547522019-12-28 14:33:43 -08001599
James Kuszmaul84ff3e52020-01-03 19:48:53 -08001600 event_loop_factory_unique_ptr_.reset();
1601 event_loop_factory_ = nullptr;
Austin Schuhe309d2a2019-11-29 13:25:21 -08001602}
1603
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -08001604void LogReader::RemapLoggedChannel(std::string_view name, std::string_view type,
1605 std::string_view add_prefix) {
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -08001606 for (size_t ii = 0; ii < logged_configuration()->channels()->size(); ++ii) {
1607 const Channel *const channel = logged_configuration()->channels()->Get(ii);
1608 if (channel->name()->str() == name &&
1609 channel->type()->string_view() == type) {
1610 CHECK_EQ(0u, remapped_channels_.count(ii))
1611 << "Already remapped channel "
1612 << configuration::CleanedChannelToString(channel);
1613 remapped_channels_[ii] = std::string(add_prefix) + std::string(name);
1614 VLOG(1) << "Remapping channel "
1615 << configuration::CleanedChannelToString(channel)
1616 << " to have name " << remapped_channels_[ii];
Austin Schuh6331ef92020-01-07 18:28:09 -08001617 MakeRemappedConfig();
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -08001618 return;
1619 }
1620 }
1621 LOG(FATAL) << "Unable to locate channel with name " << name << " and type "
1622 << type;
1623}
1624
Austin Schuh01b4c352020-09-21 23:09:39 -07001625void LogReader::RemapLoggedChannel(std::string_view name, std::string_view type,
1626 const Node *node,
1627 std::string_view add_prefix) {
1628 VLOG(1) << "Node is " << aos::FlatbufferToJson(node);
1629 const Channel *remapped_channel =
1630 configuration::GetChannel(logged_configuration(), name, type, "", node);
1631 CHECK(remapped_channel != nullptr) << ": Failed to find {\"name\": \"" << name
1632 << "\", \"type\": \"" << type << "\"}";
1633 VLOG(1) << "Original {\"name\": \"" << name << "\", \"type\": \"" << type
1634 << "\"}";
1635 VLOG(1) << "Remapped "
1636 << aos::configuration::StrippedChannelToString(remapped_channel);
1637
1638 // We want to make /spray on node 0 go to /0/spray by snooping the maps. And
1639 // we want it to degrade gracefully to just working if the heuristics fail.
1640 //
1641 // The easiest way to do this is going to be incredibly specific and verbose.
1642 // Look up /spray, to /0/spray. Then, prefix the result with /original to get
1643 // /original/0/spray. Then, create a map from /original/spray to
1644 // /original/0/spray for just the type we were asked for.
1645 if (name != remapped_channel->name()->string_view()) {
1646 MapT new_map;
1647 new_map.match = std::make_unique<ChannelT>();
1648 new_map.match->name = absl::StrCat(add_prefix, name);
1649 new_map.match->type = type;
1650 if (node != nullptr) {
1651 new_map.match->source_node = node->name()->str();
1652 }
1653 new_map.rename = std::make_unique<ChannelT>();
1654 new_map.rename->name =
1655 absl::StrCat(add_prefix, remapped_channel->name()->string_view());
1656 maps_.emplace_back(std::move(new_map));
1657 }
1658
1659 const size_t channel_index =
1660 configuration::ChannelIndex(logged_configuration(), remapped_channel);
1661 CHECK_EQ(0u, remapped_channels_.count(channel_index))
1662 << "Already remapped channel "
1663 << configuration::CleanedChannelToString(remapped_channel);
1664 remapped_channels_[channel_index] =
1665 absl::StrCat(add_prefix, remapped_channel->name()->string_view());
1666 MakeRemappedConfig();
1667}
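
// A worked example of the remapping above, using the /spray channel from the
// comment.  The "examples.Spray" type, the pi1 node, and the "/original"
// default prefix are assumptions for illustration:
//
//   reader.RemapLoggedChannel("/spray", "examples.Spray", pi1);
//
// If the logged configuration already maps /spray to /pi1/spray for pi1, the
// replayed data is resent on /original/pi1/spray, and a map equivalent to
//
//   { "match":  { "name": "/original/spray", "type": "examples.Spray",
//                 "source_node": "pi1" },
//     "rename": { "name": "/original/pi1/spray" } }
//
// is appended so that /original/spray still resolves correctly on pi1.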
1668
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -08001669void LogReader::MakeRemappedConfig() {
Austin Schuh8bd96322020-02-13 21:18:22 -08001670 for (std::unique_ptr<State> &state : states_) {
Austin Schuh6aa77be2020-02-22 21:06:40 -08001671 if (state) {
Austin Schuh858c9f32020-08-31 16:56:12 -07001672 CHECK(!state->event_loop())
Austin Schuh6aa77be2020-02-22 21:06:40 -08001673 << ": Can't change the mapping after the events are scheduled.";
1674 }
Austin Schuh6f3babe2020-01-26 20:34:50 -08001675 }
Austin Schuhac0771c2020-01-07 18:36:30 -08001676
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -08001677 // If no remapping occurred and we are using the original config, then there
1678 // is nothing interesting to do here.
1679 if (remapped_channels_.empty() && replay_configuration_ == nullptr) {
Austin Schuh6f3babe2020-01-26 20:34:50 -08001680 remapped_configuration_ = logged_configuration();
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -08001681 return;
1682 }
1683 // Config to copy Channel definitions from. Use the specified
1684 // replay_configuration_ if it has been provided.
1685 const Configuration *const base_config = replay_configuration_ == nullptr
1686 ? logged_configuration()
1687 : replay_configuration_;
1688 // The remapped config will be identical to the base_config, except that it
1689 // will have a bunch of extra channels in the channel list, which are exact
1690 // copies of the remapped channels, but with different names.
1691 // Because the flatbuffers API is a pain to work with, this requires a bit of
1692 // a song-and-dance to get copied over.
1693 // The order of operations is to:
1694 // 1) Make a flatbuffer builder for a config that will just contain a list of
1695 // the new channels that we want to add.
1696 // 2) For each channel that we are remapping:
1697 // a) Make a buffer/builder and construct into it a Channel table that only
1698 // contains the new name for the channel.
1699 // b) Merge the new channel with just the name into the channel that we are
1700 // trying to copy, built in the flatbuffer builder made in 1. This gives
1701 // us the new channel definition that we need.
1702 // 3) Using this list of offsets, build the Configuration of just new
1703 // Channels.
1704 // 4) Merge the Configuration with the new Channels into the base_config.
1705 // 5) Call MergeConfiguration() on that result to give MergeConfiguration a
1706 // chance to sanitize the config.
1707
1708 // This is the builder that we use for the config containing all the new
1709 // channels.
1710 flatbuffers::FlatBufferBuilder new_config_fbb;
Austin Schuhd7b15da2020-02-17 15:06:11 -08001711 new_config_fbb.ForceDefaults(true);
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -08001712 std::vector<flatbuffers::Offset<Channel>> channel_offsets;
1713 for (auto &pair : remapped_channels_) {
1714 // This is the builder that we use for creating the Channel with just the
1715 // new name.
1716 flatbuffers::FlatBufferBuilder new_name_fbb;
Austin Schuhd7b15da2020-02-17 15:06:11 -08001717 new_name_fbb.ForceDefaults(true);
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -08001718 const flatbuffers::Offset<flatbuffers::String> name_offset =
1719 new_name_fbb.CreateString(pair.second);
1720 ChannelBuilder new_name_builder(new_name_fbb);
1721 new_name_builder.add_name(name_offset);
1722 new_name_fbb.Finish(new_name_builder.Finish());
1723 const FlatbufferDetachedBuffer<Channel> new_name = new_name_fbb.Release();
Austin Schuh2f8fd752020-09-01 22:38:28 -07001724 // Retrieve the channel that we want to copy, confirming that it is
1725 // actually present in base_config.
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -08001726 const Channel *const base_channel = CHECK_NOTNULL(configuration::GetChannel(
1727 base_config, logged_configuration()->channels()->Get(pair.first), "",
1728 nullptr));
1729 // Actually create the new channel and put it into the vector of Offsets
1730 // that we will use to create the new Configuration.
1731 channel_offsets.emplace_back(MergeFlatBuffers<Channel>(
1732 reinterpret_cast<const flatbuffers::Table *>(base_channel),
1733 reinterpret_cast<const flatbuffers::Table *>(&new_name.message()),
1734 &new_config_fbb));
1735 }
1736 // Create the Configuration containing the new channels that we want to add.
Austin Schuh01b4c352020-09-21 23:09:39 -07001737 const auto new_channel_vector_offsets =
Austin Schuhfa895892020-01-07 20:07:41 -08001738 new_config_fbb.CreateVector(channel_offsets);
Austin Schuh01b4c352020-09-21 23:09:39 -07001739
1740 // Now create the new maps.
1741 std::vector<flatbuffers::Offset<Map>> map_offsets;
1742 for (const MapT &map : maps_) {
1743 const flatbuffers::Offset<flatbuffers::String> match_name_offset =
1744 new_config_fbb.CreateString(map.match->name);
1745 const flatbuffers::Offset<flatbuffers::String> match_type_offset =
1746 new_config_fbb.CreateString(map.match->type);
1747 const flatbuffers::Offset<flatbuffers::String> rename_name_offset =
1748 new_config_fbb.CreateString(map.rename->name);
1749 flatbuffers::Offset<flatbuffers::String> match_source_node_offset;
1750 if (!map.match->source_node.empty()) {
1751 match_source_node_offset =
1752 new_config_fbb.CreateString(map.match->source_node);
1753 }
1754 Channel::Builder match_builder(new_config_fbb);
1755 match_builder.add_name(match_name_offset);
1756 match_builder.add_type(match_type_offset);
1757 if (!map.match->source_node.empty()) {
1758 match_builder.add_source_node(match_source_node_offset);
1759 }
1760 const flatbuffers::Offset<Channel> match_offset = match_builder.Finish();
1761
1762 Channel::Builder rename_builder(new_config_fbb);
1763 rename_builder.add_name(rename_name_offset);
1764 const flatbuffers::Offset<Channel> rename_offset = rename_builder.Finish();
1765
1766 Map::Builder map_builder(new_config_fbb);
1767 map_builder.add_match(match_offset);
1768 map_builder.add_rename(rename_offset);
1769 map_offsets.emplace_back(map_builder.Finish());
1770 }
1771
1772 const auto new_maps_offsets = new_config_fbb.CreateVector(map_offsets);
1773
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -08001774 ConfigurationBuilder new_config_builder(new_config_fbb);
Austin Schuh01b4c352020-09-21 23:09:39 -07001775 new_config_builder.add_channels(new_channel_vector_offsets);
1776 new_config_builder.add_maps(new_maps_offsets);
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -08001777 new_config_fbb.Finish(new_config_builder.Finish());
1778 const FlatbufferDetachedBuffer<Configuration> new_name_config =
1779 new_config_fbb.Release();
1780 // Merge the new channels configuration into the base_config, giving us the
1781 // remapped configuration.
1782 remapped_configuration_buffer_ =
1783 std::make_unique<FlatbufferDetachedBuffer<Configuration>>(
1784 MergeFlatBuffers<Configuration>(base_config,
1785 &new_name_config.message()));
1786 // Call MergeConfiguration to deal with sanitizing the config.
1787 remapped_configuration_buffer_ =
1788 std::make_unique<FlatbufferDetachedBuffer<Configuration>>(
1789 configuration::MergeConfiguration(*remapped_configuration_buffer_));
1790
1791 remapped_configuration_ = &remapped_configuration_buffer_->message();
1792}
1793
Austin Schuh6f3babe2020-01-26 20:34:50 -08001794const Channel *LogReader::RemapChannel(const EventLoop *event_loop,
1795 const Channel *channel) {
1796 std::string_view channel_name = channel->name()->string_view();
1797 std::string_view channel_type = channel->type()->string_view();
1798 const int channel_index =
1799 configuration::ChannelIndex(logged_configuration(), channel);
1800 // If the channel is remapped, find the correct channel name to use.
1801 if (remapped_channels_.count(channel_index) > 0) {
Austin Schuhee711052020-08-24 16:06:09 -07001802 VLOG(3) << "Got remapped channel on "
Austin Schuh6f3babe2020-01-26 20:34:50 -08001803 << configuration::CleanedChannelToString(channel);
1804 channel_name = remapped_channels_[channel_index];
1805 }
1806
Austin Schuhee711052020-08-24 16:06:09 -07001807 VLOG(2) << "Going to remap channel " << channel_name << " " << channel_type;
Austin Schuh6f3babe2020-01-26 20:34:50 -08001808 const Channel *remapped_channel = configuration::GetChannel(
1809 event_loop->configuration(), channel_name, channel_type,
1810 event_loop->name(), event_loop->node());
1811
1812 CHECK(remapped_channel != nullptr)
1813 << ": Unable to send {\"name\": \"" << channel_name << "\", \"type\": \""
1814 << channel_type << "\"} because it is not in the provided configuration.";
1815
1816 return remapped_channel;
1817}
1818
Austin Schuh858c9f32020-08-31 16:56:12 -07001819LogReader::State::State(std::unique_ptr<ChannelMerger> channel_merger)
1820 : channel_merger_(std::move(channel_merger)) {}
1821
1822EventLoop *LogReader::State::SetNodeEventLoopFactory(
1823 NodeEventLoopFactory *node_event_loop_factory) {
1824 node_event_loop_factory_ = node_event_loop_factory;
1825 event_loop_unique_ptr_ =
1826 node_event_loop_factory_->MakeEventLoop("log_reader");
1827 return event_loop_unique_ptr_.get();
1828}
1829
1830void LogReader::State::SetChannelCount(size_t count) {
1831 channels_.resize(count);
1832 filters_.resize(count);
1833 channel_target_event_loop_factory_.resize(count);
1834}
1835
1836void LogReader::State::SetChannel(
1837 size_t channel, std::unique_ptr<RawSender> sender,
Austin Schuh2f8fd752020-09-01 22:38:28 -07001838 message_bridge::NoncausalOffsetEstimator *filter,
Austin Schuh858c9f32020-08-31 16:56:12 -07001839 NodeEventLoopFactory *channel_target_event_loop_factory) {
1840 channels_[channel] = std::move(sender);
1841 filters_[channel] = filter;
1842 channel_target_event_loop_factory_[channel] =
1843 channel_target_event_loop_factory;
1844}
1845
1846std::tuple<TimestampMerger::DeliveryTimestamp, int,
1847 FlatbufferVector<MessageHeader>>
1848LogReader::State::PopOldest(bool *update_time) {
1849 CHECK_GT(sorted_messages_.size(), 0u);
1850
1851 std::tuple<TimestampMerger::DeliveryTimestamp, int,
Austin Schuh2f8fd752020-09-01 22:38:28 -07001852 FlatbufferVector<MessageHeader>,
1853 message_bridge::NoncausalOffsetEstimator *>
Austin Schuh858c9f32020-08-31 16:56:12 -07001854 result = std::move(sorted_messages_.front());
Austin Schuh2f8fd752020-09-01 22:38:28 -07001855 VLOG(2) << MaybeNodeName(event_loop_->node()) << "PopOldest Popping "
Austin Schuh858c9f32020-08-31 16:56:12 -07001856 << std::get<0>(result).monotonic_event_time;
1857 sorted_messages_.pop_front();
1858 SeedSortedMessages();
1859
Austin Schuh2f8fd752020-09-01 22:38:28 -07001860 if (std::get<3>(result) != nullptr) {
1861 *update_time = std::get<3>(result)->Pop(
1862 event_loop_->node(), std::get<0>(result).monotonic_event_time);
1863 } else {
1864 *update_time = false;
1865 }
Austin Schuh858c9f32020-08-31 16:56:12 -07001866 return std::make_tuple(std::get<0>(result), std::get<1>(result),
1867 std::move(std::get<2>(result)));
1868}
1869
1870monotonic_clock::time_point LogReader::State::OldestMessageTime() const {
1871 if (sorted_messages_.size() > 0) {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001872 VLOG(2) << MaybeNodeName(event_loop_->node()) << "oldest message at "
Austin Schuh858c9f32020-08-31 16:56:12 -07001873 << std::get<0>(sorted_messages_.front()).monotonic_event_time;
1874 return std::get<0>(sorted_messages_.front()).monotonic_event_time;
1875 }
1876
1877 return channel_merger_->OldestMessageTime();
1878}
1879
1880void LogReader::State::SeedSortedMessages() {
1881 const aos::monotonic_clock::time_point end_queue_time =
1882 (sorted_messages_.size() > 0
1883 ? std::get<0>(sorted_messages_.front()).monotonic_event_time
1884 : channel_merger_->monotonic_start_time()) +
1885 std::chrono::seconds(2);
1886
1887 while (true) {
1888 if (channel_merger_->OldestMessageTime() == monotonic_clock::max_time) {
1889 return;
1890 }
1891 if (sorted_messages_.size() > 0) {
1892 // Stop placing sorted messages on the list once we have 2 seconds
1893 // queued up (but queue at least until the log starts).
1894 if (end_queue_time <
1895 std::get<0>(sorted_messages_.back()).monotonic_event_time) {
1896 return;
1897 }
1898 }
1899
1900 TimestampMerger::DeliveryTimestamp channel_timestamp;
1901 int channel_index;
1902 FlatbufferVector<MessageHeader> channel_data =
1903 FlatbufferVector<MessageHeader>::Empty();
1904
Austin Schuh2f8fd752020-09-01 22:38:28 -07001905 message_bridge::NoncausalOffsetEstimator *filter = nullptr;
1906
Austin Schuh858c9f32020-08-31 16:56:12 -07001907 std::tie(channel_timestamp, channel_index, channel_data) =
1908 channel_merger_->PopOldest();
1909
Austin Schuh2f8fd752020-09-01 22:38:28 -07001910 // Skip any messages without forwarding information.
1911 if (channel_timestamp.monotonic_remote_time != monotonic_clock::min_time) {
1912 // Got a forwarding timestamp!
1913 filter = filters_[channel_index];
1914
1915 CHECK(filter != nullptr);
1916
1917 // Call the correct method depending on if we are the forward or
1918 // reverse direction here.
1919 filter->Sample(event_loop_->node(),
1920 channel_timestamp.monotonic_event_time,
1921 channel_timestamp.monotonic_remote_time);
1922 }
Austin Schuh858c9f32020-08-31 16:56:12 -07001923 sorted_messages_.emplace_back(channel_timestamp, channel_index,
Austin Schuh2f8fd752020-09-01 22:38:28 -07001924 std::move(channel_data), filter);
Austin Schuh858c9f32020-08-31 16:56:12 -07001925 }
1926}
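
// SeedSortedMessages() above implements a bounded read-ahead: keep pulling
// from the sorted source until roughly two seconds of messages sit in the
// buffer past the front one.  A minimal standalone sketch of the same pattern
// (the ExampleMessage type and pop_oldest callback are stand-ins):
#include <chrono>
#include <deque>
#include <functional>
#include <optional>

struct ExampleMessage {
  std::chrono::steady_clock::time_point event_time;
};

void SeedSketch(
    std::deque<ExampleMessage> *buffer,
    const std::function<std::optional<ExampleMessage>()> &pop_oldest) {
  constexpr std::chrono::seconds kHorizon{2};
  while (true) {
    if (!buffer->empty() &&
        buffer->back().event_time > buffer->front().event_time + kHorizon) {
      return;  // Roughly kHorizon of data is already buffered.
    }
    std::optional<ExampleMessage> m = pop_oldest();
    if (!m) {
      return;  // Source exhausted.
    }
    buffer->push_back(*m);
  }
}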
1927
1928void LogReader::State::Deregister() {
1929 for (size_t i = 0; i < channels_.size(); ++i) {
1930 channels_[i].reset();
1931 }
1932 event_loop_unique_ptr_.reset();
1933 event_loop_ = nullptr;
1934 timer_handler_ = nullptr;
1935 node_event_loop_factory_ = nullptr;
1936}
1937
Austin Schuhe309d2a2019-11-29 13:25:21 -08001938} // namespace logger
1939} // namespace aos