blob: 859e24c5af4e1dffdf318c4bd3adf91957e9841c [file] [log] [blame]
James Kuszmaul38735e82019-12-07 16:42:06 -08001#include "aos/events/logging/logger.h"
Austin Schuhe309d2a2019-11-29 13:25:21 -08002
3#include <fcntl.h>
Austin Schuh4c4e0092019-12-22 16:18:03 -08004#include <limits.h>
Austin Schuhe309d2a2019-11-29 13:25:21 -08005#include <sys/stat.h>
6#include <sys/types.h>
7#include <sys/uio.h>
8#include <vector>
9
Austin Schuh8bd96322020-02-13 21:18:22 -080010#include "Eigen/Dense"
Austin Schuh2f8fd752020-09-01 22:38:28 -070011#include "absl/strings/escaping.h"
Austin Schuhe309d2a2019-11-29 13:25:21 -080012#include "absl/types/span.h"
13#include "aos/events/event_loop.h"
James Kuszmaul38735e82019-12-07 16:42:06 -080014#include "aos/events/logging/logger_generated.h"
Austin Schuh64fab802020-09-09 22:47:47 -070015#include "aos/events/logging/uuid.h"
Austin Schuhe309d2a2019-11-29 13:25:21 -080016#include "aos/flatbuffer_merge.h"
Austin Schuh288479d2019-12-18 19:47:52 -080017#include "aos/network/team_number.h"
Austin Schuhe309d2a2019-11-29 13:25:21 -080018#include "aos/time/time.h"
19#include "flatbuffers/flatbuffers.h"
Austin Schuh2f8fd752020-09-01 22:38:28 -070020#include "third_party/gmp/gmpxx.h"
Austin Schuhe309d2a2019-11-29 13:25:21 -080021
Austin Schuh15649d62019-12-28 16:36:38 -080022DEFINE_bool(skip_missing_forwarding_entries, false,
23 "If true, drop any forwarding entries with missing data. If "
24 "false, CHECK.");
Austin Schuhe309d2a2019-11-29 13:25:21 -080025
Austin Schuh8bd96322020-02-13 21:18:22 -080026DEFINE_bool(timestamps_to_csv, false,
27 "If true, write all the time synchronization information to a set "
28 "of CSV files in /tmp/. This should only be needed when debugging "
29 "time synchronization.");
30
Austin Schuh2f8fd752020-09-01 22:38:28 -070031DEFINE_bool(skip_order_validation, false,
32 "If true, ignore any out of orderness in replay");
33
Austin Schuhe309d2a2019-11-29 13:25:21 -080034namespace aos {
35namespace logger {
Austin Schuhe309d2a2019-11-29 13:25:21 -080036namespace chrono = std::chrono;
37
// Constructs a Logger bound to `event_loop`.  Walks `configuration` to decide,
// per channel, whether to log the message data, the delivery timestamps, or
// the forwarded remote-timestamp contents, and builds a FetcherStruct for each
// channel that needs any of those.  `should_log` filters which channels are
// logged at all.
Logger::Logger(EventLoop *event_loop, const Configuration *configuration,
               std::function<bool(const Channel *)> should_log)
    : event_loop_(event_loop),
      configuration_(configuration),
      name_(network::GetHostname()),
      // Periodic timer which drives DoLogData off the event loop clock.
      timer_handler_(event_loop_->AddTimer(
          [this]() { DoLogData(event_loop_->monotonic_now()); })),
      // Only multi-node configurations have ServerStatistics to fetch; on a
      // single node this stays a default (null) Fetcher.
      server_statistics_fetcher_(
          configuration::MultiNode(event_loop_->configuration())
              ? event_loop_->MakeFetcher<message_bridge::ServerStatistics>(
                    "/aos")
              : aos::Fetcher<message_bridge::ServerStatistics>()) {
  VLOG(1) << "Creating logger for " << FlatbufferToJson(event_loop_->node());

  // Find all the nodes which are logging timestamps on our node.
  std::set<const Node *> timestamp_logger_nodes;
  for (const Channel *channel : *configuration_->channels()) {
    // Only channels we can send on (i.e. that originate here) are candidates.
    if (!configuration::ChannelIsSendableOnNode(channel, event_loop_->node())) {
      continue;
    }
    if (!channel->has_destination_nodes()) {
      continue;
    }
    if (!should_log(channel)) {
      continue;
    }
    for (const Connection *connection : *channel->destination_nodes()) {
      const Node *other_node = configuration::GetNode(
          configuration_, connection->name()->string_view());

      if (configuration::ConnectionDeliveryTimeIsLoggedOnNode(
              connection, event_loop_->node())) {
        VLOG(1) << "Timestamps are logged from "
                << FlatbufferToJson(other_node);
        timestamp_logger_nodes.insert(other_node);
      }
    }
  }

  // Maps each remote-timestamp channel to the node whose timestamps it
  // carries, so the per-channel loop below can detect them.
  std::map<const Channel *, const Node *> timestamp_logger_channels;

  // Now that we have all the nodes accumulated, make remote timestamp loggers
  // for them.
  for (const Node *node : timestamp_logger_nodes) {
    const Channel *channel = configuration::GetChannel(
        configuration_,
        absl::StrCat("/aos/remote_timestamps/", node->name()->string_view()),
        logger::MessageHeader::GetFullyQualifiedName(), event_loop_->name(),
        event_loop_->node());

    CHECK(channel != nullptr)
        << ": Remote timestamps are logged on "
        << event_loop_->node()->name()->string_view()
        << " but can't find channel /aos/remote_timestamps/"
        << node->name()->string_view();
    if (!should_log(channel)) {
      continue;
    }
    timestamp_logger_channels.insert(std::make_pair(channel, node));
  }

  const size_t our_node_index =
      configuration::GetNodeIndex(configuration_, event_loop_->node());

  for (size_t channel_index = 0;
       channel_index < configuration_->channels()->size(); ++channel_index) {
    const Channel *const config_channel =
        configuration_->channels()->Get(channel_index);
    // The MakeRawFetcher method needs a channel which is in the event loop
    // configuration() object, not the configuration_ object.  Go look that up
    // from the config.
    const Channel *channel = aos::configuration::GetChannel(
        event_loop_->configuration(), config_channel->name()->string_view(),
        config_channel->type()->string_view(), "", event_loop_->node());
    if (!should_log(channel)) {
      continue;
    }

    FetcherStruct fs;
    fs.node_index = our_node_index;
    fs.channel_index = channel_index;
    fs.channel = channel;

    const bool is_local =
        configuration::ChannelIsSendableOnNode(channel, event_loop_->node());

    const bool is_readable =
        configuration::ChannelIsReadableOnNode(channel, event_loop_->node());
    const bool is_logged = configuration::ChannelMessageIsLoggedOnNode(
        channel, event_loop_->node());
    // Data is only logged when this node is both configured to log it and
    // able to read it.
    const bool log_message = is_logged && is_readable;

    bool log_delivery_times = false;
    if (event_loop_->node() != nullptr) {
      log_delivery_times = configuration::ConnectionDeliveryTimeIsLoggedOnNode(
          channel, event_loop_->node(), event_loop_->node());
    }

    // Now, detect a MessageHeader timestamp logger where we should just log the
    // contents to a file directly.
    const bool log_contents = timestamp_logger_channels.find(channel) !=
                              timestamp_logger_channels.end();

    if (log_message || log_delivery_times || log_contents) {
      fs.fetcher = event_loop->MakeRawFetcher(channel);
      VLOG(1) << "Logging channel "
              << configuration::CleanedChannelToString(channel);

      if (log_delivery_times) {
        VLOG(1) << " Delivery times";
        fs.wants_timestamp_writer = true;
      }
      if (log_message) {
        VLOG(1) << " Data";
        fs.wants_writer = true;
        // Remote data gets the remote-message log type so replay knows its
        // origin.
        if (!is_local) {
          fs.log_type = LogType::kLogRemoteMessage;
        }
      }
      if (log_contents) {
        VLOG(1) << "Timestamp logger channel "
                << configuration::CleanedChannelToString(channel);
        fs.timestamp_node = timestamp_logger_channels.find(channel)->second;
        fs.wants_contents_writer = true;
        // Contents are attributed to the remote node, not ourselves.
        fs.node_index =
            configuration::GetNodeIndex(configuration_, fs.timestamp_node);
      }
      fetchers_.emplace_back(std::move(fs));
    }
  }
}
169
// Flushes any remaining buffered data before destruction if logging is still
// active.
Logger::~Logger() {
  if (log_namer_) {
    // If we are replaying a log file, or in simulation, we want to force the
    // last bit of data to be logged.  The easiest way to deal with this is to
    // poll everything as we go to destroy the class, ie, shut down the logger,
    // and write it to disk.
    StopLogging(event_loop_->monotonic_now());
  }
}
179
// Starts logging using `log_namer` to name and create the output files.
// Creates the per-channel writers, builds per-node headers, fetches the
// latest message on every channel, writes the headers, and arms the polling
// timer.  CHECK-fails if logging is already running.
void Logger::StartLogging(std::unique_ptr<LogNamer> log_namer) {
  CHECK(!log_namer_) << ": Already logging";
  log_namer_ = std::move(log_namer);
  uuid_ = UUID::Random();
  VLOG(1) << "Starting logger for " << FlatbufferToJson(event_loop_->node());

  // We want to do as much work as possible before the initial Fetch. Time
  // between that and actually starting to log opens up the possibility of
  // falling off the end of the queue during that time.

  for (FetcherStruct &f : fetchers_) {
    if (f.wants_writer) {
      f.writer = log_namer_->MakeWriter(f.channel);
    }
    if (f.wants_timestamp_writer) {
      f.timestamp_writer = log_namer_->MakeTimestampWriter(f.channel);
    }
    if (f.wants_contents_writer) {
      f.contents_writer = log_namer_->MakeForwardedTimestampWriter(
          f.channel, CHECK_NOTNULL(f.timestamp_node));
    }
  }

  CHECK(node_state_.empty());
  // One state slot per node in a multi-node config; a single slot otherwise.
  node_state_.resize(configuration::MultiNode(configuration_)
                         ? configuration_->nodes()->size()
                         : 1u);

  for (const Node *node : log_namer_->nodes()) {
    const int node_index = configuration::GetNodeIndex(configuration_, node);

    node_state_[node_index].log_file_header = MakeHeader(node);
  }

  // Grab data from each channel right before we declare the log file started
  // so we can capture the latest message on each channel.  This lets us have
  // non periodic messages with configuration that now get logged.
  for (FetcherStruct &f : fetchers_) {
    // A failed Fetch means there is nothing pending, so mark it written.
    f.written = !f.fetcher->Fetch();
  }

  // Clear out any old timestamps in case we are re-starting logging.
  for (size_t i = 0; i < node_state_.size(); ++i) {
    SetStartTime(i, monotonic_clock::min_time, realtime_clock::min_time);
  }

  WriteHeader();

  LOG(INFO) << "Logging node as " << FlatbufferToJson(event_loop_->node())
            << " start_time " << last_synchronized_time_;

  timer_handler_->Setup(event_loop_->monotonic_now() + polling_period_,
                        polling_period_);
}
234
// Stops logging, optionally flushing all data up to `end_time` first (pass
// aos::monotonic_clock::min_time to skip the final flush).  Returns the
// LogNamer so the caller can reuse or inspect it.  CHECK-fails if logging is
// not running.
std::unique_ptr<LogNamer> Logger::StopLogging(
    aos::monotonic_clock::time_point end_time) {
  CHECK(log_namer_) << ": Not logging right now";

  if (end_time != aos::monotonic_clock::min_time) {
    LogUntil(end_time);
  }
  timer_handler_->Disable();

  // Drop all the writers; the LogNamer owns flushing/closing semantics.
  for (FetcherStruct &f : fetchers_) {
    f.writer = nullptr;
    f.timestamp_writer = nullptr;
    f.contents_writer = nullptr;
  }
  node_state_.clear();

  return std::move(log_namer_);
}
253
// Declares the log "started" at the current event loop time and writes the
// (possibly timestamp-updated) header for every node's log file.
void Logger::WriteHeader() {
  // ServerStatistics only exists on multi-node configurations.
  if (configuration::MultiNode(configuration_)) {
    server_statistics_fetcher_.Fetch();
  }

  aos::monotonic_clock::time_point monotonic_start_time =
      event_loop_->monotonic_now();
  aos::realtime_clock::time_point realtime_start_time =
      event_loop_->realtime_now();

  // We need to pick a point in time to declare the log file "started".  This
  // starts here.  It needs to be after everything is fetched so that the
  // fetchers are all pointed at the most recent message before the start
  // time.
  last_synchronized_time_ = monotonic_start_time;

  for (const Node *node : log_namer_->nodes()) {
    const int node_index = configuration::GetNodeIndex(configuration_, node);
    MaybeUpdateTimestamp(node, node_index, monotonic_start_time,
                         realtime_start_time);
    log_namer_->WriteHeader(&node_state_[node_index].log_file_header, node);
  }
}
Austin Schuh8bd96322020-02-13 21:18:22 -0800277
// For remote nodes whose start time was unknown when the header was first
// written, retries computing it from the latest ServerStatistics and rotates
// that node's log file with the updated header on success.  No-op on
// single-node configurations or when no statistics are available yet.
void Logger::WriteMissingTimestamps() {
  if (configuration::MultiNode(configuration_)) {
    server_statistics_fetcher_.Fetch();
  } else {
    return;
  }

  if (server_statistics_fetcher_.get() == nullptr) {
    return;
  }

  for (const Node *node : log_namer_->nodes()) {
    const int node_index = configuration::GetNodeIndex(configuration_, node);
    if (MaybeUpdateTimestamp(
            node, node_index,
            server_statistics_fetcher_.context().monotonic_event_time,
            server_statistics_fetcher_.context().realtime_event_time)) {
      // Start time changed: rotate so the new header lands in a fresh part.
      log_namer_->Rotate(node, &node_state_[node_index].log_file_header);
    }
  }
}
299
// Records the start times for `node_index` in node_state_ and mutates the
// already-built flatbuffer header in place to match.  The realtime start time
// is only written if the header has that field populated (it is only added
// for our own node in MakeHeader).
void Logger::SetStartTime(size_t node_index,
                          aos::monotonic_clock::time_point monotonic_start_time,
                          aos::realtime_clock::time_point realtime_start_time) {
  node_state_[node_index].monotonic_start_time = monotonic_start_time;
  node_state_[node_index].realtime_start_time = realtime_start_time;
  node_state_[node_index]
      .log_file_header.mutable_message()
      ->mutate_monotonic_start_time(
          std::chrono::duration_cast<std::chrono::nanoseconds>(
              monotonic_start_time.time_since_epoch())
              .count());
  // mutate_* can only change fields that already exist in the flatbuffer, so
  // guard on the field's presence.
  if (node_state_[node_index]
          .log_file_header.mutable_message()
          ->has_realtime_start_time()) {
    node_state_[node_index]
        .log_file_header.mutable_message()
        ->mutate_realtime_start_time(
            std::chrono::duration_cast<std::chrono::nanoseconds>(
                realtime_start_time.time_since_epoch())
                .count());
  }
}
322
// Attempts to set the start time for `node`.  Returns true if the start time
// was set (and the header therefore changed), false if it was already set or
// cannot be determined yet.  For a remote node this requires a CONNECTED
// message-bridge connection with a known monotonic offset, which is applied
// to translate our start time into that node's clock.
bool Logger::MaybeUpdateTimestamp(
    const Node *node, int node_index,
    aos::monotonic_clock::time_point monotonic_start_time,
    aos::realtime_clock::time_point realtime_start_time) {
  // Bail early if the start times are already set.
  if (node_state_[node_index].monotonic_start_time !=
      monotonic_clock::min_time) {
    return false;
  }
  if (configuration::MultiNode(configuration_)) {
    if (event_loop_->node() == node) {
      // There are no offsets to compute for ourself, so always succeed.
      SetStartTime(node_index, monotonic_start_time, realtime_start_time);
      return true;
    } else if (server_statistics_fetcher_.get() != nullptr) {
      // We must be a remote node now.  Look for the connection and see if it is
      // connected.

      for (const message_bridge::ServerConnection *connection :
           *server_statistics_fetcher_->connections()) {
        if (connection->node()->name()->string_view() !=
            node->name()->string_view()) {
          continue;
        }

        if (connection->state() != message_bridge::State::CONNECTED) {
          VLOG(1) << node->name()->string_view()
                  << " is not connected, can't start it yet.";
          break;
        }

        if (!connection->has_monotonic_offset()) {
          VLOG(1) << "Missing monotonic offset for setting start time for node "
                  << aos::FlatbufferToJson(node);
          break;
        }

        VLOG(1) << "Updating start time for " << aos::FlatbufferToJson(node);

        // Found it and it is connected.  Compensate and go.
        monotonic_start_time +=
            std::chrono::nanoseconds(connection->monotonic_offset());

        SetStartTime(node_index, monotonic_start_time, realtime_start_time);
        return true;
      }
    }
  } else {
    // Single-node: no clock translation needed.
    SetStartTime(node_index, monotonic_start_time, realtime_start_time);
    return true;
  }
  return false;
}
376
// Builds the size-prefixed LogFileHeader flatbuffer for `node`'s log file.
// Start times are written as min_time placeholders here; SetStartTime mutates
// them in place later once the real start times are known.
aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> Logger::MakeHeader(
    const Node *node) {
  // Now write the header with this timestamp in it.
  flatbuffers::FlatBufferBuilder fbb;
  // Force all fields (even default-valued ones) to be serialized so they can
  // be mutated in place later.
  fbb.ForceDefaults(true);

  // TODO(austin): Compress this much more efficiently.  There are a bunch of
  // duplicated schemas.
  flatbuffers::Offset<aos::Configuration> configuration_offset =
      CopyFlatBuffer(configuration_, &fbb);

  flatbuffers::Offset<flatbuffers::String> name_offset =
      fbb.CreateString(name_);

  CHECK(uuid_ != UUID::Zero());
  flatbuffers::Offset<flatbuffers::String> logger_uuid_offset =
      fbb.CreateString(uuid_.string_view());

  // Placeholder parts UUID (nil-style v4 pattern); parts_index starts at 0.
  flatbuffers::Offset<flatbuffers::String> parts_uuid_offset =
      fbb.CreateString("00000000-0000-4000-8000-000000000000");

  flatbuffers::Offset<Node> node_offset;

  if (configuration::MultiNode(configuration_)) {
    node_offset = CopyFlatBuffer(node, &fbb);
  }

  aos::logger::LogFileHeader::Builder log_file_header_builder(fbb);

  log_file_header_builder.add_name(name_offset);

  // Only add the node if we are running in a multinode configuration.
  if (node != nullptr) {
    log_file_header_builder.add_node(node_offset);
  }

  log_file_header_builder.add_configuration(configuration_offset);
  // The worst case theoretical out of order is the polling period times 2.
  // One message could get logged right after the boundary, but be for right
  // before the next boundary.  And the reverse could happen for another
  // message.  Report back 3x to be extra safe, and because the cost isn't
  // huge on the read side.
  log_file_header_builder.add_max_out_of_order_duration(
      std::chrono::nanoseconds(3 * polling_period_).count());

  log_file_header_builder.add_monotonic_start_time(
      std::chrono::duration_cast<std::chrono::nanoseconds>(
          monotonic_clock::min_time.time_since_epoch())
          .count());
  // Realtime start time is only meaningful on our own node's clock.
  if (node == event_loop_->node()) {
    log_file_header_builder.add_realtime_start_time(
        std::chrono::duration_cast<std::chrono::nanoseconds>(
            realtime_clock::min_time.time_since_epoch())
            .count());
  }

  log_file_header_builder.add_logger_uuid(logger_uuid_offset);

  log_file_header_builder.add_parts_uuid(parts_uuid_offset);
  log_file_header_builder.add_parts_index(0);

  fbb.FinishSizePrefixed(log_file_header_builder.Finish());
  return fbb.Release();
}
441
// Rotates every node's log file, re-emitting the current header into the new
// part via the LogNamer.
void Logger::Rotate() {
  for (const Node *node : log_namer_->nodes()) {
    const int node_index = configuration::GetNodeIndex(configuration_, node);
    log_namer_->Rotate(node, &node_state_[node_index].log_file_header);
  }
}
448
// Drains every channel's fetcher, writing all messages with a monotonic event
// time strictly before `t` to the appropriate writers (data, delivery
// timestamps, and/or forwarded-timestamp contents), then records `t` as the
// last synchronized time.
void Logger::LogUntil(monotonic_clock::time_point t) {
  WriteMissingTimestamps();

  // Write each channel to disk, one at a time.
  for (FetcherStruct &f : fetchers_) {
    while (true) {
      if (f.written) {
        if (!f.fetcher->FetchNext()) {
          VLOG(2) << "No new data on "
                  << configuration::CleanedChannelToString(
                         f.fetcher->channel());
          break;
        } else {
          f.written = false;
        }
      }

      CHECK(!f.written);

      // TODO(james): Write tests to exercise this logic.
      if (f.fetcher->context().monotonic_event_time < t) {
        if (f.writer != nullptr) {
          // Write!
          // Pre-size the builder with the payload size plus the largest header
          // seen so far to avoid reallocation.
          flatbuffers::FlatBufferBuilder fbb(f.fetcher->context().size +
                                             max_header_size_);
          fbb.ForceDefaults(true);

          fbb.FinishSizePrefixed(PackMessage(&fbb, f.fetcher->context(),
                                             f.channel_index, f.log_type));

          VLOG(2) << "Writing data as node "
                  << FlatbufferToJson(event_loop_->node()) << " for channel "
                  << configuration::CleanedChannelToString(f.fetcher->channel())
                  << " to " << f.writer->filename() << " data "
                  << FlatbufferToJson(
                         flatbuffers::GetSizePrefixedRoot<MessageHeader>(
                             fbb.GetBufferPointer()));

          // Track the largest observed header overhead for future pre-sizing.
          max_header_size_ = std::max(
              max_header_size_, fbb.GetSize() - f.fetcher->context().size);
          f.writer->QueueSizedFlatbuffer(&fbb);
        }

        if (f.timestamp_writer != nullptr) {
          // And now handle timestamps.
          flatbuffers::FlatBufferBuilder fbb;
          fbb.ForceDefaults(true);

          fbb.FinishSizePrefixed(PackMessage(&fbb, f.fetcher->context(),
                                             f.channel_index,
                                             LogType::kLogDeliveryTimeOnly));

          VLOG(2) << "Writing timestamps as node "
                  << FlatbufferToJson(event_loop_->node()) << " for channel "
                  << configuration::CleanedChannelToString(f.fetcher->channel())
                  << " to " << f.timestamp_writer->filename() << " timestamp "
                  << FlatbufferToJson(
                         flatbuffers::GetSizePrefixedRoot<MessageHeader>(
                             fbb.GetBufferPointer()));

          f.timestamp_writer->QueueSizedFlatbuffer(&fbb);
        }

        if (f.contents_writer != nullptr) {
          // And now handle the special message contents channel.  Copy the
          // message into a FlatBufferBuilder and save it to disk.
          // TODO(austin): We can be more efficient here when we start to
          // care...
          flatbuffers::FlatBufferBuilder fbb;
          fbb.ForceDefaults(true);

          // The payload of this channel is itself a MessageHeader describing a
          // remotely delivered message.
          const MessageHeader *msg =
              flatbuffers::GetRoot<MessageHeader>(f.fetcher->context().data);

          logger::MessageHeader::Builder message_header_builder(fbb);

          // Note: this must match the same order as MessageBridgeServer and
          // PackMessage.  We want identical headers to have identical
          // on-the-wire formats to make comparing them easier.
          message_header_builder.add_channel_index(msg->channel_index());

          message_header_builder.add_queue_index(msg->queue_index());
          message_header_builder.add_monotonic_sent_time(
              msg->monotonic_sent_time());
          message_header_builder.add_realtime_sent_time(
              msg->realtime_sent_time());

          message_header_builder.add_monotonic_remote_time(
              msg->monotonic_remote_time());
          message_header_builder.add_realtime_remote_time(
              msg->realtime_remote_time());
          message_header_builder.add_remote_queue_index(
              msg->remote_queue_index());

          fbb.FinishSizePrefixed(message_header_builder.Finish());

          f.contents_writer->QueueSizedFlatbuffer(&fbb);
        }

        f.written = true;
      } else {
        // Message is at or after t; leave it for the next cycle.
        break;
      }
    }
  }
  last_synchronized_time_ = t;
}
556
// Timer callback: advances the log in polling_period_-sized sync steps until
// last_synchronized_time_ catches up to `end_time`, invoking
// on_logged_period_ after each step.
void Logger::DoLogData(const monotonic_clock::time_point end_time) {
  // We want to guarantee that messages aren't out of order by more than
  // max_out_of_order_duration.  To do this, we need sync points.  Every write
  // cycle should be a sync point.

  do {
    // Move the sync point up by at most polling_period.  This forces one sync
    // per iteration, even if it is small.
    LogUntil(std::min(last_synchronized_time_ + polling_period_, end_time));

    on_logged_period_();

    // If we missed cycles, we could be pretty far behind.  Spin until we are
    // caught up.
  } while (last_synchronized_time_ + polling_period_ < end_time);
}
573
Austin Schuh11d43732020-09-21 17:28:30 -0700574std::vector<LogFile> SortParts(const std::vector<std::string> &parts) {
Austin Schuh5212cad2020-09-09 23:12:09 -0700575 // Start by grouping all parts by UUID, and extracting the part index.
Austin Schuh11d43732020-09-21 17:28:30 -0700576 // Datastructure to hold all the info extracted from a set of parts which go
577 // together so we can sort them afterwords.
578 struct UnsortedLogParts {
579 // Start times.
580 aos::monotonic_clock::time_point monotonic_start_time;
581 aos::realtime_clock::time_point realtime_start_time;
582
583 // Node to save.
584 std::string node;
585
586 // Pairs of the filename and the part index for sorting.
587 std::vector<std::pair<std::string, int>> parts;
588 };
589
590 // Map holding the logger_uuid -> second map. The second map holds the
591 // parts_uuid -> list of parts for sorting.
592 std::map<std::string, std::map<std::string, UnsortedLogParts>> parts_list;
Austin Schuh5212cad2020-09-09 23:12:09 -0700593
594 // Sort part files without UUIDs and part indexes as well. Extract everything
595 // useful from the log in the first pass, then sort later.
Austin Schuh11d43732020-09-21 17:28:30 -0700596 struct UnsortedOldParts {
597 // Part information with everything but the list of parts.
598 LogParts parts;
599
600 // Tuple of time for the data and filename needed for sorting after
601 // extracting.
Brian Silvermand90905f2020-09-23 14:42:56 -0700602 std::vector<std::pair<monotonic_clock::time_point, std::string>>
603 unsorted_parts;
Austin Schuh5212cad2020-09-09 23:12:09 -0700604 };
605
Austin Schuh11d43732020-09-21 17:28:30 -0700606 // A list of all the old parts which we don't know how to sort using uuids.
607 // There are enough of these in the wild that this is worth supporting.
608 std::vector<UnsortedOldParts> old_parts;
Austin Schuh5212cad2020-09-09 23:12:09 -0700609
Austin Schuh11d43732020-09-21 17:28:30 -0700610 // Now extract everything into our datastructures above for sorting.
Austin Schuh5212cad2020-09-09 23:12:09 -0700611 for (const std::string &part : parts) {
612 FlatbufferVector<LogFileHeader> log_header = ReadHeader(part);
613
Austin Schuh11d43732020-09-21 17:28:30 -0700614 const monotonic_clock::time_point monotonic_start_time(
615 chrono::nanoseconds(log_header.message().monotonic_start_time()));
616 const realtime_clock::time_point realtime_start_time(
617 chrono::nanoseconds(log_header.message().realtime_start_time()));
618
619 const std::string_view node =
620 log_header.message().has_node()
621 ? log_header.message().node()->name()->string_view()
622 : "";
623
Austin Schuh5212cad2020-09-09 23:12:09 -0700624 // Looks like an old log. No UUID, index, and also single node. We have
625 // little to no multi-node log files in the wild without part UUIDs and
626 // indexes which we care much about.
627 if (!log_header.message().has_parts_uuid() &&
628 !log_header.message().has_parts_index() &&
629 !log_header.message().has_node()) {
Austin Schuh5212cad2020-09-09 23:12:09 -0700630 FlatbufferVector<MessageHeader> first_message = ReadNthMessage(part, 0);
Austin Schuh11d43732020-09-21 17:28:30 -0700631 const monotonic_clock::time_point first_message_time(
Austin Schuh5212cad2020-09-09 23:12:09 -0700632 chrono::nanoseconds(first_message.message().monotonic_sent_time()));
Austin Schuh11d43732020-09-21 17:28:30 -0700633
634 // Find anything with a matching start time. They all go together.
635 auto result = std::find_if(
636 old_parts.begin(), old_parts.end(),
637 [&](const UnsortedOldParts &parts) {
638 return parts.parts.monotonic_start_time == monotonic_start_time &&
639 parts.parts.realtime_start_time == realtime_start_time;
640 });
641
642 if (result == old_parts.end()) {
643 old_parts.emplace_back();
644 old_parts.back().parts.monotonic_start_time = monotonic_start_time;
645 old_parts.back().parts.realtime_start_time = realtime_start_time;
646 old_parts.back().unsorted_parts.emplace_back(
647 std::make_pair(first_message_time, part));
648 } else {
649 result->unsorted_parts.emplace_back(
650 std::make_pair(first_message_time, part));
651 }
Austin Schuh5212cad2020-09-09 23:12:09 -0700652 continue;
653 }
654
Austin Schuh11d43732020-09-21 17:28:30 -0700655 CHECK(log_header.message().has_logger_uuid());
Austin Schuh5212cad2020-09-09 23:12:09 -0700656 CHECK(log_header.message().has_parts_uuid());
657 CHECK(log_header.message().has_parts_index());
658
Austin Schuh11d43732020-09-21 17:28:30 -0700659 const std::string logger_uuid = log_header.message().logger_uuid()->str();
Austin Schuh5212cad2020-09-09 23:12:09 -0700660 const std::string parts_uuid = log_header.message().parts_uuid()->str();
Austin Schuh11d43732020-09-21 17:28:30 -0700661 int32_t parts_index = log_header.message().parts_index();
662
663 auto log_it = parts_list.find(logger_uuid);
664 if (log_it == parts_list.end()) {
665 log_it = parts_list
Brian Silvermand90905f2020-09-23 14:42:56 -0700666 .insert(std::make_pair(
667 logger_uuid, std::map<std::string, UnsortedLogParts>()))
668 .first;
Austin Schuh5212cad2020-09-09 23:12:09 -0700669 }
Austin Schuh11d43732020-09-21 17:28:30 -0700670
671 auto it = log_it->second.find(parts_uuid);
672 if (it == log_it->second.end()) {
673 it = log_it->second.insert(std::make_pair(parts_uuid, UnsortedLogParts()))
674 .first;
675 it->second.monotonic_start_time = monotonic_start_time;
676 it->second.realtime_start_time = realtime_start_time;
677 it->second.node = std::string(node);
678 }
679
680 // First part might be min_time. If it is, try to put a better time on it.
681 if (it->second.monotonic_start_time == monotonic_clock::min_time) {
682 it->second.monotonic_start_time = monotonic_start_time;
683 } else if (monotonic_start_time != monotonic_clock::min_time) {
684 CHECK_EQ(it->second.monotonic_start_time, monotonic_start_time);
685 }
686 if (it->second.realtime_start_time == realtime_clock::min_time) {
687 it->second.realtime_start_time = realtime_start_time;
688 } else if (realtime_start_time != realtime_clock::min_time) {
689 CHECK_EQ(it->second.realtime_start_time, realtime_start_time);
690 }
691
692 it->second.parts.emplace_back(std::make_pair(part, parts_index));
Austin Schuh5212cad2020-09-09 23:12:09 -0700693 }
694
695 CHECK_NE(old_parts.empty(), parts_list.empty())
696 << ": Can't have a mix of old and new parts.";
697
Austin Schuh11d43732020-09-21 17:28:30 -0700698 // Now reformat old_parts to be in the right datastructure to report.
Austin Schuh5212cad2020-09-09 23:12:09 -0700699 if (!old_parts.empty()) {
Austin Schuh11d43732020-09-21 17:28:30 -0700700 std::vector<LogFile> result;
701 for (UnsortedOldParts &p : old_parts) {
702 // Sort by the oldest message in each file.
703 std::sort(
704 p.unsorted_parts.begin(), p.unsorted_parts.end(),
705 [](const std::pair<monotonic_clock::time_point, std::string> &a,
706 const std::pair<monotonic_clock::time_point, std::string> &b) {
707 return a.first < b.first;
708 });
709 LogFile log_file;
710 for (std::pair<monotonic_clock::time_point, std::string> &f :
711 p.unsorted_parts) {
712 p.parts.parts.emplace_back(std::move(f.second));
713 }
714 log_file.parts.emplace_back(std::move(p.parts));
715 result.emplace_back(std::move(log_file));
Austin Schuh5212cad2020-09-09 23:12:09 -0700716 }
Austin Schuh5212cad2020-09-09 23:12:09 -0700717
Austin Schuh11d43732020-09-21 17:28:30 -0700718 return result;
Austin Schuh5212cad2020-09-09 23:12:09 -0700719 }
720
721 // Now, sort them and produce the final vector form.
Austin Schuh11d43732020-09-21 17:28:30 -0700722 std::vector<LogFile> result;
Austin Schuh5212cad2020-09-09 23:12:09 -0700723 result.reserve(parts_list.size());
Brian Silvermand90905f2020-09-23 14:42:56 -0700724 for (std::pair<const std::string, std::map<std::string, UnsortedLogParts>>
725 &logs : parts_list) {
Austin Schuh11d43732020-09-21 17:28:30 -0700726 LogFile new_file;
727 new_file.logger_uuid = logs.first;
728 for (std::pair<const std::string, UnsortedLogParts> &parts : logs.second) {
729 LogParts new_parts;
730 new_parts.monotonic_start_time = parts.second.monotonic_start_time;
731 new_parts.realtime_start_time = parts.second.realtime_start_time;
732 new_parts.logger_uuid = logs.first;
733 new_parts.parts_uuid = parts.first;
734 new_parts.node = std::move(parts.second.node);
735
736 std::sort(parts.second.parts.begin(), parts.second.parts.end(),
737 [](const std::pair<std::string, int> &a,
738 const std::pair<std::string, int> &b) {
739 return a.second < b.second;
740 });
741 new_parts.parts.reserve(parts.second.parts.size());
742 for (std::pair<std::string, int> &p : parts.second.parts) {
743 new_parts.parts.emplace_back(std::move(p.first));
744 }
745 new_file.parts.emplace_back(std::move(new_parts));
Austin Schuh5212cad2020-09-09 23:12:09 -0700746 }
Austin Schuh11d43732020-09-21 17:28:30 -0700747 result.emplace_back(std::move(new_file));
748 }
749 return result;
750}
751
752std::ostream &operator<<(std::ostream &stream, const LogFile &file) {
753 stream << "{";
754 if (!file.logger_uuid.empty()) {
755 stream << "\"logger_uuid\": \"" << file.logger_uuid << "\", ";
756 }
757 stream << "\"parts\": [";
758 for (size_t i = 0; i < file.parts.size(); ++i) {
759 if (i != 0u) {
760 stream << ", ";
761 }
762 stream << file.parts[i];
763 }
764 stream << "]}";
765 return stream;
766}
767std::ostream &operator<<(std::ostream &stream, const LogParts &parts) {
768 stream << "{";
769 if (!parts.logger_uuid.empty()) {
770 stream << "\"logger_uuid\": \"" << parts.logger_uuid << "\", ";
771 }
772 if (!parts.parts_uuid.empty()) {
773 stream << "\"parts_uuid\": \"" << parts.parts_uuid << "\", ";
774 }
775 if (!parts.node.empty()) {
776 stream << "\"node\": \"" << parts.node << "\", ";
777 }
778 stream << "\"monotonic_start_time\": " << parts.monotonic_start_time
779 << ", \"realtime_start_time\": " << parts.realtime_start_time << ", [";
780
781 for (size_t i = 0; i < parts.parts.size(); ++i) {
782 if (i != 0u) {
783 stream << ", ";
784 }
785 stream << parts.parts[i];
786 }
787
788 stream << "]}";
789 return stream;
790}
791
792std::vector<std::vector<std::string>> ToLogReaderVector(
793 const std::vector<LogFile> &log_files) {
794 std::vector<std::vector<std::string>> result;
795 for (const LogFile &log_file : log_files) {
796 for (const LogParts &log_parts : log_file.parts) {
797 std::vector<std::string> parts;
798 for (const std::string &part : log_parts.parts) {
799 parts.emplace_back(part);
800 }
801 result.emplace_back(std::move(parts));
802 }
Austin Schuh5212cad2020-09-09 23:12:09 -0700803 }
804 return result;
805}
806
// Convenience constructor: replays a single log file by wrapping it in a
// one-element filename list and delegating to the list-based constructor.
LogReader::LogReader(std::string_view filename,
                     const Configuration *replay_configuration)
    : LogReader(std::vector<std::string>{std::string(filename)},
                replay_configuration) {}
811
// Convenience constructor: replays a single set of log parts by wrapping it
// in a one-element list of part lists and delegating to the nested-vector
// constructor.
LogReader::LogReader(const std::vector<std::string> &filenames,
                     const Configuration *replay_configuration)
    : LogReader(std::vector<std::vector<std::string>>{filenames},
                replay_configuration) {}
816
// TODO(austin): Make this the base and kill the others. This has much better
// context for sorting.
// Constructs a LogReader from pre-sorted LogFile structures by flattening
// them back into nested filename lists and delegating.
LogReader::LogReader(const std::vector<LogFile> &log_files,
                     const Configuration *replay_configuration)
    : LogReader(ToLogReaderVector(log_files), replay_configuration) {}
822
// Primary constructor.  Reads the header out of the first log file to learn
// the logged configuration, builds the remapped replay configuration, and
// sizes the per-node State list.
// NOTE(review): assumes filenames is non-empty and filenames[0] has at least
// one entry — the header read below would index out of bounds otherwise.
// TODO: confirm callers guarantee this.
LogReader::LogReader(const std::vector<std::vector<std::string>> &filenames,
                     const Configuration *replay_configuration)
    : filenames_(filenames),
      // Pull the header from the first part of the first log file; it carries
      // the logged configuration used below.
      log_file_header_(ReadHeader(filenames[0][0])),
      replay_configuration_(replay_configuration) {
  // Must run before configuration() is meaningful; it populates
  // remapped_configuration_.
  MakeRemappedConfig();

  // A single-node log can't be replayed against a multi-node config (or vice
  // versa).
  if (replay_configuration) {
    CHECK_EQ(configuration::MultiNode(configuration()),
             configuration::MultiNode(replay_configuration))
        << ": Log file and replay config need to both be multi or single "
           "node.";
  }

  if (!configuration::MultiNode(configuration())) {
    // Single node: one State fed by a merger over all the files.
    states_.emplace_back(
        std::make_unique<State>(std::make_unique<ChannelMerger>(filenames)));
  } else {
    // Multi node: validate that the replay config covers every logged node,
    // then reserve one State slot per node (filled in during Register()).
    if (replay_configuration) {
      CHECK_EQ(logged_configuration()->nodes()->size(),
               replay_configuration->nodes()->size())
          << ": Log file and replay config need to have matching nodes "
             "lists.";
      for (const Node *node : *logged_configuration()->nodes()) {
        if (configuration::GetNode(replay_configuration, node) == nullptr) {
          LOG(FATAL) << "Found node " << FlatbufferToJson(node)
                     << " in logged config that is not present in the replay "
                        "config.";
        }
      }
    }
    states_.resize(configuration()->nodes()->size());
  }
}
857
// Tears down the reader.  If we own the event loop factory we can Deregister
// ourselves; if the caller owns it, they must have called Deregister already.
LogReader::~LogReader() {
  if (event_loop_factory_unique_ptr_) {
    Deregister();
  } else if (event_loop_factory_ != nullptr) {
    // A borrowed factory still registered against us would be left with
    // dangling pointers, so fail loudly instead.
    LOG(FATAL) << "Must call Deregister before the SimulatedEventLoopFactory "
                  "is destroyed";
  }
  // Close the timestamp-offset CSV file if it was ever opened.
  if (offset_fp_ != nullptr) {
    fclose(offset_fp_);
  }
  // Zero out some buffers. It's easy to do use-after-frees on these, so make
  // it more obvious.
  if (remapped_configuration_buffer_) {
    remapped_configuration_buffer_->Wipe();
  }
  log_file_header_.Wipe();
}
Austin Schuhe309d2a2019-11-29 13:25:21 -0800875
// Returns the configuration that was recorded in the log file header (as
// logged, before any channel remapping).
const Configuration *LogReader::logged_configuration() const {
  return log_file_header_.message().configuration();
}
879
// Returns the configuration replay actually runs against: the logged
// configuration with any requested channel remappings applied.
const Configuration *LogReader::configuration() const {
  return remapped_configuration_;
}
883
// Returns the list of nodes from the remapped configuration.  Only valid
// after the remapped configuration has been built.
std::vector<const Node *> LogReader::Nodes() const {
  // Because the Node pointer will only be valid if it actually points to
  // memory owned by remapped_configuration_, we need to wait for the
  // remapped_configuration_ to be populated before accessing it.
  //
  // Also, note, that when ever a map is changed, the nodes in here are
  // invalidated.
  CHECK(remapped_configuration_ != nullptr)
      << ": Need to call Register before the node() pointer will be valid.";
  return configuration::GetNodes(remapped_configuration_);
}
Austin Schuh15649d62019-12-28 16:36:38 -0800895
Austin Schuh11d43732020-09-21 17:28:30 -0700896monotonic_clock::time_point LogReader::monotonic_start_time(
897 const Node *node) const {
Austin Schuh8bd96322020-02-13 21:18:22 -0800898 State *state =
899 states_[configuration::GetNodeIndex(configuration(), node)].get();
900 CHECK(state != nullptr) << ": Unknown node " << FlatbufferToJson(node);
901
Austin Schuh858c9f32020-08-31 16:56:12 -0700902 return state->monotonic_start_time();
Austin Schuhe309d2a2019-11-29 13:25:21 -0800903}
904
Austin Schuh11d43732020-09-21 17:28:30 -0700905realtime_clock::time_point LogReader::realtime_start_time(
906 const Node *node) const {
Austin Schuh8bd96322020-02-13 21:18:22 -0800907 State *state =
908 states_[configuration::GetNodeIndex(configuration(), node)].get();
909 CHECK(state != nullptr) << ": Unknown node " << FlatbufferToJson(node);
910
Austin Schuh858c9f32020-08-31 16:56:12 -0700911 return state->realtime_start_time();
Austin Schuhe309d2a2019-11-29 13:25:21 -0800912}
913
James Kuszmaul84ff3e52020-01-03 19:48:53 -0800914void LogReader::Register() {
915 event_loop_factory_unique_ptr_ =
Austin Schuhac0771c2020-01-07 18:36:30 -0800916 std::make_unique<SimulatedEventLoopFactory>(configuration());
James Kuszmaul84ff3e52020-01-03 19:48:53 -0800917 Register(event_loop_factory_unique_ptr_.get());
918}
919
// Registers the replay against a caller-provided (or owned) factory: creates
// per-node States, sets up the time-synchronization matrices from the
// pairwise filters, solves for the initial clock offsets, and runs the
// simulation forward to the common start time.
void LogReader::Register(SimulatedEventLoopFactory *event_loop_factory) {
  event_loop_factory_ = event_loop_factory;
  remapped_configuration_ = event_loop_factory_->configuration();

  // Build a State (with its own ChannelMerger over all the files) for every
  // node and register an event loop for it.  This also populates filters_ as
  // channels get wired up.
  for (const Node *node : configuration::GetNodes(configuration())) {
    const size_t node_index =
        configuration::GetNodeIndex(configuration(), node);
    states_[node_index] =
        std::make_unique<State>(std::make_unique<ChannelMerger>(filenames_));
    State *state = states_[node_index].get();

    Register(state->SetNodeEventLoopFactory(
        event_loop_factory_->GetNodeEventLoopFactory(node)));
  }
  if (live_nodes_ == 0) {
    LOG(FATAL)
        << "Don't have logs from any of the nodes in the replay config--are "
           "you sure that the replay config matches the original config?";
  }

  // We need to now seed our per-node time offsets and get everything set up
  // to run.
  const size_t num_nodes = nodes_count();

  // It is easiest to solve for per node offsets with a matrix rather than
  // trying to solve the equations by hand.  So let's get after it.
  //
  // Now, build up the map matrix.
  //
  // offset_matrix_ = (map_matrix_ + slope_matrix_) * [ta; tb; tc]
  // Row 0 anchors the average; one additional row per node-pair filter.
  map_matrix_ = Eigen::Matrix<mpq_class, Eigen::Dynamic, Eigen::Dynamic>::Zero(
      filters_.size() + 1, num_nodes);
  slope_matrix_ =
      Eigen::Matrix<mpq_class, Eigen::Dynamic, Eigen::Dynamic>::Zero(
          filters_.size() + 1, num_nodes);

  offset_matrix_ =
      Eigen::Matrix<mpq_class, Eigen::Dynamic, 1>::Zero(filters_.size() + 1);
  valid_matrix_ =
      Eigen::Matrix<bool, Eigen::Dynamic, 1>::Zero(filters_.size() + 1);
  last_valid_matrix_ =
      Eigen::Matrix<bool, Eigen::Dynamic, 1>::Zero(filters_.size() + 1);

  time_offset_matrix_ = Eigen::VectorXd::Zero(num_nodes);
  time_slope_matrix_ = Eigen::VectorXd::Zero(num_nodes);

  // All times should average out to the distributed clock.
  for (int i = 0; i < map_matrix_.cols(); ++i) {
    // 1/num_nodes.
    map_matrix_(0, i) = mpq_class(1, num_nodes);
  }
  valid_matrix_(0) = true;

  {
    // Now, add the a - b -> sample elements.
    // Each filter writes its live slope/offset/validity directly into the
    // matrix cells via the pointers registered here.
    size_t i = 1;
    for (std::pair<const std::tuple<const Node *, const Node *>,
                   std::tuple<message_bridge::NoncausalOffsetEstimator>>
             &filter : filters_) {
      const Node *const node_a = std::get<0>(filter.first);
      const Node *const node_b = std::get<1>(filter.first);

      const size_t node_a_index =
          configuration::GetNodeIndex(configuration(), node_a);
      const size_t node_b_index =
          configuration::GetNodeIndex(configuration(), node_b);

      // -a
      map_matrix_(i, node_a_index) = mpq_class(-1);
      // +b
      map_matrix_(i, node_b_index) = mpq_class(1);

      // -> sample
      std::get<0>(filter.second)
          .set_slope_pointer(&slope_matrix_(i, node_a_index));
      std::get<0>(filter.second).set_offset_pointer(&offset_matrix_(i, 0));

      valid_matrix_(i) = false;
      std::get<0>(filter.second).set_valid_pointer(&valid_matrix_(i));

      ++i;
    }
  }

  // Prime each node's sorted message queue so initial timestamps exist.
  for (std::unique_ptr<State> &state : states_) {
    state->SeedSortedMessages();
  }

  // Rank of the map matrix tells you if all the nodes are in communication
  // with each other, which tells you if the offsets are observable.
  const size_t connected_nodes =
      Eigen::FullPivLU<
          Eigen::Matrix<mpq_class, Eigen::Dynamic, Eigen::Dynamic>>(map_matrix_)
          .rank();

  // We don't need to support isolated nodes until someone has a real use
  // case.
  CHECK_EQ(connected_nodes, num_nodes)
      << ": There is a node which isn't communicating with the rest.";

  // And solve.
  UpdateOffsets();

  // We want to start the log file at the last start time of the log files
  // from all the nodes.  Compute how long each node's simulation needs to run
  // to move time to this point.
  distributed_clock::time_point start_time = distributed_clock::min_time;

  // TODO(austin): We want an "OnStart" callback for each node rather than
  // running until the last node.

  for (std::unique_ptr<State> &state : states_) {
    VLOG(1) << "Start time is " << state->monotonic_start_time() << " for node "
            << MaybeNodeName(state->event_loop()->node()) << "now "
            << state->monotonic_now();
    // And start computing the start time on the distributed clock now that
    // that works.
    start_time = std::max(
        start_time, state->ToDistributedClock(state->monotonic_start_time()));
  }

  CHECK_GE(start_time, distributed_clock::epoch())
      << ": Hmm, we have a node starting before the start of time.  Offset "
         "everything.";

  // Forwarding is tracked per channel.  If it is enabled, we want to turn it
  // off.  Otherwise messages replayed will get forwarded across to the other
  // nodes, and also replayed on the other nodes.  This may not satisfy all
  // our users, but it'll start the discussion.
  if (configuration::MultiNode(event_loop_factory_->configuration())) {
    for (size_t i = 0; i < logged_configuration()->channels()->size(); ++i) {
      const Channel *channel = logged_configuration()->channels()->Get(i);
      const Node *node = configuration::GetNode(
          configuration(), channel->source_node()->string_view());

      State *state =
          states_[configuration::GetNodeIndex(configuration(), node)].get();

      const Channel *remapped_channel =
          RemapChannel(state->event_loop(), channel);

      event_loop_factory_->DisableForwarding(remapped_channel);
    }

    // If we are replaying a log, we don't want a bunch of redundant messages
    // from both the real message bridge and simulated message bridge.
    event_loop_factory_->DisableStatistics();
  }

  // While we are starting the system up, we might be relying on matching data
  // to timestamps on log files where the timestamp log file starts before the
  // data.  In this case, it is reasonable to expect missing data.
  ignore_missing_data_ = true;
  VLOG(1) << "Running until " << start_time << " in Register";
  event_loop_factory_->RunFor(start_time.time_since_epoch());
  VLOG(1) << "At start time";
  // Now that we are running for real, missing data means that the log file is
  // corrupted or went wrong.
  ignore_missing_data_ = false;

  for (std::unique_ptr<State> &state : states_) {
    // Make the RT clock be correct before handing it to the user.
    if (state->realtime_start_time() != realtime_clock::min_time) {
      state->SetRealtimeOffset(state->monotonic_start_time(),
                               state->realtime_start_time());
    }
    VLOG(1) << "Start time is " << state->monotonic_start_time() << " for node "
            << MaybeNodeName(state->event_loop()->node()) << "now "
            << state->monotonic_now();
  }

  // Record the initial forward/reverse sample times for the CSV debug dumps.
  if (FLAGS_timestamps_to_csv) {
    for (std::pair<const std::tuple<const Node *, const Node *>,
                   std::tuple<message_bridge::NoncausalOffsetEstimator>>
             &filter : filters_) {
      const Node *const node_a = std::get<0>(filter.first);
      const Node *const node_b = std::get<1>(filter.first);

      std::get<0>(filter.second)
          .SetFirstFwdTime(event_loop_factory_->GetNodeEventLoopFactory(node_a)
                               ->monotonic_now());
      std::get<0>(filter.second)
          .SetFirstRevTime(event_loop_factory_->GetNodeEventLoopFactory(node_b)
                               ->monotonic_now());
    }
  }
}
1107
Austin Schuh2f8fd752020-09-01 22:38:28 -07001108void LogReader::UpdateOffsets() {
1109 VLOG(2) << "Samples are " << offset_matrix_;
1110 VLOG(2) << "Map is " << (map_matrix_ + slope_matrix_);
1111 std::tie(time_slope_matrix_, time_offset_matrix_) = SolveOffsets();
1112 Eigen::IOFormat HeavyFmt(Eigen::FullPrecision, 0, ", ", ";\n", "[", "]", "[",
1113 "]");
1114 VLOG(1) << "First slope " << time_slope_matrix_.transpose().format(HeavyFmt)
1115 << " offset " << time_offset_matrix_.transpose().format(HeavyFmt);
1116
1117 size_t node_index = 0;
1118 for (std::unique_ptr<State> &state : states_) {
1119 state->SetDistributedOffset(offset(node_index), slope(node_index));
1120 VLOG(1) << "Offset for node " << node_index << " "
1121 << MaybeNodeName(state->event_loop()->node()) << "is "
1122 << aos::distributed_clock::time_point(offset(node_index))
1123 << " slope " << std::setprecision(9) << std::fixed
1124 << slope(node_index);
1125 ++node_index;
1126 }
1127
1128 if (VLOG_IS_ON(1)) {
1129 LogFit("Offset is");
1130 }
1131}
1132
// Debug helper: logs, for every node pair with timestamp samples, how well
// the solved per-node slope/offset reproduces the filter's own fit, and
// round-trips sample timestamps through the distributed clock as a sanity
// check.  Output only appears at VLOG(1).
void LogReader::LogFit(std::string_view prefix) {
  for (std::unique_ptr<State> &state : states_) {
    VLOG(1) << MaybeNodeName(state->event_loop()->node()) << " now "
            << state->monotonic_now() << " distributed "
            << event_loop_factory_->distributed_now();
  }

  for (std::pair<const std::tuple<const Node *, const Node *>,
                 std::tuple<message_bridge::NoncausalOffsetEstimator>> &filter :
       filters_) {
    message_bridge::NoncausalOffsetEstimator *estimator =
        &std::get<0>(filter.second);

    // Nothing to report for a pair with no samples in either direction.
    if (estimator->a_timestamps().size() == 0 &&
        estimator->b_timestamps().size() == 0) {
      continue;
    }

    if (VLOG_IS_ON(1)) {
      estimator->LogFit(prefix);
    }

    const Node *const node_a = std::get<0>(filter.first);
    const Node *const node_b = std::get<1>(filter.first);

    const size_t node_a_index =
        configuration::GetNodeIndex(configuration(), node_a);
    const size_t node_b_index =
        configuration::GetNodeIndex(configuration(), node_b);

    // Reconstruct the pairwise slope/offset implied by the global per-node
    // solution so it can be compared against the estimator's own fit.
    const double recovered_slope =
        slope(node_b_index) / slope(node_a_index) - 1.0;
    const int64_t recovered_offset =
        offset(node_b_index).count() - offset(node_a_index).count() *
                                           slope(node_b_index) /
                                           slope(node_a_index);

    VLOG(1) << "Recovered slope " << std::setprecision(20) << recovered_slope
            << " (error " << recovered_slope - estimator->fit().slope() << ") "
            << " offset " << std::setprecision(20) << recovered_offset
            << " (error "
            << recovered_offset - estimator->fit().offset().count() << ")";

    // Map node A's first and last sample times onto the distributed clock.
    const aos::distributed_clock::time_point a0 =
        states_[node_a_index]->ToDistributedClock(
            std::get<0>(estimator->a_timestamps()[0]));
    const aos::distributed_clock::time_point a1 =
        states_[node_a_index]->ToDistributedClock(
            std::get<0>(estimator->a_timestamps()[1]));

    VLOG(1) << node_a->name()->string_view() << " timestamps()[0] = "
            << std::get<0>(estimator->a_timestamps()[0]) << " -> " << a0
            << " distributed -> " << node_b->name()->string_view() << " "
            << states_[node_b_index]->FromDistributedClock(a0) << " should be "
            << aos::monotonic_clock::time_point(
                   std::chrono::nanoseconds(static_cast<int64_t>(
                       std::get<0>(estimator->a_timestamps()[0])
                           .time_since_epoch()
                           .count() *
                       (1.0 + estimator->fit().slope()))) +
                   estimator->fit().offset())
            << ((a0 <= event_loop_factory_->distributed_now())
                    ? ""
                    : " After now, investigate");
    VLOG(1) << node_a->name()->string_view() << " timestamps()[1] = "
            << std::get<0>(estimator->a_timestamps()[1]) << " -> " << a1
            << " distributed -> " << node_b->name()->string_view() << " "
            << states_[node_b_index]->FromDistributedClock(a1) << " should be "
            << aos::monotonic_clock::time_point(
                   std::chrono::nanoseconds(static_cast<int64_t>(
                       std::get<0>(estimator->a_timestamps()[1])
                           .time_since_epoch()
                           .count() *
                       (1.0 + estimator->fit().slope()))) +
                   estimator->fit().offset())
            << ((event_loop_factory_->distributed_now() <= a1)
                    ? ""
                    : " Before now, investigate");

    // Same round-trip check using node B's first and last sample times.
    const aos::distributed_clock::time_point b0 =
        states_[node_b_index]->ToDistributedClock(
            std::get<0>(estimator->b_timestamps()[0]));
    const aos::distributed_clock::time_point b1 =
        states_[node_b_index]->ToDistributedClock(
            std::get<0>(estimator->b_timestamps()[1]));

    VLOG(1) << node_b->name()->string_view() << " timestamps()[0] = "
            << std::get<0>(estimator->b_timestamps()[0]) << " -> " << b0
            << " distributed -> " << node_a->name()->string_view() << " "
            << states_[node_a_index]->FromDistributedClock(b0)
            << ((b0 <= event_loop_factory_->distributed_now())
                    ? ""
                    : " After now, investigate");
    VLOG(1) << node_b->name()->string_view() << " timestamps()[1] = "
            << std::get<0>(estimator->b_timestamps()[1]) << " -> " << b1
            << " distributed -> " << node_a->name()->string_view() << " "
            << states_[node_a_index]->FromDistributedClock(b1)
            << ((event_loop_factory_->distributed_now() <= b1)
                    ? ""
                    : " Before now, investigate");
  }
}
1235
1236message_bridge::NoncausalOffsetEstimator *LogReader::GetFilter(
Austin Schuh8bd96322020-02-13 21:18:22 -08001237 const Node *node_a, const Node *node_b) {
1238 CHECK_NE(node_a, node_b);
1239 CHECK_EQ(configuration::GetNode(configuration(), node_a), node_a);
1240 CHECK_EQ(configuration::GetNode(configuration(), node_b), node_b);
1241
1242 if (node_a > node_b) {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001243 return GetFilter(node_b, node_a);
Austin Schuh8bd96322020-02-13 21:18:22 -08001244 }
1245
1246 auto tuple = std::make_tuple(node_a, node_b);
1247
1248 auto it = filters_.find(tuple);
1249
1250 if (it == filters_.end()) {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001251 auto &x =
1252 filters_
1253 .insert(std::make_pair(
1254 tuple, std::make_tuple(message_bridge::NoncausalOffsetEstimator(
1255 node_a, node_b))))
1256 .first->second;
Austin Schuh8bd96322020-02-13 21:18:22 -08001257 if (FLAGS_timestamps_to_csv) {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001258 std::get<0>(x).SetFwdCsvFileName(absl::StrCat(
1259 "/tmp/timestamp_noncausal_", node_a->name()->string_view(), "_",
1260 node_b->name()->string_view()));
1261 std::get<0>(x).SetRevCsvFileName(absl::StrCat(
1262 "/tmp/timestamp_noncausal_", node_b->name()->string_view(), "_",
1263 node_a->name()->string_view()));
Austin Schuh8bd96322020-02-13 21:18:22 -08001264 }
1265
Austin Schuh2f8fd752020-09-01 22:38:28 -07001266 return &std::get<0>(x);
Austin Schuh8bd96322020-02-13 21:18:22 -08001267 } else {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001268 return &std::get<0>(it->second);
Austin Schuh8bd96322020-02-13 21:18:22 -08001269 }
1270}
1271
Austin Schuhe309d2a2019-11-29 13:25:21 -08001272void LogReader::Register(EventLoop *event_loop) {
Austin Schuh8bd96322020-02-13 21:18:22 -08001273 State *state =
1274 states_[configuration::GetNodeIndex(configuration(), event_loop->node())]
1275 .get();
Austin Schuh6f3babe2020-01-26 20:34:50 -08001276
Austin Schuh858c9f32020-08-31 16:56:12 -07001277 state->set_event_loop(event_loop);
Austin Schuhe309d2a2019-11-29 13:25:21 -08001278
Tyler Chatow67ddb032020-01-12 14:30:04 -08001279 // We don't run timing reports when trying to print out logged data, because
1280 // otherwise we would end up printing out the timing reports themselves...
1281 // This is only really relevant when we are replaying into a simulation.
Austin Schuh6f3babe2020-01-26 20:34:50 -08001282 event_loop->SkipTimingReport();
1283 event_loop->SkipAosLog();
Austin Schuh39788ff2019-12-01 18:22:57 -08001284
Austin Schuh858c9f32020-08-31 16:56:12 -07001285 const bool has_data = state->SetNode();
Austin Schuhe309d2a2019-11-29 13:25:21 -08001286
Austin Schuh858c9f32020-08-31 16:56:12 -07001287 state->SetChannelCount(logged_configuration()->channels()->size());
Austin Schuh8bd96322020-02-13 21:18:22 -08001288
Austin Schuh858c9f32020-08-31 16:56:12 -07001289 for (size_t i = 0; i < logged_configuration()->channels()->size(); ++i) {
Austin Schuh6f3babe2020-01-26 20:34:50 -08001290 const Channel *channel =
1291 RemapChannel(event_loop, logged_configuration()->channels()->Get(i));
Austin Schuh6331ef92020-01-07 18:28:09 -08001292
Austin Schuh858c9f32020-08-31 16:56:12 -07001293 NodeEventLoopFactory *channel_target_event_loop_factory = nullptr;
Austin Schuh2f8fd752020-09-01 22:38:28 -07001294 message_bridge::NoncausalOffsetEstimator *filter = nullptr;
Austin Schuh8bd96322020-02-13 21:18:22 -08001295
1296 if (!configuration::ChannelIsSendableOnNode(channel, event_loop->node()) &&
1297 configuration::ChannelIsReadableOnNode(channel, event_loop->node())) {
1298 const Node *target_node = configuration::GetNode(
1299 event_loop->configuration(), channel->source_node()->string_view());
Austin Schuh858c9f32020-08-31 16:56:12 -07001300 filter = GetFilter(event_loop->node(), target_node);
Austin Schuh8bd96322020-02-13 21:18:22 -08001301
1302 if (event_loop_factory_ != nullptr) {
Austin Schuh858c9f32020-08-31 16:56:12 -07001303 channel_target_event_loop_factory =
Austin Schuh8bd96322020-02-13 21:18:22 -08001304 event_loop_factory_->GetNodeEventLoopFactory(target_node);
1305 }
1306 }
Austin Schuh858c9f32020-08-31 16:56:12 -07001307
1308 state->SetChannel(i, event_loop->MakeRawSender(channel), filter,
1309 channel_target_event_loop_factory);
Austin Schuhe309d2a2019-11-29 13:25:21 -08001310 }
1311
Austin Schuh6aa77be2020-02-22 21:06:40 -08001312 // If we didn't find any log files with data in them, we won't ever get a
1313 // callback or be live. So skip the rest of the setup.
1314 if (!has_data) {
1315 return;
1316 }
1317
Austin Schuh858c9f32020-08-31 16:56:12 -07001318 state->set_timer_handler(event_loop->AddTimer([this, state]() {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001319 VLOG(1) << "Starting sending " << MaybeNodeName(state->event_loop()->node())
1320 << "at " << state->event_loop()->context().monotonic_event_time
1321 << " now " << state->monotonic_now();
Austin Schuh858c9f32020-08-31 16:56:12 -07001322 if (state->OldestMessageTime() == monotonic_clock::max_time) {
Austin Schuh6f3babe2020-01-26 20:34:50 -08001323 --live_nodes_;
Austin Schuh2f8fd752020-09-01 22:38:28 -07001324 VLOG(1) << MaybeNodeName(state->event_loop()->node()) << "Node down!";
Austin Schuh6f3babe2020-01-26 20:34:50 -08001325 if (live_nodes_ == 0) {
1326 event_loop_factory_->Exit();
1327 }
James Kuszmaul314f1672020-01-03 20:02:08 -08001328 return;
1329 }
Austin Schuh6f3babe2020-01-26 20:34:50 -08001330 TimestampMerger::DeliveryTimestamp channel_timestamp;
Austin Schuh05b70472020-01-01 17:11:17 -08001331 int channel_index;
1332 FlatbufferVector<MessageHeader> channel_data =
1333 FlatbufferVector<MessageHeader>::Empty();
1334
Austin Schuh2f8fd752020-09-01 22:38:28 -07001335 if (VLOG_IS_ON(1)) {
1336 LogFit("Offset was");
1337 }
1338
1339 bool update_time;
Austin Schuh05b70472020-01-01 17:11:17 -08001340 std::tie(channel_timestamp, channel_index, channel_data) =
Austin Schuh2f8fd752020-09-01 22:38:28 -07001341 state->PopOldest(&update_time);
Austin Schuh05b70472020-01-01 17:11:17 -08001342
Austin Schuhe309d2a2019-11-29 13:25:21 -08001343 const monotonic_clock::time_point monotonic_now =
Austin Schuh858c9f32020-08-31 16:56:12 -07001344 state->event_loop()->context().monotonic_event_time;
Austin Schuh2f8fd752020-09-01 22:38:28 -07001345 if (!FLAGS_skip_order_validation) {
1346 CHECK(monotonic_now == channel_timestamp.monotonic_event_time)
1347 << ": " << FlatbufferToJson(state->event_loop()->node()) << " Now "
1348 << monotonic_now << " trying to send "
1349 << channel_timestamp.monotonic_event_time << " failure "
1350 << state->DebugString();
1351 } else if (monotonic_now != channel_timestamp.monotonic_event_time) {
1352 LOG(WARNING) << "Check failed: monotonic_now == "
1353 "channel_timestamp.monotonic_event_time) ("
1354 << monotonic_now << " vs. "
1355 << channel_timestamp.monotonic_event_time
1356 << "): " << FlatbufferToJson(state->event_loop()->node())
1357 << " Now " << monotonic_now << " trying to send "
1358 << channel_timestamp.monotonic_event_time << " failure "
1359 << state->DebugString();
1360 }
Austin Schuhe309d2a2019-11-29 13:25:21 -08001361
Austin Schuh6f3babe2020-01-26 20:34:50 -08001362 if (channel_timestamp.monotonic_event_time >
Austin Schuh858c9f32020-08-31 16:56:12 -07001363 state->monotonic_start_time() ||
Austin Schuh15649d62019-12-28 16:36:38 -08001364 event_loop_factory_ != nullptr) {
Austin Schuh8bd96322020-02-13 21:18:22 -08001365 if ((!ignore_missing_data_ && !FLAGS_skip_missing_forwarding_entries &&
Austin Schuh858c9f32020-08-31 16:56:12 -07001366 !state->at_end()) ||
Austin Schuh05b70472020-01-01 17:11:17 -08001367 channel_data.message().data() != nullptr) {
1368 CHECK(channel_data.message().data() != nullptr)
1369 << ": Got a message without data. Forwarding entry which was "
Austin Schuh2f8fd752020-09-01 22:38:28 -07001370 "not matched? Use --skip_missing_forwarding_entries to "
Brian Silverman87ac0402020-09-17 14:47:01 -07001371 "ignore this.";
Austin Schuh92547522019-12-28 14:33:43 -08001372
Austin Schuh2f8fd752020-09-01 22:38:28 -07001373 if (update_time) {
Austin Schuh8bd96322020-02-13 21:18:22 -08001374 // Confirm that the message was sent on the sending node before the
1375 // destination node (this node). As a proxy, do this by making sure
1376 // that time on the source node is past when the message was sent.
Austin Schuh2f8fd752020-09-01 22:38:28 -07001377 if (!FLAGS_skip_order_validation) {
1378 CHECK_LT(channel_timestamp.monotonic_remote_time,
1379 state->monotonic_remote_now(channel_index))
1380 << state->event_loop()->node()->name()->string_view() << " to "
1381 << state->remote_node(channel_index)->name()->string_view()
1382 << " " << state->DebugString();
1383 } else if (channel_timestamp.monotonic_remote_time >=
1384 state->monotonic_remote_now(channel_index)) {
1385 LOG(WARNING)
1386 << "Check failed: channel_timestamp.monotonic_remote_time < "
1387 "state->monotonic_remote_now(channel_index) ("
1388 << channel_timestamp.monotonic_remote_time << " vs. "
1389 << state->monotonic_remote_now(channel_index) << ") "
1390 << state->event_loop()->node()->name()->string_view() << " to "
1391 << state->remote_node(channel_index)->name()->string_view()
1392 << " currently " << channel_timestamp.monotonic_event_time
1393 << " ("
1394 << state->ToDistributedClock(
1395 channel_timestamp.monotonic_event_time)
1396 << ") remote event time "
1397 << channel_timestamp.monotonic_remote_time << " ("
1398 << state->RemoteToDistributedClock(
1399 channel_index, channel_timestamp.monotonic_remote_time)
1400 << ") " << state->DebugString();
1401 }
Austin Schuh8bd96322020-02-13 21:18:22 -08001402
1403 if (FLAGS_timestamps_to_csv) {
1404 if (offset_fp_ == nullptr) {
1405 offset_fp_ = fopen("/tmp/offsets.csv", "w");
1406 fprintf(
1407 offset_fp_,
1408 "# time_since_start, offset node 0, offset node 1, ...\n");
1409 first_time_ = channel_timestamp.realtime_event_time;
1410 }
1411
1412 fprintf(offset_fp_, "%.9f",
1413 std::chrono::duration_cast<std::chrono::duration<double>>(
1414 channel_timestamp.realtime_event_time - first_time_)
1415 .count());
Austin Schuh2f8fd752020-09-01 22:38:28 -07001416 for (int i = 1; i < time_offset_matrix_.rows(); ++i) {
1417 fprintf(offset_fp_, ", %.9f",
1418 time_offset_matrix_(i, 0) +
1419 time_slope_matrix_(i, 0) *
1420 chrono::duration<double>(
1421 event_loop_factory_->distributed_now()
1422 .time_since_epoch())
1423 .count());
Austin Schuh8bd96322020-02-13 21:18:22 -08001424 }
1425 fprintf(offset_fp_, "\n");
1426 }
Austin Schuh8bd96322020-02-13 21:18:22 -08001427 }
1428
Austin Schuh15649d62019-12-28 16:36:38 -08001429 // If we have access to the factory, use it to fix the realtime time.
Austin Schuh858c9f32020-08-31 16:56:12 -07001430 state->SetRealtimeOffset(channel_timestamp.monotonic_event_time,
1431 channel_timestamp.realtime_event_time);
Austin Schuh15649d62019-12-28 16:36:38 -08001432
Austin Schuh2f8fd752020-09-01 22:38:28 -07001433 VLOG(1) << MaybeNodeName(state->event_loop()->node()) << "Sending "
1434 << channel_timestamp.monotonic_event_time;
1435 // TODO(austin): std::move channel_data in and make that efficient in
1436 // simulation.
Austin Schuh858c9f32020-08-31 16:56:12 -07001437 state->Send(channel_index, channel_data.message().data()->Data(),
1438 channel_data.message().data()->size(),
1439 channel_timestamp.monotonic_remote_time,
1440 channel_timestamp.realtime_remote_time,
1441 channel_timestamp.remote_queue_index);
Austin Schuh2f8fd752020-09-01 22:38:28 -07001442 } else if (state->at_end() && !ignore_missing_data_) {
Austin Schuh8bd96322020-02-13 21:18:22 -08001443 // We are at the end of the log file and found missing data. Finish
Austin Schuh2f8fd752020-09-01 22:38:28 -07001444 // reading the rest of the log file and call it quits. We don't want
1445 // to replay partial data.
Austin Schuh858c9f32020-08-31 16:56:12 -07001446 while (state->OldestMessageTime() != monotonic_clock::max_time) {
1447 bool update_time_dummy;
1448 state->PopOldest(&update_time_dummy);
Austin Schuh8bd96322020-02-13 21:18:22 -08001449 }
Austin Schuh2f8fd752020-09-01 22:38:28 -07001450 } else {
1451 CHECK(channel_data.message().data() == nullptr) << ": Nullptr";
Austin Schuh92547522019-12-28 14:33:43 -08001452 }
Austin Schuhe309d2a2019-11-29 13:25:21 -08001453 } else {
Austin Schuh6f3babe2020-01-26 20:34:50 -08001454 LOG(WARNING)
1455 << "Not sending data from before the start of the log file. "
1456 << channel_timestamp.monotonic_event_time.time_since_epoch().count()
1457 << " start " << monotonic_start_time().time_since_epoch().count()
1458 << " " << FlatbufferToJson(channel_data);
Austin Schuhe309d2a2019-11-29 13:25:21 -08001459 }
1460
Austin Schuh858c9f32020-08-31 16:56:12 -07001461 const monotonic_clock::time_point next_time = state->OldestMessageTime();
Austin Schuh6f3babe2020-01-26 20:34:50 -08001462 if (next_time != monotonic_clock::max_time) {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001463 VLOG(1) << "Scheduling " << MaybeNodeName(state->event_loop()->node())
1464 << "wakeup for " << next_time << "("
1465 << state->ToDistributedClock(next_time)
1466 << " distributed), now is " << state->monotonic_now();
Austin Schuh858c9f32020-08-31 16:56:12 -07001467 state->Setup(next_time);
James Kuszmaul314f1672020-01-03 20:02:08 -08001468 } else {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001469 VLOG(1) << MaybeNodeName(state->event_loop()->node())
1470 << "No next message, scheduling shutdown";
1471 // Set a timer up immediately after now to die. If we don't do this,
1472 // then the senders waiting on the message we just read will never get
1473 // called.
Austin Schuheecb9282020-01-08 17:43:30 -08001474 if (event_loop_factory_ != nullptr) {
Austin Schuh858c9f32020-08-31 16:56:12 -07001475 state->Setup(monotonic_now + event_loop_factory_->send_delay() +
1476 std::chrono::nanoseconds(1));
Austin Schuheecb9282020-01-08 17:43:30 -08001477 }
Austin Schuhe309d2a2019-11-29 13:25:21 -08001478 }
Austin Schuh8bd96322020-02-13 21:18:22 -08001479
Austin Schuh2f8fd752020-09-01 22:38:28 -07001480 // Once we make this call, the current time changes. So do everything
1481 // which involves time before changing it. That especially includes
1482 // sending the message.
1483 if (update_time) {
1484 VLOG(1) << MaybeNodeName(state->event_loop()->node())
1485 << "updating offsets";
1486
1487 std::vector<aos::monotonic_clock::time_point> before_times;
1488 before_times.resize(states_.size());
1489 std::transform(states_.begin(), states_.end(), before_times.begin(),
1490 [](const std::unique_ptr<State> &state) {
1491 return state->monotonic_now();
1492 });
1493
1494 for (size_t i = 0; i < states_.size(); ++i) {
Brian Silvermand90905f2020-09-23 14:42:56 -07001495 VLOG(1) << MaybeNodeName(states_[i]->event_loop()->node()) << "before "
1496 << states_[i]->monotonic_now();
Austin Schuh2f8fd752020-09-01 22:38:28 -07001497 }
1498
Austin Schuh8bd96322020-02-13 21:18:22 -08001499 UpdateOffsets();
Austin Schuh2f8fd752020-09-01 22:38:28 -07001500 VLOG(1) << MaybeNodeName(state->event_loop()->node()) << "Now is now "
1501 << state->monotonic_now();
1502
1503 for (size_t i = 0; i < states_.size(); ++i) {
Brian Silvermand90905f2020-09-23 14:42:56 -07001504 VLOG(1) << MaybeNodeName(states_[i]->event_loop()->node()) << "after "
1505 << states_[i]->monotonic_now();
Austin Schuh2f8fd752020-09-01 22:38:28 -07001506 }
1507
1508 // TODO(austin): We should be perfect.
1509 const std::chrono::nanoseconds kTolerance{3};
1510 if (!FLAGS_skip_order_validation) {
1511 CHECK_GE(next_time, state->monotonic_now())
1512 << ": Time skipped the next event.";
1513
1514 for (size_t i = 0; i < states_.size(); ++i) {
1515 CHECK_GE(states_[i]->monotonic_now(), before_times[i] - kTolerance)
1516 << ": Time changed too much on node "
1517 << MaybeNodeName(states_[i]->event_loop()->node());
1518 CHECK_LE(states_[i]->monotonic_now(), before_times[i] + kTolerance)
1519 << ": Time changed too much on node "
1520 << states_[i]->event_loop()->node()->name()->string_view();
1521 }
1522 } else {
1523 if (next_time < state->monotonic_now()) {
1524 LOG(WARNING) << "Check failed: next_time >= "
1525 "state->monotonic_now() ("
1526 << next_time << " vs. " << state->monotonic_now()
1527 << "): Time skipped the next event.";
1528 }
1529 for (size_t i = 0; i < states_.size(); ++i) {
1530 if (states_[i]->monotonic_now() >= before_times[i] - kTolerance) {
1531 LOG(WARNING) << "Check failed: "
1532 "states_[i]->monotonic_now() "
1533 ">= before_times[i] - kTolerance ("
1534 << states_[i]->monotonic_now() << " vs. "
1535 << before_times[i] - kTolerance
1536 << ") : Time changed too much on node "
1537 << MaybeNodeName(states_[i]->event_loop()->node());
1538 }
1539 if (states_[i]->monotonic_now() <= before_times[i] + kTolerance) {
1540 LOG(WARNING) << "Check failed: "
1541 "states_[i]->monotonic_now() "
1542 "<= before_times[i] + kTolerance ("
1543 << states_[i]->monotonic_now() << " vs. "
1544 << before_times[i] - kTolerance
1545 << ") : Time changed too much on node "
1546 << MaybeNodeName(states_[i]->event_loop()->node());
1547 }
1548 }
1549 }
Austin Schuh8bd96322020-02-13 21:18:22 -08001550 }
Austin Schuh2f8fd752020-09-01 22:38:28 -07001551
1552 VLOG(1) << MaybeNodeName(state->event_loop()->node()) << "Done sending at "
1553 << state->event_loop()->context().monotonic_event_time << " now "
1554 << state->monotonic_now();
Austin Schuh858c9f32020-08-31 16:56:12 -07001555 }));
Austin Schuhe309d2a2019-11-29 13:25:21 -08001556
Austin Schuh6f3babe2020-01-26 20:34:50 -08001557 ++live_nodes_;
1558
Austin Schuh858c9f32020-08-31 16:56:12 -07001559 if (state->OldestMessageTime() != monotonic_clock::max_time) {
1560 event_loop->OnRun([state]() { state->Setup(state->OldestMessageTime()); });
Austin Schuhe309d2a2019-11-29 13:25:21 -08001561 }
1562}
1563
1564void LogReader::Deregister() {
James Kuszmaul84ff3e52020-01-03 19:48:53 -08001565 // Make sure that things get destroyed in the correct order, rather than
1566 // relying on getting the order correct in the class definition.
Austin Schuh8bd96322020-02-13 21:18:22 -08001567 for (std::unique_ptr<State> &state : states_) {
Austin Schuh858c9f32020-08-31 16:56:12 -07001568 state->Deregister();
Austin Schuhe309d2a2019-11-29 13:25:21 -08001569 }
Austin Schuh92547522019-12-28 14:33:43 -08001570
James Kuszmaul84ff3e52020-01-03 19:48:53 -08001571 event_loop_factory_unique_ptr_.reset();
1572 event_loop_factory_ = nullptr;
Austin Schuhe309d2a2019-11-29 13:25:21 -08001573}
1574
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -08001575void LogReader::RemapLoggedChannel(std::string_view name, std::string_view type,
1576 std::string_view add_prefix) {
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -08001577 for (size_t ii = 0; ii < logged_configuration()->channels()->size(); ++ii) {
1578 const Channel *const channel = logged_configuration()->channels()->Get(ii);
1579 if (channel->name()->str() == name &&
1580 channel->type()->string_view() == type) {
1581 CHECK_EQ(0u, remapped_channels_.count(ii))
1582 << "Already remapped channel "
1583 << configuration::CleanedChannelToString(channel);
1584 remapped_channels_[ii] = std::string(add_prefix) + std::string(name);
1585 VLOG(1) << "Remapping channel "
1586 << configuration::CleanedChannelToString(channel)
1587 << " to have name " << remapped_channels_[ii];
Austin Schuh6331ef92020-01-07 18:28:09 -08001588 MakeRemappedConfig();
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -08001589 return;
1590 }
1591 }
1592 LOG(FATAL) << "Unabled to locate channel with name " << name << " and type "
1593 << type;
1594}
1595
Austin Schuh01b4c352020-09-21 23:09:39 -07001596void LogReader::RemapLoggedChannel(std::string_view name, std::string_view type,
1597 const Node *node,
1598 std::string_view add_prefix) {
1599 VLOG(1) << "Node is " << aos::FlatbufferToJson(node);
1600 const Channel *remapped_channel =
1601 configuration::GetChannel(logged_configuration(), name, type, "", node);
1602 CHECK(remapped_channel != nullptr) << ": Failed to find {\"name\": \"" << name
1603 << "\", \"type\": \"" << type << "\"}";
1604 VLOG(1) << "Original {\"name\": \"" << name << "\", \"type\": \"" << type
1605 << "\"}";
1606 VLOG(1) << "Remapped "
1607 << aos::configuration::StrippedChannelToString(remapped_channel);
1608
1609 // We want to make /spray on node 0 go to /0/spray by snooping the maps. And
1610 // we want it to degrade if the heuristics fail to just work.
1611 //
1612 // The easiest way to do this is going to be incredibly specific and verbose.
1613 // Look up /spray, to /0/spray. Then, prefix the result with /original to get
1614 // /original/0/spray. Then, create a map from /original/spray to
1615 // /original/0/spray for just the type we were asked for.
1616 if (name != remapped_channel->name()->string_view()) {
1617 MapT new_map;
1618 new_map.match = std::make_unique<ChannelT>();
1619 new_map.match->name = absl::StrCat(add_prefix, name);
1620 new_map.match->type = type;
1621 if (node != nullptr) {
1622 new_map.match->source_node = node->name()->str();
1623 }
1624 new_map.rename = std::make_unique<ChannelT>();
1625 new_map.rename->name =
1626 absl::StrCat(add_prefix, remapped_channel->name()->string_view());
1627 maps_.emplace_back(std::move(new_map));
1628 }
1629
1630 const size_t channel_index =
1631 configuration::ChannelIndex(logged_configuration(), remapped_channel);
1632 CHECK_EQ(0u, remapped_channels_.count(channel_index))
1633 << "Already remapped channel "
1634 << configuration::CleanedChannelToString(remapped_channel);
1635 remapped_channels_[channel_index] =
1636 absl::StrCat(add_prefix, remapped_channel->name()->string_view());
1637 MakeRemappedConfig();
1638}
1639
// Rebuilds remapped_configuration_ from the logged (or replay) configuration
// plus all accumulated renames in remapped_channels_ and map entries in
// maps_.  Must be called before any event loops are created, since senders
// are bound against the resulting config.
void LogReader::MakeRemappedConfig() {
  // Changing the config after scheduling would leave already-created senders
  // pointing at stale channels, so refuse to do it.
  for (std::unique_ptr<State> &state : states_) {
    if (state) {
      CHECK(!state->event_loop())
          << ": Can't change the mapping after the events are scheduled.";
    }
  }

  // If no remapping occurred and we are using the original config, then there
  // is nothing interesting to do here.
  if (remapped_channels_.empty() && replay_configuration_ == nullptr) {
    remapped_configuration_ = logged_configuration();
    return;
  }
  // Config to copy Channel definitions from. Use the specified
  // replay_configuration_ if it has been provided.
  const Configuration *const base_config = replay_configuration_ == nullptr
                                               ? logged_configuration()
                                               : replay_configuration_;
  // The remapped config will be identical to the base_config, except that it
  // will have a bunch of extra channels in the channel list, which are exact
  // copies of the remapped channels, but with different names.
  // Because the flatbuffers API is a pain to work with, this requires a bit of
  // a song-and-dance to get copied over.
  // The order of operations is to:
  // 1) Make a flatbuffer builder for a config that will just contain a list of
  //    the new channels that we want to add.
  // 2) For each channel that we are remapping:
  //    a) Make a buffer/builder and construct into it a Channel table that only
  //       contains the new name for the channel.
  //    b) Merge the new channel with just the name into the channel that we are
  //       trying to copy, built in the flatbuffer builder made in 1. This gives
  //       us the new channel definition that we need.
  // 3) Using this list of offsets, build the Configuration of just new
  //    Channels.
  // 4) Merge the Configuration with the new Channels into the base_config.
  // 5) Call MergeConfiguration() on that result to give MergeConfiguration a
  //    chance to sanitize the config.

  // This is the builder that we use for the config containing all the new
  // channels.
  flatbuffers::FlatBufferBuilder new_config_fbb;
  new_config_fbb.ForceDefaults(true);
  std::vector<flatbuffers::Offset<Channel>> channel_offsets;
  for (auto &pair : remapped_channels_) {
    // This is the builder that we use for creating the Channel with just the
    // new name.  A separate builder is required because flatbuffers does not
    // allow nested table construction within a single builder.
    flatbuffers::FlatBufferBuilder new_name_fbb;
    new_name_fbb.ForceDefaults(true);
    const flatbuffers::Offset<flatbuffers::String> name_offset =
        new_name_fbb.CreateString(pair.second);
    ChannelBuilder new_name_builder(new_name_fbb);
    new_name_builder.add_name(name_offset);
    new_name_fbb.Finish(new_name_builder.Finish());
    const FlatbufferDetachedBuffer<Channel> new_name = new_name_fbb.Release();
    // Retrieve the channel that we want to copy, confirming that it is
    // actually present in base_config.
    const Channel *const base_channel = CHECK_NOTNULL(configuration::GetChannel(
        base_config, logged_configuration()->channels()->Get(pair.first), "",
        nullptr));
    // Actually create the new channel and put it into the vector of Offsets
    // that we will use to create the new Configuration.
    channel_offsets.emplace_back(MergeFlatBuffers<Channel>(
        reinterpret_cast<const flatbuffers::Table *>(base_channel),
        reinterpret_cast<const flatbuffers::Table *>(&new_name.message()),
        &new_config_fbb));
  }
  // Create the Configuration containing the new channels that we want to add.
  const auto new_channel_vector_offsets =
      new_config_fbb.CreateVector(channel_offsets);

  // Now create the new maps.  All strings must be created before any table
  // builder is live, so they are built up front.
  std::vector<flatbuffers::Offset<Map>> map_offsets;
  for (const MapT &map : maps_) {
    const flatbuffers::Offset<flatbuffers::String> match_name_offset =
        new_config_fbb.CreateString(map.match->name);
    const flatbuffers::Offset<flatbuffers::String> match_type_offset =
        new_config_fbb.CreateString(map.match->type);
    const flatbuffers::Offset<flatbuffers::String> rename_name_offset =
        new_config_fbb.CreateString(map.rename->name);
    flatbuffers::Offset<flatbuffers::String> match_source_node_offset;
    if (!map.match->source_node.empty()) {
      match_source_node_offset =
          new_config_fbb.CreateString(map.match->source_node);
    }
    Channel::Builder match_builder(new_config_fbb);
    match_builder.add_name(match_name_offset);
    match_builder.add_type(match_type_offset);
    if (!map.match->source_node.empty()) {
      match_builder.add_source_node(match_source_node_offset);
    }
    const flatbuffers::Offset<Channel> match_offset = match_builder.Finish();

    Channel::Builder rename_builder(new_config_fbb);
    rename_builder.add_name(rename_name_offset);
    const flatbuffers::Offset<Channel> rename_offset = rename_builder.Finish();

    Map::Builder map_builder(new_config_fbb);
    map_builder.add_match(match_offset);
    map_builder.add_rename(rename_offset);
    map_offsets.emplace_back(map_builder.Finish());
  }

  const auto new_maps_offsets = new_config_fbb.CreateVector(map_offsets);

  ConfigurationBuilder new_config_builder(new_config_fbb);
  new_config_builder.add_channels(new_channel_vector_offsets);
  new_config_builder.add_maps(new_maps_offsets);
  new_config_fbb.Finish(new_config_builder.Finish());
  const FlatbufferDetachedBuffer<Configuration> new_name_config =
      new_config_fbb.Release();
  // Merge the new channels configuration into the base_config, giving us the
  // remapped configuration.
  remapped_configuration_buffer_ =
      std::make_unique<FlatbufferDetachedBuffer<Configuration>>(
          MergeFlatBuffers<Configuration>(base_config,
                                          &new_name_config.message()));
  // Call MergeConfiguration to deal with sanitizing the config.
  remapped_configuration_buffer_ =
      std::make_unique<FlatbufferDetachedBuffer<Configuration>>(
          configuration::MergeConfiguration(*remapped_configuration_buffer_));

  remapped_configuration_ = &remapped_configuration_buffer_->message();
}
1764
Austin Schuh6f3babe2020-01-26 20:34:50 -08001765const Channel *LogReader::RemapChannel(const EventLoop *event_loop,
1766 const Channel *channel) {
1767 std::string_view channel_name = channel->name()->string_view();
1768 std::string_view channel_type = channel->type()->string_view();
1769 const int channel_index =
1770 configuration::ChannelIndex(logged_configuration(), channel);
1771 // If the channel is remapped, find the correct channel name to use.
1772 if (remapped_channels_.count(channel_index) > 0) {
Austin Schuhee711052020-08-24 16:06:09 -07001773 VLOG(3) << "Got remapped channel on "
Austin Schuh6f3babe2020-01-26 20:34:50 -08001774 << configuration::CleanedChannelToString(channel);
1775 channel_name = remapped_channels_[channel_index];
1776 }
1777
Austin Schuhee711052020-08-24 16:06:09 -07001778 VLOG(2) << "Going to remap channel " << channel_name << " " << channel_type;
Austin Schuh6f3babe2020-01-26 20:34:50 -08001779 const Channel *remapped_channel = configuration::GetChannel(
1780 event_loop->configuration(), channel_name, channel_type,
1781 event_loop->name(), event_loop->node());
1782
1783 CHECK(remapped_channel != nullptr)
1784 << ": Unable to send {\"name\": \"" << channel_name << "\", \"type\": \""
1785 << channel_type << "\"} because it is not in the provided configuration.";
1786
1787 return remapped_channel;
1788}
1789
// Takes ownership of the ChannelMerger that supplies this node's sorted log
// messages during replay.
LogReader::State::State(std::unique_ptr<ChannelMerger> channel_merger)
    : channel_merger_(std::move(channel_merger)) {}
1792
1793EventLoop *LogReader::State::SetNodeEventLoopFactory(
1794 NodeEventLoopFactory *node_event_loop_factory) {
1795 node_event_loop_factory_ = node_event_loop_factory;
1796 event_loop_unique_ptr_ =
1797 node_event_loop_factory_->MakeEventLoop("log_reader");
1798 return event_loop_unique_ptr_.get();
1799}
1800
// Sizes all the per-channel parallel vectors together.  Must be called
// before SetChannel() is used for any index.
void LogReader::State::SetChannelCount(size_t count) {
  channels_.resize(count);
  filters_.resize(count);
  channel_target_event_loop_factory_.resize(count);
}
1806
// Installs the replay plumbing for one channel: the sender used to
// re-publish logged data, the time-offset estimator for cross-node
// timestamps (nullptr for local channels), and the event loop factory of
// the node the data originated from (nullptr outside simulation).
void LogReader::State::SetChannel(
    size_t channel, std::unique_ptr<RawSender> sender,
    message_bridge::NoncausalOffsetEstimator *filter,
    NodeEventLoopFactory *channel_target_event_loop_factory) {
  channels_[channel] = std::move(sender);
  filters_[channel] = filter;
  channel_target_event_loop_factory_[channel] =
      channel_target_event_loop_factory;
}
1816
1817std::tuple<TimestampMerger::DeliveryTimestamp, int,
1818 FlatbufferVector<MessageHeader>>
1819LogReader::State::PopOldest(bool *update_time) {
1820 CHECK_GT(sorted_messages_.size(), 0u);
1821
1822 std::tuple<TimestampMerger::DeliveryTimestamp, int,
Austin Schuh2f8fd752020-09-01 22:38:28 -07001823 FlatbufferVector<MessageHeader>,
1824 message_bridge::NoncausalOffsetEstimator *>
Austin Schuh858c9f32020-08-31 16:56:12 -07001825 result = std::move(sorted_messages_.front());
Austin Schuh2f8fd752020-09-01 22:38:28 -07001826 VLOG(2) << MaybeNodeName(event_loop_->node()) << "PopOldest Popping "
Austin Schuh858c9f32020-08-31 16:56:12 -07001827 << std::get<0>(result).monotonic_event_time;
1828 sorted_messages_.pop_front();
1829 SeedSortedMessages();
1830
Austin Schuh2f8fd752020-09-01 22:38:28 -07001831 if (std::get<3>(result) != nullptr) {
1832 *update_time = std::get<3>(result)->Pop(
1833 event_loop_->node(), std::get<0>(result).monotonic_event_time);
1834 } else {
1835 *update_time = false;
1836 }
Austin Schuh858c9f32020-08-31 16:56:12 -07001837 return std::make_tuple(std::get<0>(result), std::get<1>(result),
1838 std::move(std::get<2>(result)));
1839}
1840
1841monotonic_clock::time_point LogReader::State::OldestMessageTime() const {
1842 if (sorted_messages_.size() > 0) {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001843 VLOG(2) << MaybeNodeName(event_loop_->node()) << "oldest message at "
Austin Schuh858c9f32020-08-31 16:56:12 -07001844 << std::get<0>(sorted_messages_.front()).monotonic_event_time;
1845 return std::get<0>(sorted_messages_.front()).monotonic_event_time;
1846 }
1847
1848 return channel_merger_->OldestMessageTime();
1849}
1850
1851void LogReader::State::SeedSortedMessages() {
1852 const aos::monotonic_clock::time_point end_queue_time =
1853 (sorted_messages_.size() > 0
1854 ? std::get<0>(sorted_messages_.front()).monotonic_event_time
1855 : channel_merger_->monotonic_start_time()) +
1856 std::chrono::seconds(2);
1857
1858 while (true) {
1859 if (channel_merger_->OldestMessageTime() == monotonic_clock::max_time) {
1860 return;
1861 }
1862 if (sorted_messages_.size() > 0) {
1863 // Stop placing sorted messages on the list once we have 2 seconds
1864 // queued up (but queue at least until the log starts.
1865 if (end_queue_time <
1866 std::get<0>(sorted_messages_.back()).monotonic_event_time) {
1867 return;
1868 }
1869 }
1870
1871 TimestampMerger::DeliveryTimestamp channel_timestamp;
1872 int channel_index;
1873 FlatbufferVector<MessageHeader> channel_data =
1874 FlatbufferVector<MessageHeader>::Empty();
1875
Austin Schuh2f8fd752020-09-01 22:38:28 -07001876 message_bridge::NoncausalOffsetEstimator *filter = nullptr;
1877
Austin Schuh858c9f32020-08-31 16:56:12 -07001878 std::tie(channel_timestamp, channel_index, channel_data) =
1879 channel_merger_->PopOldest();
1880
Austin Schuh2f8fd752020-09-01 22:38:28 -07001881 // Skip any messages without forwarding information.
1882 if (channel_timestamp.monotonic_remote_time != monotonic_clock::min_time) {
1883 // Got a forwarding timestamp!
1884 filter = filters_[channel_index];
1885
1886 CHECK(filter != nullptr);
1887
1888 // Call the correct method depending on if we are the forward or
1889 // reverse direction here.
1890 filter->Sample(event_loop_->node(),
1891 channel_timestamp.monotonic_event_time,
1892 channel_timestamp.monotonic_remote_time);
1893 }
Austin Schuh858c9f32020-08-31 16:56:12 -07001894 sorted_messages_.emplace_back(channel_timestamp, channel_index,
Austin Schuh2f8fd752020-09-01 22:38:28 -07001895 std::move(channel_data), filter);
Austin Schuh858c9f32020-08-31 16:56:12 -07001896 }
1897}
1898
1899void LogReader::State::Deregister() {
1900 for (size_t i = 0; i < channels_.size(); ++i) {
1901 channels_[i].reset();
1902 }
1903 event_loop_unique_ptr_.reset();
1904 event_loop_ = nullptr;
1905 timer_handler_ = nullptr;
1906 node_event_loop_factory_ = nullptr;
1907}
1908
Austin Schuhe309d2a2019-11-29 13:25:21 -08001909} // namespace logger
1910} // namespace aos