James Kuszmaul38735e82019-12-07 16:42:06 -08001#include "aos/events/logging/logger.h"
Austin Schuhe309d2a2019-11-29 13:25:21 -08002
3#include <fcntl.h>
Austin Schuh4c4e0092019-12-22 16:18:03 -08004#include <limits.h>
Austin Schuhe309d2a2019-11-29 13:25:21 -08005#include <sys/stat.h>
6#include <sys/types.h>
7#include <sys/uio.h>
8#include <vector>
9
Austin Schuh8bd96322020-02-13 21:18:22 -080010#include "Eigen/Dense"
Austin Schuh2f8fd752020-09-01 22:38:28 -070011#include "absl/strings/escaping.h"
Austin Schuhe309d2a2019-11-29 13:25:21 -080012#include "absl/types/span.h"
13#include "aos/events/event_loop.h"
James Kuszmaul38735e82019-12-07 16:42:06 -080014#include "aos/events/logging/logger_generated.h"
Austin Schuh64fab802020-09-09 22:47:47 -070015#include "aos/events/logging/uuid.h"
Austin Schuhe309d2a2019-11-29 13:25:21 -080016#include "aos/flatbuffer_merge.h"
Austin Schuh288479d2019-12-18 19:47:52 -080017#include "aos/network/team_number.h"
Austin Schuhe309d2a2019-11-29 13:25:21 -080018#include "aos/time/time.h"
19#include "flatbuffers/flatbuffers.h"
Austin Schuh2f8fd752020-09-01 22:38:28 -070020#include "third_party/gmp/gmpxx.h"
Austin Schuhe309d2a2019-11-29 13:25:21 -080021
Austin Schuh15649d62019-12-28 16:36:38 -080022DEFINE_bool(skip_missing_forwarding_entries, false,
23 "If true, drop any forwarding entries with missing data. If "
24 "false, CHECK.");
Austin Schuhe309d2a2019-11-29 13:25:21 -080025
Austin Schuh8bd96322020-02-13 21:18:22 -080026DEFINE_bool(timestamps_to_csv, false,
27 "If true, write all the time synchronization information to a set "
28 "of CSV files in /tmp/. This should only be needed when debugging "
29 "time synchronization.");
30
Austin Schuh2f8fd752020-09-01 22:38:28 -070031DEFINE_bool(skip_order_validation, false,
32            "If true, ignore any out-of-order messages in replay");
33
Austin Schuhe309d2a2019-11-29 13:25:21 -080034namespace aos {
35namespace logger {
Austin Schuhe309d2a2019-11-29 13:25:21 -080036namespace chrono = std::chrono;
37
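// Stamps an existing log file header with the given parts UUID and parts
// index before it is written at the start of a log part. The UUID is copied
// over the placeholder in place, so it must be exactly the same length as
// the string already stored in the header.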
Austin Schuh64fab802020-09-09 22:47:47 -070038void LogNamer::UpdateHeader(
39 aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> *header,
40 const UUID &uuid, int parts_index) {
41 header->mutable_message()->mutate_parts_index(parts_index);
42 CHECK_EQ(uuid.string_view().size(),
43 header->mutable_message()->mutable_parts_uuid()->size());
44 std::copy(uuid.string_view().begin(), uuid.string_view().end(),
45 reinterpret_cast<char *>(
46 header->mutable_message()->mutable_parts_uuid()->Data()));
47}
48
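// Writes the header for the given node: to the local data writer when the
// node is the one we are running on, otherwise to every data writer
// associated with that node.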
Austin Schuh2f8fd752020-09-01 22:38:28 -070049void MultiNodeLogNamer::WriteHeader(
Austin Schuh64fab802020-09-09 22:47:47 -070050 aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> *header,
Austin Schuh2f8fd752020-09-01 22:38:28 -070051 const Node *node) {
52 if (node == this->node()) {
Austin Schuh64fab802020-09-09 22:47:47 -070053 UpdateHeader(header, uuid_, part_number_);
54 data_writer_->WriteSizedFlatbuffer(header->full_span());
Austin Schuh2f8fd752020-09-01 22:38:28 -070055 } else {
56 for (std::pair<const Channel *const, DataWriter> &data_writer :
57 data_writers_) {
58 if (node == data_writer.second.node) {
Austin Schuh64fab802020-09-09 22:47:47 -070059 UpdateHeader(header, data_writer.second.uuid,
60 data_writer.second.part_number);
61 data_writer.second.writer->WriteSizedFlatbuffer(header->full_span());
Austin Schuh2f8fd752020-09-01 22:38:28 -070062 }
63 }
64 }
65}
66
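// Starts a new log part for the given node: bumps the part number, reopens
// the affected writer(s), and rewrites the updated header at the front of
// the new part so every part is self-describing.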
67void MultiNodeLogNamer::Rotate(
68 const Node *node,
Austin Schuh64fab802020-09-09 22:47:47 -070069 aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> *header) {
Austin Schuh2f8fd752020-09-01 22:38:28 -070070 if (node == this->node()) {
71 ++part_number_;
72 *data_writer_ = std::move(*OpenDataWriter());
Austin Schuh64fab802020-09-09 22:47:47 -070073 UpdateHeader(header, uuid_, part_number_);
74 data_writer_->WriteSizedFlatbuffer(header->full_span());
Austin Schuh2f8fd752020-09-01 22:38:28 -070075 } else {
76 for (std::pair<const Channel *const, DataWriter> &data_writer :
77 data_writers_) {
78 if (node == data_writer.second.node) {
79 ++data_writer.second.part_number;
80 data_writer.second.rotate(data_writer.first, &data_writer.second);
Austin Schuh64fab802020-09-09 22:47:47 -070081 UpdateHeader(header, data_writer.second.uuid,
82 data_writer.second.part_number);
83 data_writer.second.writer->WriteSizedFlatbuffer(header->full_span());
Austin Schuh2f8fd752020-09-01 22:38:28 -070084 }
85 }
86 }
87}
88
89Logger::Logger(std::string_view base_name, EventLoop *event_loop,
Austin Schuhe309d2a2019-11-29 13:25:21 -080090 std::chrono::milliseconds polling_period)
Austin Schuh0c297012020-09-16 18:41:59 -070091 : Logger(base_name, event_loop, event_loop->configuration(),
92 polling_period) {}
93Logger::Logger(std::string_view base_name, EventLoop *event_loop,
94 const Configuration *configuration,
95 std::chrono::milliseconds polling_period)
Austin Schuh2f8fd752020-09-01 22:38:28 -070096 : Logger(std::make_unique<LocalLogNamer>(base_name, event_loop->node()),
Austin Schuh0c297012020-09-16 18:41:59 -070097 event_loop, configuration, polling_period) {}
98Logger::Logger(std::unique_ptr<LogNamer> log_namer, EventLoop *event_loop,
99 std::chrono::milliseconds polling_period)
100 : Logger(std::move(log_namer), event_loop, event_loop->configuration(),
101 polling_period) {}
Austin Schuh6f3babe2020-01-26 20:34:50 -0800102
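// The main constructor. This records the logging node's UUID and hostname,
// figures out which channels need data, delivery-timestamp, and forwarded
// remote-timestamp writers on this node, and schedules StartLogging() to
// run once the event loop starts.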
103Logger::Logger(std::unique_ptr<LogNamer> log_namer, EventLoop *event_loop,
Austin Schuh0c297012020-09-16 18:41:59 -0700104 const Configuration *configuration,
Austin Schuh6f3babe2020-01-26 20:34:50 -0800105 std::chrono::milliseconds polling_period)
Austin Schuhe309d2a2019-11-29 13:25:21 -0800106 : event_loop_(event_loop),
Austin Schuh64fab802020-09-09 22:47:47 -0700107 uuid_(UUID::Random()),
Austin Schuh6f3babe2020-01-26 20:34:50 -0800108 log_namer_(std::move(log_namer)),
Austin Schuh0c297012020-09-16 18:41:59 -0700109 configuration_(configuration),
110 name_(network::GetHostname()),
Austin Schuhe309d2a2019-11-29 13:25:21 -0800111 timer_handler_(event_loop_->AddTimer([this]() { DoLogData(); })),
Austin Schuh2f8fd752020-09-01 22:38:28 -0700112 polling_period_(polling_period),
113 server_statistics_fetcher_(
114 configuration::MultiNode(event_loop_->configuration())
115 ? event_loop_->MakeFetcher<message_bridge::ServerStatistics>(
116 "/aos")
117 : aos::Fetcher<message_bridge::ServerStatistics>()) {
Austin Schuh6f3babe2020-01-26 20:34:50 -0800118 VLOG(1) << "Starting logger for " << FlatbufferToJson(event_loop_->node());
119 int channel_index = 0;
Austin Schuh2f8fd752020-09-01 22:38:28 -0700120
121 // Find all the nodes which are logging timestamps on our node.
122 std::set<const Node *> timestamp_logger_nodes;
Austin Schuh0c297012020-09-16 18:41:59 -0700123 for (const Channel *channel : *configuration_->channels()) {
Austin Schuh2f8fd752020-09-01 22:38:28 -0700124 if (!configuration::ChannelIsSendableOnNode(channel, event_loop_->node()) ||
125 !channel->has_destination_nodes()) {
126 continue;
127 }
128 for (const Connection *connection : *channel->destination_nodes()) {
129 const Node *other_node = configuration::GetNode(
Austin Schuh0c297012020-09-16 18:41:59 -0700130 configuration_, connection->name()->string_view());
Austin Schuh2f8fd752020-09-01 22:38:28 -0700131
132 if (configuration::ConnectionDeliveryTimeIsLoggedOnNode(
133 connection, event_loop_->node())) {
134 VLOG(1) << "Timestamps are logged from "
135 << FlatbufferToJson(other_node);
136 timestamp_logger_nodes.insert(other_node);
137 }
138 }
139 }
140
141 std::map<const Channel *, const Node *> timestamp_logger_channels;
142
143 // Now that we have all the nodes accumulated, make remote timestamp loggers
144 // for them.
145 for (const Node *node : timestamp_logger_nodes) {
146 const Channel *channel = configuration::GetChannel(
Austin Schuh0c297012020-09-16 18:41:59 -0700147 configuration_,
Austin Schuh2f8fd752020-09-01 22:38:28 -0700148 absl::StrCat("/aos/remote_timestamps/", node->name()->string_view()),
149 logger::MessageHeader::GetFullyQualifiedName(), event_loop_->name(),
150 event_loop_->node());
151
152 CHECK(channel != nullptr)
153 << ": Remote timestamps are logged on "
154 << event_loop_->node()->name()->string_view()
155 << " but can't find channel /aos/remote_timestamps/"
156 << node->name()->string_view();
157 timestamp_logger_channels.insert(std::make_pair(channel, node));
158 }
159
160 const size_t our_node_index = configuration::GetNodeIndex(
Austin Schuh0c297012020-09-16 18:41:59 -0700161 configuration_, event_loop_->node());
Austin Schuh2f8fd752020-09-01 22:38:28 -0700162
Austin Schuh0c297012020-09-16 18:41:59 -0700163 for (const Channel *config_channel : *configuration_->channels()) {
164 // The MakeRawFetcher method needs a channel which is in the event loop
165 // configuration() object, not the configuration_ object. Go look that up
166 // from the config.
167 const Channel *channel = aos::configuration::GetChannel(
168 event_loop_->configuration(), config_channel->name()->string_view(),
169 config_channel->type()->string_view(), "", event_loop_->node());
170
Austin Schuhe309d2a2019-11-29 13:25:21 -0800171 FetcherStruct fs;
Austin Schuh2f8fd752020-09-01 22:38:28 -0700172 fs.node_index = our_node_index;
Austin Schuh6f3babe2020-01-26 20:34:50 -0800173 const bool is_local =
174 configuration::ChannelIsSendableOnNode(channel, event_loop_->node());
175
Austin Schuh15649d62019-12-28 16:36:38 -0800176 const bool is_readable =
177 configuration::ChannelIsReadableOnNode(channel, event_loop_->node());
178 const bool log_message = configuration::ChannelMessageIsLoggedOnNode(
179 channel, event_loop_->node()) &&
180 is_readable;
181
182 const bool log_delivery_times =
183 (event_loop_->node() == nullptr)
184 ? false
185 : configuration::ConnectionDeliveryTimeIsLoggedOnNode(
186 channel, event_loop_->node(), event_loop_->node());
187
Austin Schuh2f8fd752020-09-01 22:38:28 -0700188 // Now, detect a MessageHeader timestamp logger where we should just log the
189 // contents to a file directly.
190 const bool log_contents = timestamp_logger_channels.find(channel) !=
191 timestamp_logger_channels.end();
192 const Node *timestamp_node =
193 log_contents ? timestamp_logger_channels.find(channel)->second
194 : nullptr;
195
196 if (log_message || log_delivery_times || log_contents) {
Austin Schuh15649d62019-12-28 16:36:38 -0800197 fs.fetcher = event_loop->MakeRawFetcher(channel);
198 VLOG(1) << "Logging channel "
199 << configuration::CleanedChannelToString(channel);
200
201 if (log_delivery_times) {
Austin Schuh6f3babe2020-01-26 20:34:50 -0800202 VLOG(1) << " Delivery times";
203 fs.timestamp_writer = log_namer_->MakeTimestampWriter(channel);
Austin Schuh15649d62019-12-28 16:36:38 -0800204 }
Austin Schuh6f3babe2020-01-26 20:34:50 -0800205 if (log_message) {
206 VLOG(1) << " Data";
207 fs.writer = log_namer_->MakeWriter(channel);
208 if (!is_local) {
209 fs.log_type = LogType::kLogRemoteMessage;
210 }
211 }
Austin Schuh2f8fd752020-09-01 22:38:28 -0700212 if (log_contents) {
213 VLOG(1) << "Timestamp logger channel "
214 << configuration::CleanedChannelToString(channel);
215 fs.contents_writer =
216 log_namer_->MakeForwardedTimestampWriter(channel, timestamp_node);
Austin Schuh0c297012020-09-16 18:41:59 -0700217 fs.node_index =
218 configuration::GetNodeIndex(configuration_, timestamp_node);
Austin Schuh2f8fd752020-09-01 22:38:28 -0700219 }
Austin Schuh6f3babe2020-01-26 20:34:50 -0800220 fs.channel_index = channel_index;
221 fs.written = false;
222 fetchers_.emplace_back(std::move(fs));
Austin Schuh15649d62019-12-28 16:36:38 -0800223 }
Austin Schuh6f3babe2020-01-26 20:34:50 -0800224 ++channel_index;
Austin Schuhe309d2a2019-11-29 13:25:21 -0800225 }
226
Austin Schuh0c297012020-09-16 18:41:59 -0700227 node_state_.resize(configuration::MultiNode(configuration_)
228 ? configuration_->nodes()->size()
Austin Schuh2f8fd752020-09-01 22:38:28 -0700229 : 1u);
Austin Schuhe309d2a2019-11-29 13:25:21 -0800230
Austin Schuh2f8fd752020-09-01 22:38:28 -0700231 for (const Node *node : log_namer_->nodes()) {
232 const int node_index =
Austin Schuh0c297012020-09-16 18:41:59 -0700233 configuration::GetNodeIndex(configuration_, node);
Austin Schuhe309d2a2019-11-29 13:25:21 -0800234
Austin Schuh2f8fd752020-09-01 22:38:28 -0700235 node_state_[node_index].log_file_header = MakeHeader(node);
236 }
Austin Schuhe309d2a2019-11-29 13:25:21 -0800237
Austin Schuh2f8fd752020-09-01 22:38:28 -0700238 // When things start, we want to log the header, then the most recent
239 // messages available on each fetcher to capture the previous state, then
240 // start polling.
241 event_loop_->OnRun([this]() { StartLogging(); });
Austin Schuhe309d2a2019-11-29 13:25:21 -0800242}
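// Typical usage (a minimal sketch; the base name and polling period are
// illustrative, not defaults defined here):
//   aos::logger::Logger logger("/tmp/log_base_name", event_loop,
//                              std::chrono::milliseconds(100));
// Logging itself starts from the OnRun handler installed above.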
243
Austin Schuh0c297012020-09-16 18:41:59 -0700244Logger::~Logger() {
245 // If we are replaying a log file, or in simulation, we want to force the last
246 // bit of data to be logged. The easiest way to deal with this is to poll
247  // everything as we go to destroy the class, i.e., shut down the logger, and
248 // write it to disk.
249 DoLogData();
250}
251
Austin Schuh2f8fd752020-09-01 22:38:28 -0700252void Logger::StartLogging() {
253 // Grab data from each channel right before we declare the log file started
254  // so we can capture the latest message on each channel. This ensures that
255  // non-periodic messages, such as the configuration, still get logged.
256 for (FetcherStruct &f : fetchers_) {
257 f.written = !f.fetcher->Fetch();
258 }
259
260 // Clear out any old timestamps in case we are re-starting logging.
261 for (size_t i = 0; i < node_state_.size(); ++i) {
262 SetStartTime(i, monotonic_clock::min_time, realtime_clock::min_time);
263 }
264
265 WriteHeader();
266
267 LOG(INFO) << "Logging node as " << FlatbufferToJson(event_loop_->node())
268 << " start_time " << last_synchronized_time_;
269
270 timer_handler_->Setup(event_loop_->monotonic_now() + polling_period_,
271 polling_period_);
272}
273
Austin Schuhfa895892020-01-07 20:07:41 -0800274void Logger::WriteHeader() {
Austin Schuh0c297012020-09-16 18:41:59 -0700275 if (configuration::MultiNode(configuration_)) {
Austin Schuh2f8fd752020-09-01 22:38:28 -0700276 server_statistics_fetcher_.Fetch();
277 }
278
279 aos::monotonic_clock::time_point monotonic_start_time =
280 event_loop_->monotonic_now();
281 aos::realtime_clock::time_point realtime_start_time =
282 event_loop_->realtime_now();
283
284  // We need to pick a point in time to declare the log file "started". That
285  // point is now. It needs to be after everything is fetched so that the
286 // fetchers are all pointed at the most recent message before the start
287 // time.
288 last_synchronized_time_ = monotonic_start_time;
289
Austin Schuh6f3babe2020-01-26 20:34:50 -0800290 for (const Node *node : log_namer_->nodes()) {
Austin Schuh2f8fd752020-09-01 22:38:28 -0700291 const int node_index =
Austin Schuh0c297012020-09-16 18:41:59 -0700292 configuration::GetNodeIndex(configuration_, node);
Austin Schuh2f8fd752020-09-01 22:38:28 -0700293 MaybeUpdateTimestamp(node, node_index, monotonic_start_time,
294 realtime_start_time);
Austin Schuh64fab802020-09-09 22:47:47 -0700295 log_namer_->WriteHeader(&node_state_[node_index].log_file_header, node);
Austin Schuh6f3babe2020-01-26 20:34:50 -0800296 }
297}
Austin Schuh8bd96322020-02-13 21:18:22 -0800298
Austin Schuh2f8fd752020-09-01 22:38:28 -0700299void Logger::WriteMissingTimestamps() {
Austin Schuh0c297012020-09-16 18:41:59 -0700300 if (configuration::MultiNode(configuration_)) {
Austin Schuh2f8fd752020-09-01 22:38:28 -0700301 server_statistics_fetcher_.Fetch();
302 } else {
303 return;
304 }
305
306 if (server_statistics_fetcher_.get() == nullptr) {
307 return;
308 }
309
310 for (const Node *node : log_namer_->nodes()) {
311 const int node_index =
Austin Schuh0c297012020-09-16 18:41:59 -0700312 configuration::GetNodeIndex(configuration_, node);
Austin Schuh2f8fd752020-09-01 22:38:28 -0700313 if (MaybeUpdateTimestamp(
314 node, node_index,
315 server_statistics_fetcher_.context().monotonic_event_time,
316 server_statistics_fetcher_.context().realtime_event_time)) {
Austin Schuh64fab802020-09-09 22:47:47 -0700317 log_namer_->Rotate(node, &node_state_[node_index].log_file_header);
Austin Schuh2f8fd752020-09-01 22:38:28 -0700318 }
319 }
320}
321
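// Records the start time for a node and patches it into that node's cached
// log file header, so headers written later (e.g. on rotation) carry the
// real start time instead of min_time.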
322void Logger::SetStartTime(size_t node_index,
323 aos::monotonic_clock::time_point monotonic_start_time,
324 aos::realtime_clock::time_point realtime_start_time) {
325 node_state_[node_index].monotonic_start_time = monotonic_start_time;
326 node_state_[node_index].realtime_start_time = realtime_start_time;
327 node_state_[node_index]
328 .log_file_header.mutable_message()
329 ->mutate_monotonic_start_time(
330 std::chrono::duration_cast<std::chrono::nanoseconds>(
331 monotonic_start_time.time_since_epoch())
332 .count());
333 if (node_state_[node_index]
334 .log_file_header.mutable_message()
335 ->has_realtime_start_time()) {
336 node_state_[node_index]
337 .log_file_header.mutable_message()
338 ->mutate_realtime_start_time(
339 std::chrono::duration_cast<std::chrono::nanoseconds>(
340 realtime_start_time.time_since_epoch())
341 .count());
342 }
343}
344
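// Attempts to set the start time for a node, returning true on success. For
// the local node this always succeeds; for a remote node it waits until the
// message bridge reports a connection with a valid monotonic offset so the
// remote start time can be compensated onto our clock.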
345bool Logger::MaybeUpdateTimestamp(
346 const Node *node, int node_index,
347 aos::monotonic_clock::time_point monotonic_start_time,
348 aos::realtime_clock::time_point realtime_start_time) {
349  // Bail early if the start times are already set.
350 if (node_state_[node_index].monotonic_start_time !=
351 monotonic_clock::min_time) {
352 return false;
353 }
Austin Schuh0c297012020-09-16 18:41:59 -0700354 if (configuration::MultiNode(configuration_)) {
Austin Schuh2f8fd752020-09-01 22:38:28 -0700355 if (event_loop_->node() == node) {
356 // There are no offsets to compute for ourself, so always succeed.
357 SetStartTime(node_index, monotonic_start_time, realtime_start_time);
358 return true;
359 } else if (server_statistics_fetcher_.get() != nullptr) {
360 // We must be a remote node now. Look for the connection and see if it is
361 // connected.
362
363 for (const message_bridge::ServerConnection *connection :
364 *server_statistics_fetcher_->connections()) {
365 if (connection->node()->name()->string_view() !=
366 node->name()->string_view()) {
367 continue;
368 }
369
370 if (connection->state() != message_bridge::State::CONNECTED) {
371 VLOG(1) << node->name()->string_view()
372 << " is not connected, can't start it yet.";
373 break;
374 }
375
376 if (!connection->has_monotonic_offset()) {
377 VLOG(1) << "Missing monotonic offset for setting start time for node "
378 << aos::FlatbufferToJson(node);
379 break;
380 }
381
382 VLOG(1) << "Updating start time for " << aos::FlatbufferToJson(node);
383
384 // Found it and it is connected. Compensate and go.
385 monotonic_start_time +=
386 std::chrono::nanoseconds(connection->monotonic_offset());
387
388 SetStartTime(node_index, monotonic_start_time, realtime_start_time);
389 return true;
390 }
391 }
392 } else {
393 SetStartTime(node_index, monotonic_start_time, realtime_start_time);
394 return true;
395 }
396 return false;
397}
398
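// Builds a fresh log file header for the given node containing the
// configuration, the hostname, the logger UUID, a placeholder parts UUID,
// and min_time start times (patched later by SetStartTime).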
399aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> Logger::MakeHeader(
400 const Node *node) {
Austin Schuhfa895892020-01-07 20:07:41 -0800401 // Now write the header with this timestamp in it.
402 flatbuffers::FlatBufferBuilder fbb;
Austin Schuhd7b15da2020-02-17 15:06:11 -0800403 fbb.ForceDefaults(true);
Austin Schuhfa895892020-01-07 20:07:41 -0800404
Austin Schuh2f8fd752020-09-01 22:38:28 -0700405 // TODO(austin): Compress this much more efficiently. There are a bunch of
406 // duplicated schemas.
Austin Schuhfa895892020-01-07 20:07:41 -0800407 flatbuffers::Offset<aos::Configuration> configuration_offset =
Austin Schuh0c297012020-09-16 18:41:59 -0700408 CopyFlatBuffer(configuration_, &fbb);
Austin Schuhfa895892020-01-07 20:07:41 -0800409
Austin Schuh64fab802020-09-09 22:47:47 -0700410 flatbuffers::Offset<flatbuffers::String> name_offset =
Austin Schuh0c297012020-09-16 18:41:59 -0700411 fbb.CreateString(name_);
Austin Schuhfa895892020-01-07 20:07:41 -0800412
Austin Schuh64fab802020-09-09 22:47:47 -0700413 flatbuffers::Offset<flatbuffers::String> logger_uuid_offset =
414 fbb.CreateString(uuid_.string_view());
415
416 flatbuffers::Offset<flatbuffers::String> parts_uuid_offset =
417 fbb.CreateString("00000000-0000-4000-8000-000000000000");
418
Austin Schuhfa895892020-01-07 20:07:41 -0800419 flatbuffers::Offset<Node> node_offset;
Austin Schuh2f8fd752020-09-01 22:38:28 -0700420
Austin Schuh0c297012020-09-16 18:41:59 -0700421 if (configuration::MultiNode(configuration_)) {
Austin Schuh6f3babe2020-01-26 20:34:50 -0800422 node_offset = CopyFlatBuffer(node, &fbb);
Austin Schuhfa895892020-01-07 20:07:41 -0800423 }
424
425 aos::logger::LogFileHeader::Builder log_file_header_builder(fbb);
426
Austin Schuh64fab802020-09-09 22:47:47 -0700427 log_file_header_builder.add_name(name_offset);
Austin Schuhfa895892020-01-07 20:07:41 -0800428
429 // Only add the node if we are running in a multinode configuration.
Austin Schuh6f3babe2020-01-26 20:34:50 -0800430 if (node != nullptr) {
Austin Schuhfa895892020-01-07 20:07:41 -0800431 log_file_header_builder.add_node(node_offset);
432 }
433
434 log_file_header_builder.add_configuration(configuration_offset);
435  // The theoretical worst case out of order duration is twice the polling
436  // period: one message could get logged right after a boundary but be
437  // timestamped right before the next boundary, and the reverse could happen
438  // for another message. Report back 3x to be extra safe, and because the
439  // cost isn't huge on the read side.
440 log_file_header_builder.add_max_out_of_order_duration(
441 std::chrono::duration_cast<std::chrono::nanoseconds>(3 * polling_period_)
442 .count());
443
444 log_file_header_builder.add_monotonic_start_time(
445 std::chrono::duration_cast<std::chrono::nanoseconds>(
Austin Schuh2f8fd752020-09-01 22:38:28 -0700446 monotonic_clock::min_time.time_since_epoch())
Austin Schuhfa895892020-01-07 20:07:41 -0800447 .count());
Austin Schuh2f8fd752020-09-01 22:38:28 -0700448 if (node == event_loop_->node()) {
449 log_file_header_builder.add_realtime_start_time(
450 std::chrono::duration_cast<std::chrono::nanoseconds>(
451 realtime_clock::min_time.time_since_epoch())
452 .count());
Austin Schuh6f3babe2020-01-26 20:34:50 -0800453 }
454
Austin Schuh64fab802020-09-09 22:47:47 -0700455 log_file_header_builder.add_logger_uuid(logger_uuid_offset);
456
457 log_file_header_builder.add_parts_uuid(parts_uuid_offset);
458 log_file_header_builder.add_parts_index(0);
459
Austin Schuh2f8fd752020-09-01 22:38:28 -0700460 fbb.FinishSizePrefixed(log_file_header_builder.Finish());
461 return fbb.Release();
462}
463
464void Logger::Rotate() {
465 for (const Node *node : log_namer_->nodes()) {
466 const int node_index =
Austin Schuh0c297012020-09-16 18:41:59 -0700467 configuration::GetNodeIndex(configuration_, node);
Austin Schuh64fab802020-09-09 22:47:47 -0700468 log_namer_->Rotate(node, &node_state_[node_index].log_file_header);
Austin Schuh2f8fd752020-09-01 22:38:28 -0700469 }
470}
471
472void Logger::LogUntil(monotonic_clock::time_point t) {
473 WriteMissingTimestamps();
474
475 // Write each channel to disk, one at a time.
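  // Each fetcher can feed up to three writers: the data writer, the
  // delivery-timestamp writer, and the forwarded remote-timestamp contents
  // writer; any combination may be null depending on the channel.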
476 for (FetcherStruct &f : fetchers_) {
477 while (true) {
478 if (f.written) {
479 if (!f.fetcher->FetchNext()) {
480 VLOG(2) << "No new data on "
481 << configuration::CleanedChannelToString(
482 f.fetcher->channel());
483 break;
484 } else {
485 f.written = false;
486 }
487 }
488
489 CHECK(!f.written);
490
491 // TODO(james): Write tests to exercise this logic.
492 if (f.fetcher->context().monotonic_event_time < t) {
493 if (f.writer != nullptr) {
494 // Write!
495 flatbuffers::FlatBufferBuilder fbb(f.fetcher->context().size +
496 max_header_size_);
497 fbb.ForceDefaults(true);
498
499 fbb.FinishSizePrefixed(PackMessage(&fbb, f.fetcher->context(),
500 f.channel_index, f.log_type));
501
502 VLOG(2) << "Writing data as node "
503 << FlatbufferToJson(event_loop_->node()) << " for channel "
504 << configuration::CleanedChannelToString(f.fetcher->channel())
505 << " to " << f.writer->filename() << " data "
506 << FlatbufferToJson(
507 flatbuffers::GetSizePrefixedRoot<MessageHeader>(
508 fbb.GetBufferPointer()));
509
510 max_header_size_ = std::max(
511 max_header_size_, fbb.GetSize() - f.fetcher->context().size);
512 f.writer->QueueSizedFlatbuffer(&fbb);
513 }
514
515 if (f.timestamp_writer != nullptr) {
516 // And now handle timestamps.
517 flatbuffers::FlatBufferBuilder fbb;
518 fbb.ForceDefaults(true);
519
520 fbb.FinishSizePrefixed(PackMessage(&fbb, f.fetcher->context(),
521 f.channel_index,
522 LogType::kLogDeliveryTimeOnly));
523
524 VLOG(2) << "Writing timestamps as node "
525 << FlatbufferToJson(event_loop_->node()) << " for channel "
526 << configuration::CleanedChannelToString(f.fetcher->channel())
527 << " to " << f.timestamp_writer->filename() << " timestamp "
528 << FlatbufferToJson(
529 flatbuffers::GetSizePrefixedRoot<MessageHeader>(
530 fbb.GetBufferPointer()));
531
532 f.timestamp_writer->QueueSizedFlatbuffer(&fbb);
533 }
534
535 if (f.contents_writer != nullptr) {
536 // And now handle the special message contents channel. Copy the
537 // message into a FlatBufferBuilder and save it to disk.
538 // TODO(austin): We can be more efficient here when we start to
539 // care...
540 flatbuffers::FlatBufferBuilder fbb;
541 fbb.ForceDefaults(true);
542
543 const MessageHeader *msg =
544 flatbuffers::GetRoot<MessageHeader>(f.fetcher->context().data);
545
546 logger::MessageHeader::Builder message_header_builder(fbb);
547
548 // Note: this must match the same order as MessageBridgeServer and
549 // PackMessage. We want identical headers to have identical
550 // on-the-wire formats to make comparing them easier.
551 message_header_builder.add_channel_index(msg->channel_index());
552
553 message_header_builder.add_queue_index(msg->queue_index());
554 message_header_builder.add_monotonic_sent_time(
555 msg->monotonic_sent_time());
556 message_header_builder.add_realtime_sent_time(
557 msg->realtime_sent_time());
558
559 message_header_builder.add_monotonic_remote_time(
560 msg->monotonic_remote_time());
561 message_header_builder.add_realtime_remote_time(
562 msg->realtime_remote_time());
563 message_header_builder.add_remote_queue_index(
564 msg->remote_queue_index());
565
566 fbb.FinishSizePrefixed(message_header_builder.Finish());
567
568 f.contents_writer->QueueSizedFlatbuffer(&fbb);
569 }
570
571 f.written = true;
572 } else {
573 break;
574 }
575 }
576 }
577 last_synchronized_time_ = t;
Austin Schuhfa895892020-01-07 20:07:41 -0800578}
579
Austin Schuhe309d2a2019-11-29 13:25:21 -0800580void Logger::DoLogData() {
581 // We want to guarentee that messages aren't out of order by more than
582 // max_out_of_order_duration. To do this, we need sync points. Every write
583 // cycle should be a sync point.
Austin Schuhfa895892020-01-07 20:07:41 -0800584 const monotonic_clock::time_point monotonic_now =
585 event_loop_->monotonic_now();
Austin Schuhe309d2a2019-11-29 13:25:21 -0800586
587 do {
588 // Move the sync point up by at most polling_period. This forces one sync
589 // per iteration, even if it is small.
Austin Schuh2f8fd752020-09-01 22:38:28 -0700590 LogUntil(
591 std::min(last_synchronized_time_ + polling_period_, monotonic_now));
Austin Schuhe309d2a2019-11-29 13:25:21 -0800592
Austin Schuhe309d2a2019-11-29 13:25:21 -0800593 // If we missed cycles, we could be pretty far behind. Spin until we are
594 // caught up.
595 } while (last_synchronized_time_ + polling_period_ < monotonic_now);
Austin Schuhe309d2a2019-11-29 13:25:21 -0800596}
597
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -0800598LogReader::LogReader(std::string_view filename,
599 const Configuration *replay_configuration)
Austin Schuhfa895892020-01-07 20:07:41 -0800600 : LogReader(std::vector<std::string>{std::string(filename)},
601 replay_configuration) {}
602
603LogReader::LogReader(const std::vector<std::string> &filenames,
604 const Configuration *replay_configuration)
Austin Schuh6f3babe2020-01-26 20:34:50 -0800605 : LogReader(std::vector<std::vector<std::string>>{filenames},
606 replay_configuration) {}
607
608LogReader::LogReader(const std::vector<std::vector<std::string>> &filenames,
609 const Configuration *replay_configuration)
610 : filenames_(filenames),
611 log_file_header_(ReadHeader(filenames[0][0])),
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -0800612 replay_configuration_(replay_configuration) {
Austin Schuh6331ef92020-01-07 18:28:09 -0800613 MakeRemappedConfig();
Austin Schuh6f3babe2020-01-26 20:34:50 -0800614
Austin Schuh6aa77be2020-02-22 21:06:40 -0800615 if (replay_configuration) {
616 CHECK_EQ(configuration::MultiNode(configuration()),
617 configuration::MultiNode(replay_configuration))
Austin Schuh2f8fd752020-09-01 22:38:28 -0700618 << ": Log file and replay config need to both be multi or single "
619 "node.";
Austin Schuh6aa77be2020-02-22 21:06:40 -0800620 }
621
Austin Schuh6f3babe2020-01-26 20:34:50 -0800622 if (!configuration::MultiNode(configuration())) {
Austin Schuh858c9f32020-08-31 16:56:12 -0700623 states_.emplace_back(
624 std::make_unique<State>(std::make_unique<ChannelMerger>(filenames)));
Austin Schuh8bd96322020-02-13 21:18:22 -0800625 } else {
Austin Schuh6aa77be2020-02-22 21:06:40 -0800626 if (replay_configuration) {
James Kuszmaul46d82582020-05-09 19:50:09 -0700627 CHECK_EQ(logged_configuration()->nodes()->size(),
Austin Schuh6aa77be2020-02-22 21:06:40 -0800628 replay_configuration->nodes()->size())
Austin Schuh2f8fd752020-09-01 22:38:28 -0700629 << ": Log file and replay config need to have matching nodes "
630 "lists.";
James Kuszmaul46d82582020-05-09 19:50:09 -0700631 for (const Node *node : *logged_configuration()->nodes()) {
632 if (configuration::GetNode(replay_configuration, node) == nullptr) {
Austin Schuh2f8fd752020-09-01 22:38:28 -0700633 LOG(FATAL) << "Found node " << FlatbufferToJson(node)
634 << " in logged config that is not present in the replay "
635 "config.";
James Kuszmaul46d82582020-05-09 19:50:09 -0700636 }
637 }
Austin Schuh6aa77be2020-02-22 21:06:40 -0800638 }
Austin Schuh8bd96322020-02-13 21:18:22 -0800639 states_.resize(configuration()->nodes()->size());
Austin Schuh6f3babe2020-01-26 20:34:50 -0800640 }
Austin Schuhe309d2a2019-11-29 13:25:21 -0800641}
642
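// Typical replay usage (a minimal sketch; the path and the Run() call are
// illustrative assumptions rather than code from this file):
//   aos::logger::LogReader reader("/path/to/logfile");
//   aos::SimulatedEventLoopFactory factory(reader.configuration());
//   reader.Register(&factory);
//   factory.Run();
//   reader.Deregister();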
Austin Schuh6aa77be2020-02-22 21:06:40 -0800643LogReader::~LogReader() {
Austin Schuh39580f12020-08-01 14:44:08 -0700644 if (event_loop_factory_unique_ptr_) {
645 Deregister();
646 } else if (event_loop_factory_ != nullptr) {
647 LOG(FATAL) << "Must call Deregister before the SimulatedEventLoopFactory "
648 "is destroyed";
649 }
Austin Schuh8bd96322020-02-13 21:18:22 -0800650 if (offset_fp_ != nullptr) {
651 fclose(offset_fp_);
652 }
Austin Schuh2f8fd752020-09-01 22:38:28 -0700653  // Zero out some buffers. It's easy to use-after-free these, so zeroing
654  // them makes such bugs more obvious.
Austin Schuh39580f12020-08-01 14:44:08 -0700655 if (remapped_configuration_buffer_) {
656 remapped_configuration_buffer_->Wipe();
657 }
658 log_file_header_.Wipe();
Austin Schuh8bd96322020-02-13 21:18:22 -0800659}
Austin Schuhe309d2a2019-11-29 13:25:21 -0800660
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -0800661const Configuration *LogReader::logged_configuration() const {
Austin Schuh6f3babe2020-01-26 20:34:50 -0800662 return log_file_header_.message().configuration();
Austin Schuhe309d2a2019-11-29 13:25:21 -0800663}
664
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -0800665const Configuration *LogReader::configuration() const {
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -0800666 return remapped_configuration_;
667}
668
Austin Schuh6f3babe2020-01-26 20:34:50 -0800669std::vector<const Node *> LogReader::Nodes() const {
Austin Schuh2f8fd752020-09-01 22:38:28 -0700670 // Because the Node pointer will only be valid if it actually points to
671 // memory owned by remapped_configuration_, we need to wait for the
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -0800672 // remapped_configuration_ to be populated before accessing it.
Austin Schuh6f3babe2020-01-26 20:34:50 -0800673 //
674  // Also note that whenever a map is changed, the nodes in here are
675 // invalidated.
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -0800676 CHECK(remapped_configuration_ != nullptr)
677 << ": Need to call Register before the node() pointer will be valid.";
Austin Schuh6f3babe2020-01-26 20:34:50 -0800678 return configuration::GetNodes(remapped_configuration_);
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -0800679}
Austin Schuh15649d62019-12-28 16:36:38 -0800680
Austin Schuh6f3babe2020-01-26 20:34:50 -0800681monotonic_clock::time_point LogReader::monotonic_start_time(const Node *node) {
Austin Schuh8bd96322020-02-13 21:18:22 -0800682 State *state =
683 states_[configuration::GetNodeIndex(configuration(), node)].get();
684 CHECK(state != nullptr) << ": Unknown node " << FlatbufferToJson(node);
685
Austin Schuh858c9f32020-08-31 16:56:12 -0700686 return state->monotonic_start_time();
Austin Schuhe309d2a2019-11-29 13:25:21 -0800687}
688
Austin Schuh6f3babe2020-01-26 20:34:50 -0800689realtime_clock::time_point LogReader::realtime_start_time(const Node *node) {
Austin Schuh8bd96322020-02-13 21:18:22 -0800690 State *state =
691 states_[configuration::GetNodeIndex(configuration(), node)].get();
692 CHECK(state != nullptr) << ": Unknown node " << FlatbufferToJson(node);
693
Austin Schuh858c9f32020-08-31 16:56:12 -0700694 return state->realtime_start_time();
Austin Schuhe309d2a2019-11-29 13:25:21 -0800695}
696
James Kuszmaul84ff3e52020-01-03 19:48:53 -0800697void LogReader::Register() {
698 event_loop_factory_unique_ptr_ =
Austin Schuhac0771c2020-01-07 18:36:30 -0800699 std::make_unique<SimulatedEventLoopFactory>(configuration());
James Kuszmaul84ff3e52020-01-03 19:48:53 -0800700 Register(event_loop_factory_unique_ptr_.get());
701}
702
Austin Schuh92547522019-12-28 14:33:43 -0800703void LogReader::Register(SimulatedEventLoopFactory *event_loop_factory) {
Austin Schuh92547522019-12-28 14:33:43 -0800704 event_loop_factory_ = event_loop_factory;
Austin Schuh92547522019-12-28 14:33:43 -0800705
Austin Schuh6f3babe2020-01-26 20:34:50 -0800706 for (const Node *node : configuration::GetNodes(configuration())) {
Austin Schuh8bd96322020-02-13 21:18:22 -0800707 const size_t node_index =
708 configuration::GetNodeIndex(configuration(), node);
Austin Schuh858c9f32020-08-31 16:56:12 -0700709 states_[node_index] =
710 std::make_unique<State>(std::make_unique<ChannelMerger>(filenames_));
Austin Schuh8bd96322020-02-13 21:18:22 -0800711 State *state = states_[node_index].get();
Austin Schuh6f3babe2020-01-26 20:34:50 -0800712
Austin Schuh858c9f32020-08-31 16:56:12 -0700713 Register(state->SetNodeEventLoopFactory(
714 event_loop_factory_->GetNodeEventLoopFactory(node)));
Austin Schuhcde938c2020-02-02 17:30:07 -0800715 }
James Kuszmaul46d82582020-05-09 19:50:09 -0700716 if (live_nodes_ == 0) {
717 LOG(FATAL)
718 << "Don't have logs from any of the nodes in the replay config--are "
719 "you sure that the replay config matches the original config?";
720 }
Austin Schuh6f3babe2020-01-26 20:34:50 -0800721
Austin Schuh2f8fd752020-09-01 22:38:28 -0700722  // We now need to seed our per-node time offsets and get everything set up
723 // to run.
724 const size_t num_nodes = nodes_count();
Austin Schuhcde938c2020-02-02 17:30:07 -0800725
Austin Schuh8bd96322020-02-13 21:18:22 -0800726 // It is easiest to solve for per node offsets with a matrix rather than
727 // trying to solve the equations by hand. So let's get after it.
728 //
729 // Now, build up the map matrix.
730 //
Austin Schuh2f8fd752020-09-01 22:38:28 -0700731 // offset_matrix_ = (map_matrix_ + slope_matrix_) * [ta; tb; tc]
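  // As an illustration (not from this file): with three nodes and a single
  // filter between nodes 0 and 1, row 0 of map_matrix_ is [1/3, 1/3, 1/3]
  // (every clock averages to the distributed clock) and the filter row is
  // [-1, 1, 0]; the estimator fills the matching rows of slope_matrix_ and
  // offset_matrix_ as samples arrive.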
732 map_matrix_ = Eigen::Matrix<mpq_class, Eigen::Dynamic, Eigen::Dynamic>::Zero(
733 filters_.size() + 1, num_nodes);
734 slope_matrix_ =
735 Eigen::Matrix<mpq_class, Eigen::Dynamic, Eigen::Dynamic>::Zero(
736 filters_.size() + 1, num_nodes);
Austin Schuhcde938c2020-02-02 17:30:07 -0800737
Austin Schuh2f8fd752020-09-01 22:38:28 -0700738 offset_matrix_ =
739 Eigen::Matrix<mpq_class, Eigen::Dynamic, 1>::Zero(filters_.size() + 1);
740 valid_matrix_ =
741 Eigen::Matrix<bool, Eigen::Dynamic, 1>::Zero(filters_.size() + 1);
742 last_valid_matrix_ =
743 Eigen::Matrix<bool, Eigen::Dynamic, 1>::Zero(filters_.size() + 1);
Austin Schuhcde938c2020-02-02 17:30:07 -0800744
Austin Schuh2f8fd752020-09-01 22:38:28 -0700745 time_offset_matrix_ = Eigen::VectorXd::Zero(num_nodes);
746 time_slope_matrix_ = Eigen::VectorXd::Zero(num_nodes);
Austin Schuh8bd96322020-02-13 21:18:22 -0800747
Austin Schuh2f8fd752020-09-01 22:38:28 -0700748 // All times should average out to the distributed clock.
749 for (int i = 0; i < map_matrix_.cols(); ++i) {
750 // 1/num_nodes.
751 map_matrix_(0, i) = mpq_class(1, num_nodes);
752 }
753 valid_matrix_(0) = true;
Austin Schuh8bd96322020-02-13 21:18:22 -0800754
755 {
756 // Now, add the a - b -> sample elements.
757 size_t i = 1;
758 for (std::pair<const std::tuple<const Node *, const Node *>,
Austin Schuh2f8fd752020-09-01 22:38:28 -0700759 std::tuple<message_bridge::NoncausalOffsetEstimator>>
760 &filter : filters_) {
Austin Schuh8bd96322020-02-13 21:18:22 -0800761 const Node *const node_a = std::get<0>(filter.first);
762 const Node *const node_b = std::get<1>(filter.first);
763
764 const size_t node_a_index =
765 configuration::GetNodeIndex(configuration(), node_a);
766 const size_t node_b_index =
767 configuration::GetNodeIndex(configuration(), node_b);
768
Austin Schuh2f8fd752020-09-01 22:38:28 -0700769 // -a
770 map_matrix_(i, node_a_index) = mpq_class(-1);
771 // +b
772 map_matrix_(i, node_b_index) = mpq_class(1);
Austin Schuh8bd96322020-02-13 21:18:22 -0800773
774 // -> sample
Austin Schuh2f8fd752020-09-01 22:38:28 -0700775 std::get<0>(filter.second)
776 .set_slope_pointer(&slope_matrix_(i, node_a_index));
777 std::get<0>(filter.second).set_offset_pointer(&offset_matrix_(i, 0));
778
779 valid_matrix_(i) = false;
780 std::get<0>(filter.second).set_valid_pointer(&valid_matrix_(i));
Austin Schuh8bd96322020-02-13 21:18:22 -0800781
782 ++i;
Austin Schuh6f3babe2020-01-26 20:34:50 -0800783 }
784 }
785
Austin Schuh858c9f32020-08-31 16:56:12 -0700786 for (std::unique_ptr<State> &state : states_) {
787 state->SeedSortedMessages();
788 }
789
Austin Schuh2f8fd752020-09-01 22:38:28 -0700790  // The rank of the map matrix tells you if all the nodes are in communication
791 // with each other, which tells you if the offsets are observable.
792 const size_t connected_nodes =
793 Eigen::FullPivLU<
794 Eigen::Matrix<mpq_class, Eigen::Dynamic, Eigen::Dynamic>>(map_matrix_)
795 .rank();
796
797 // We don't need to support isolated nodes until someone has a real use
798 // case.
799 CHECK_EQ(connected_nodes, num_nodes)
800 << ": There is a node which isn't communicating with the rest.";
801
802 // And solve.
Austin Schuh8bd96322020-02-13 21:18:22 -0800803 UpdateOffsets();
804
Austin Schuh2f8fd752020-09-01 22:38:28 -0700805 // We want to start the log file at the last start time of the log files
806 // from all the nodes. Compute how long each node's simulation needs to run
807 // to move time to this point.
Austin Schuh8bd96322020-02-13 21:18:22 -0800808 distributed_clock::time_point start_time = distributed_clock::min_time;
Austin Schuhcde938c2020-02-02 17:30:07 -0800809
Austin Schuh2f8fd752020-09-01 22:38:28 -0700810 // TODO(austin): We want an "OnStart" callback for each node rather than
811 // running until the last node.
812
Austin Schuh8bd96322020-02-13 21:18:22 -0800813 for (std::unique_ptr<State> &state : states_) {
Austin Schuh2f8fd752020-09-01 22:38:28 -0700814 VLOG(1) << "Start time is " << state->monotonic_start_time() << " for node "
815 << MaybeNodeName(state->event_loop()->node()) << "now "
816 << state->monotonic_now();
817 // And start computing the start time on the distributed clock now that
818 // that works.
Austin Schuh858c9f32020-08-31 16:56:12 -0700819 start_time = std::max(
820 start_time, state->ToDistributedClock(state->monotonic_start_time()));
Austin Schuhcde938c2020-02-02 17:30:07 -0800821 }
Austin Schuh2f8fd752020-09-01 22:38:28 -0700822
823 CHECK_GE(start_time, distributed_clock::epoch())
824 << ": Hmm, we have a node starting before the start of time. Offset "
825 "everything.";
Austin Schuhcde938c2020-02-02 17:30:07 -0800826
Austin Schuh6f3babe2020-01-26 20:34:50 -0800827 // Forwarding is tracked per channel. If it is enabled, we want to turn it
828 // off. Otherwise messages replayed will get forwarded across to the other
Austin Schuh2f8fd752020-09-01 22:38:28 -0700829 // nodes, and also replayed on the other nodes. This may not satisfy all
830 // our users, but it'll start the discussion.
Austin Schuh6f3babe2020-01-26 20:34:50 -0800831 if (configuration::MultiNode(event_loop_factory_->configuration())) {
832 for (size_t i = 0; i < logged_configuration()->channels()->size(); ++i) {
833 const Channel *channel = logged_configuration()->channels()->Get(i);
834 const Node *node = configuration::GetNode(
835 configuration(), channel->source_node()->string_view());
836
Austin Schuh8bd96322020-02-13 21:18:22 -0800837 State *state =
838 states_[configuration::GetNodeIndex(configuration(), node)].get();
Austin Schuh6f3babe2020-01-26 20:34:50 -0800839
840 const Channel *remapped_channel =
Austin Schuh858c9f32020-08-31 16:56:12 -0700841 RemapChannel(state->event_loop(), channel);
Austin Schuh6f3babe2020-01-26 20:34:50 -0800842
843 event_loop_factory_->DisableForwarding(remapped_channel);
844 }
Austin Schuh4c3b9702020-08-30 11:34:55 -0700845
846 // If we are replaying a log, we don't want a bunch of redundant messages
847 // from both the real message bridge and simulated message bridge.
848 event_loop_factory_->DisableStatistics();
Austin Schuh6f3babe2020-01-26 20:34:50 -0800849 }
850
Austin Schuhcde938c2020-02-02 17:30:07 -0800851 // While we are starting the system up, we might be relying on matching data
852 // to timestamps on log files where the timestamp log file starts before the
853 // data. In this case, it is reasonable to expect missing data.
854 ignore_missing_data_ = true;
Austin Schuh2f8fd752020-09-01 22:38:28 -0700855 VLOG(1) << "Running until " << start_time << " in Register";
Austin Schuh8bd96322020-02-13 21:18:22 -0800856 event_loop_factory_->RunFor(start_time.time_since_epoch());
Brian Silverman8a32ce62020-08-12 12:02:38 -0700857 VLOG(1) << "At start time";
Austin Schuhcde938c2020-02-02 17:30:07 -0800858 // Now that we are running for real, missing data means that the log file is
859  // corrupted or something else went wrong.
860 ignore_missing_data_ = false;
Austin Schuh92547522019-12-28 14:33:43 -0800861
Austin Schuh8bd96322020-02-13 21:18:22 -0800862 for (std::unique_ptr<State> &state : states_) {
Austin Schuh2f8fd752020-09-01 22:38:28 -0700863    // Make the RT clock correct before handing it to the user.
864 if (state->realtime_start_time() != realtime_clock::min_time) {
865 state->SetRealtimeOffset(state->monotonic_start_time(),
866 state->realtime_start_time());
867 }
868 VLOG(1) << "Start time is " << state->monotonic_start_time() << " for node "
869 << MaybeNodeName(state->event_loop()->node()) << "now "
870 << state->monotonic_now();
871 }
872
873 if (FLAGS_timestamps_to_csv) {
874 for (std::pair<const std::tuple<const Node *, const Node *>,
875 std::tuple<message_bridge::NoncausalOffsetEstimator>>
876 &filter : filters_) {
877 const Node *const node_a = std::get<0>(filter.first);
878 const Node *const node_b = std::get<1>(filter.first);
879
880 std::get<0>(filter.second)
881 .SetFirstFwdTime(event_loop_factory_->GetNodeEventLoopFactory(node_a)
882 ->monotonic_now());
883 std::get<0>(filter.second)
884 .SetFirstRevTime(event_loop_factory_->GetNodeEventLoopFactory(node_b)
885 ->monotonic_now());
886 }
Austin Schuh8bd96322020-02-13 21:18:22 -0800887 }
888}
889
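// Re-solves the clock-correlation problem and pushes the resulting per-node
// slope and offset into each node's simulated clock, logging the fit at
// higher verbosity.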
Austin Schuh2f8fd752020-09-01 22:38:28 -0700890void LogReader::UpdateOffsets() {
891 VLOG(2) << "Samples are " << offset_matrix_;
892 VLOG(2) << "Map is " << (map_matrix_ + slope_matrix_);
893 std::tie(time_slope_matrix_, time_offset_matrix_) = SolveOffsets();
894 Eigen::IOFormat HeavyFmt(Eigen::FullPrecision, 0, ", ", ";\n", "[", "]", "[",
895 "]");
896 VLOG(1) << "First slope " << time_slope_matrix_.transpose().format(HeavyFmt)
897 << " offset " << time_offset_matrix_.transpose().format(HeavyFmt);
898
899 size_t node_index = 0;
900 for (std::unique_ptr<State> &state : states_) {
901 state->SetDistributedOffset(offset(node_index), slope(node_index));
902 VLOG(1) << "Offset for node " << node_index << " "
903 << MaybeNodeName(state->event_loop()->node()) << "is "
904 << aos::distributed_clock::time_point(offset(node_index))
905 << " slope " << std::setprecision(9) << std::fixed
906 << slope(node_index);
907 ++node_index;
908 }
909
910 if (VLOG_IS_ON(1)) {
911 LogFit("Offset is");
912 }
913}
914
915void LogReader::LogFit(std::string_view prefix) {
916 for (std::unique_ptr<State> &state : states_) {
917 VLOG(1) << MaybeNodeName(state->event_loop()->node()) << " now "
918 << state->monotonic_now() << " distributed "
919 << event_loop_factory_->distributed_now();
920 }
921
922 for (std::pair<const std::tuple<const Node *, const Node *>,
923 std::tuple<message_bridge::NoncausalOffsetEstimator>> &filter :
924 filters_) {
925 message_bridge::NoncausalOffsetEstimator *estimator =
926 &std::get<0>(filter.second);
927
928 if (estimator->a_timestamps().size() == 0 &&
929 estimator->b_timestamps().size() == 0) {
930 continue;
931 }
932
933 if (VLOG_IS_ON(1)) {
934 estimator->LogFit(prefix);
935 }
936
937 const Node *const node_a = std::get<0>(filter.first);
938 const Node *const node_b = std::get<1>(filter.first);
939
940 const size_t node_a_index =
941 configuration::GetNodeIndex(configuration(), node_a);
942 const size_t node_b_index =
943 configuration::GetNodeIndex(configuration(), node_b);
944
945 const double recovered_slope =
946 slope(node_b_index) / slope(node_a_index) - 1.0;
947 const int64_t recovered_offset =
948 offset(node_b_index).count() - offset(node_a_index).count() *
949 slope(node_b_index) /
950 slope(node_a_index);
951
952 VLOG(1) << "Recovered slope " << std::setprecision(20) << recovered_slope
953 << " (error " << recovered_slope - estimator->fit().slope() << ") "
954 << " offset " << std::setprecision(20) << recovered_offset
955 << " (error "
956 << recovered_offset - estimator->fit().offset().count() << ")";
957
958 const aos::distributed_clock::time_point a0 =
959 states_[node_a_index]->ToDistributedClock(
960 std::get<0>(estimator->a_timestamps()[0]));
961 const aos::distributed_clock::time_point a1 =
962 states_[node_a_index]->ToDistributedClock(
963 std::get<0>(estimator->a_timestamps()[1]));
964
965 VLOG(1) << node_a->name()->string_view() << " timestamps()[0] = "
966 << std::get<0>(estimator->a_timestamps()[0]) << " -> " << a0
967 << " distributed -> " << node_b->name()->string_view() << " "
968 << states_[node_b_index]->FromDistributedClock(a0) << " should be "
969 << aos::monotonic_clock::time_point(
970 std::chrono::nanoseconds(static_cast<int64_t>(
971 std::get<0>(estimator->a_timestamps()[0])
972 .time_since_epoch()
973 .count() *
974 (1.0 + estimator->fit().slope()))) +
975 estimator->fit().offset())
976 << ((a0 <= event_loop_factory_->distributed_now())
977 ? ""
978 : " After now, investigate");
979 VLOG(1) << node_a->name()->string_view() << " timestamps()[1] = "
980 << std::get<0>(estimator->a_timestamps()[1]) << " -> " << a1
981 << " distributed -> " << node_b->name()->string_view() << " "
982 << states_[node_b_index]->FromDistributedClock(a1) << " should be "
983 << aos::monotonic_clock::time_point(
984 std::chrono::nanoseconds(static_cast<int64_t>(
985 std::get<0>(estimator->a_timestamps()[1])
986 .time_since_epoch()
987 .count() *
988 (1.0 + estimator->fit().slope()))) +
989 estimator->fit().offset())
990 << ((event_loop_factory_->distributed_now() <= a1)
991 ? ""
992 : " Before now, investigate");
993
994 const aos::distributed_clock::time_point b0 =
995 states_[node_b_index]->ToDistributedClock(
996 std::get<0>(estimator->b_timestamps()[0]));
997 const aos::distributed_clock::time_point b1 =
998 states_[node_b_index]->ToDistributedClock(
999 std::get<0>(estimator->b_timestamps()[1]));
1000
1001 VLOG(1) << node_b->name()->string_view() << " timestamps()[0] = "
1002 << std::get<0>(estimator->b_timestamps()[0]) << " -> " << b0
1003 << " distributed -> " << node_a->name()->string_view() << " "
1004 << states_[node_a_index]->FromDistributedClock(b0)
1005 << ((b0 <= event_loop_factory_->distributed_now())
1006 ? ""
1007 : " After now, investigate");
1008 VLOG(1) << node_b->name()->string_view() << " timestamps()[1] = "
1009 << std::get<0>(estimator->b_timestamps()[1]) << " -> " << b1
1010 << " distributed -> " << node_a->name()->string_view() << " "
1011 << states_[node_a_index]->FromDistributedClock(b1)
1012 << ((event_loop_factory_->distributed_now() <= b1)
1013 ? ""
1014 : " Before now, investigate");
1015 }
1016}
1017
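// Returns the noncausal offset estimator for a pair of nodes, creating it
// (and its CSV outputs when --timestamps_to_csv is set) on first use. The
// pair is canonicalized so (a, b) and (b, a) share one filter.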
1018message_bridge::NoncausalOffsetEstimator *LogReader::GetFilter(
Austin Schuh8bd96322020-02-13 21:18:22 -08001019 const Node *node_a, const Node *node_b) {
1020 CHECK_NE(node_a, node_b);
1021 CHECK_EQ(configuration::GetNode(configuration(), node_a), node_a);
1022 CHECK_EQ(configuration::GetNode(configuration(), node_b), node_b);
1023
1024 if (node_a > node_b) {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001025 return GetFilter(node_b, node_a);
Austin Schuh8bd96322020-02-13 21:18:22 -08001026 }
1027
1028 auto tuple = std::make_tuple(node_a, node_b);
1029
1030 auto it = filters_.find(tuple);
1031
1032 if (it == filters_.end()) {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001033 auto &x =
1034 filters_
1035 .insert(std::make_pair(
1036 tuple, std::make_tuple(message_bridge::NoncausalOffsetEstimator(
1037 node_a, node_b))))
1038 .first->second;
Austin Schuh8bd96322020-02-13 21:18:22 -08001039 if (FLAGS_timestamps_to_csv) {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001040 std::get<0>(x).SetFwdCsvFileName(absl::StrCat(
1041 "/tmp/timestamp_noncausal_", node_a->name()->string_view(), "_",
1042 node_b->name()->string_view()));
1043 std::get<0>(x).SetRevCsvFileName(absl::StrCat(
1044 "/tmp/timestamp_noncausal_", node_b->name()->string_view(), "_",
1045 node_a->name()->string_view()));
Austin Schuh8bd96322020-02-13 21:18:22 -08001046 }
1047
Austin Schuh2f8fd752020-09-01 22:38:28 -07001048 return &std::get<0>(x);
Austin Schuh8bd96322020-02-13 21:18:22 -08001049 } else {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001050 return &std::get<0>(it->second);
Austin Schuh8bd96322020-02-13 21:18:22 -08001051 }
1052}
1053
Austin Schuh8bd96322020-02-13 21:18:22 -08001054
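// Registers a single node's event loop for replay: creates a sender for
// every logged channel (remapped as needed), hooks up clock-offset filters
// for channels forwarded from other nodes, and sets up the timer that
// replays messages in order.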
Austin Schuhe309d2a2019-11-29 13:25:21 -08001055void LogReader::Register(EventLoop *event_loop) {
Austin Schuh8bd96322020-02-13 21:18:22 -08001056 State *state =
1057 states_[configuration::GetNodeIndex(configuration(), event_loop->node())]
1058 .get();
Austin Schuh6f3babe2020-01-26 20:34:50 -08001059
Austin Schuh858c9f32020-08-31 16:56:12 -07001060 state->set_event_loop(event_loop);
Austin Schuhe309d2a2019-11-29 13:25:21 -08001061
Tyler Chatow67ddb032020-01-12 14:30:04 -08001062 // We don't run timing reports when trying to print out logged data, because
1063 // otherwise we would end up printing out the timing reports themselves...
1064 // This is only really relevant when we are replaying into a simulation.
Austin Schuh6f3babe2020-01-26 20:34:50 -08001065 event_loop->SkipTimingReport();
1066 event_loop->SkipAosLog();
Austin Schuh39788ff2019-12-01 18:22:57 -08001067
Austin Schuh858c9f32020-08-31 16:56:12 -07001068 const bool has_data = state->SetNode();
Austin Schuhe309d2a2019-11-29 13:25:21 -08001069
Austin Schuh858c9f32020-08-31 16:56:12 -07001070 state->SetChannelCount(logged_configuration()->channels()->size());
Austin Schuh8bd96322020-02-13 21:18:22 -08001071
Austin Schuh858c9f32020-08-31 16:56:12 -07001072 for (size_t i = 0; i < logged_configuration()->channels()->size(); ++i) {
Austin Schuh6f3babe2020-01-26 20:34:50 -08001073 const Channel *channel =
1074 RemapChannel(event_loop, logged_configuration()->channels()->Get(i));
Austin Schuh6331ef92020-01-07 18:28:09 -08001075
Austin Schuh858c9f32020-08-31 16:56:12 -07001076 NodeEventLoopFactory *channel_target_event_loop_factory = nullptr;
Austin Schuh2f8fd752020-09-01 22:38:28 -07001077 message_bridge::NoncausalOffsetEstimator *filter = nullptr;
Austin Schuh8bd96322020-02-13 21:18:22 -08001078
1079 if (!configuration::ChannelIsSendableOnNode(channel, event_loop->node()) &&
1080 configuration::ChannelIsReadableOnNode(channel, event_loop->node())) {
1081 const Node *target_node = configuration::GetNode(
1082 event_loop->configuration(), channel->source_node()->string_view());
Austin Schuh858c9f32020-08-31 16:56:12 -07001083 filter = GetFilter(event_loop->node(), target_node);
Austin Schuh8bd96322020-02-13 21:18:22 -08001084
1085 if (event_loop_factory_ != nullptr) {
Austin Schuh858c9f32020-08-31 16:56:12 -07001086 channel_target_event_loop_factory =
Austin Schuh8bd96322020-02-13 21:18:22 -08001087 event_loop_factory_->GetNodeEventLoopFactory(target_node);
1088 }
1089 }
Austin Schuh858c9f32020-08-31 16:56:12 -07001090
1091 state->SetChannel(i, event_loop->MakeRawSender(channel), filter,
1092 channel_target_event_loop_factory);
Austin Schuhe309d2a2019-11-29 13:25:21 -08001093 }
1094
Austin Schuh6aa77be2020-02-22 21:06:40 -08001095 // If we didn't find any log files with data in them, we won't ever get a
1096 // callback or be live. So skip the rest of the setup.
1097 if (!has_data) {
1098 return;
1099 }
1100
Austin Schuh858c9f32020-08-31 16:56:12 -07001101 state->set_timer_handler(event_loop->AddTimer([this, state]() {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001102 VLOG(1) << "Starting sending " << MaybeNodeName(state->event_loop()->node())
1103 << "at " << state->event_loop()->context().monotonic_event_time
1104 << " now " << state->monotonic_now();
Austin Schuh858c9f32020-08-31 16:56:12 -07001105 if (state->OldestMessageTime() == monotonic_clock::max_time) {
Austin Schuh6f3babe2020-01-26 20:34:50 -08001106 --live_nodes_;
Austin Schuh2f8fd752020-09-01 22:38:28 -07001107 VLOG(1) << MaybeNodeName(state->event_loop()->node()) << "Node down!";
Austin Schuh6f3babe2020-01-26 20:34:50 -08001108 if (live_nodes_ == 0) {
1109 event_loop_factory_->Exit();
1110 }
James Kuszmaul314f1672020-01-03 20:02:08 -08001111 return;
1112 }
Austin Schuh6f3babe2020-01-26 20:34:50 -08001113 TimestampMerger::DeliveryTimestamp channel_timestamp;
Austin Schuh05b70472020-01-01 17:11:17 -08001114 int channel_index;
1115 FlatbufferVector<MessageHeader> channel_data =
1116 FlatbufferVector<MessageHeader>::Empty();
1117
Austin Schuh2f8fd752020-09-01 22:38:28 -07001118 if (VLOG_IS_ON(1)) {
1119 LogFit("Offset was");
1120 }
1121
1122 bool update_time;
Austin Schuh05b70472020-01-01 17:11:17 -08001123 std::tie(channel_timestamp, channel_index, channel_data) =
Austin Schuh2f8fd752020-09-01 22:38:28 -07001124 state->PopOldest(&update_time);
Austin Schuh05b70472020-01-01 17:11:17 -08001125
Austin Schuhe309d2a2019-11-29 13:25:21 -08001126 const monotonic_clock::time_point monotonic_now =
Austin Schuh858c9f32020-08-31 16:56:12 -07001127 state->event_loop()->context().monotonic_event_time;
Austin Schuh2f8fd752020-09-01 22:38:28 -07001128 if (!FLAGS_skip_order_validation) {
1129 CHECK(monotonic_now == channel_timestamp.monotonic_event_time)
1130 << ": " << FlatbufferToJson(state->event_loop()->node()) << " Now "
1131 << monotonic_now << " trying to send "
1132 << channel_timestamp.monotonic_event_time << " failure "
1133 << state->DebugString();
1134 } else if (monotonic_now != channel_timestamp.monotonic_event_time) {
1135 LOG(WARNING) << "Check failed: monotonic_now == "
1136 "channel_timestamp.monotonic_event_time) ("
1137 << monotonic_now << " vs. "
1138 << channel_timestamp.monotonic_event_time
1139 << "): " << FlatbufferToJson(state->event_loop()->node())
1140 << " Now " << monotonic_now << " trying to send "
1141 << channel_timestamp.monotonic_event_time << " failure "
1142 << state->DebugString();
1143 }
Austin Schuhe309d2a2019-11-29 13:25:21 -08001144
Austin Schuh6f3babe2020-01-26 20:34:50 -08001145 if (channel_timestamp.monotonic_event_time >
Austin Schuh858c9f32020-08-31 16:56:12 -07001146 state->monotonic_start_time() ||
Austin Schuh15649d62019-12-28 16:36:38 -08001147 event_loop_factory_ != nullptr) {
Austin Schuh8bd96322020-02-13 21:18:22 -08001148 if ((!ignore_missing_data_ && !FLAGS_skip_missing_forwarding_entries &&
Austin Schuh858c9f32020-08-31 16:56:12 -07001149 !state->at_end()) ||
Austin Schuh05b70472020-01-01 17:11:17 -08001150 channel_data.message().data() != nullptr) {
1151 CHECK(channel_data.message().data() != nullptr)
1152 << ": Got a message without data. Forwarding entry which was "
Austin Schuh2f8fd752020-09-01 22:38:28 -07001153 "not matched? Use --skip_missing_forwarding_entries to "
1154 "ignore "
Austin Schuh15649d62019-12-28 16:36:38 -08001155 "this.";
Austin Schuh92547522019-12-28 14:33:43 -08001156
Austin Schuh2f8fd752020-09-01 22:38:28 -07001157 if (update_time) {
Austin Schuh8bd96322020-02-13 21:18:22 -08001158 // Confirm that the message was sent on the sending node before the
1159 // destination node (this node). As a proxy, do this by making sure
1160 // that time on the source node is past when the message was sent.
Austin Schuh2f8fd752020-09-01 22:38:28 -07001161 if (!FLAGS_skip_order_validation) {
1162 CHECK_LT(channel_timestamp.monotonic_remote_time,
1163 state->monotonic_remote_now(channel_index))
1164 << state->event_loop()->node()->name()->string_view() << " to "
1165 << state->remote_node(channel_index)->name()->string_view()
1166 << " " << state->DebugString();
1167 } else if (channel_timestamp.monotonic_remote_time >=
1168 state->monotonic_remote_now(channel_index)) {
1169 LOG(WARNING)
1170 << "Check failed: channel_timestamp.monotonic_remote_time < "
1171 "state->monotonic_remote_now(channel_index) ("
1172 << channel_timestamp.monotonic_remote_time << " vs. "
1173 << state->monotonic_remote_now(channel_index) << ") "
1174 << state->event_loop()->node()->name()->string_view() << " to "
1175 << state->remote_node(channel_index)->name()->string_view()
1176 << " currently " << channel_timestamp.monotonic_event_time
1177 << " ("
1178 << state->ToDistributedClock(
1179 channel_timestamp.monotonic_event_time)
1180 << ") remote event time "
1181 << channel_timestamp.monotonic_remote_time << " ("
1182 << state->RemoteToDistributedClock(
1183 channel_index, channel_timestamp.monotonic_remote_time)
1184 << ") " << state->DebugString();
1185 }
Austin Schuh8bd96322020-02-13 21:18:22 -08001186
1187 if (FLAGS_timestamps_to_csv) {
1188 if (offset_fp_ == nullptr) {
1189 offset_fp_ = fopen("/tmp/offsets.csv", "w");
1190 fprintf(
1191 offset_fp_,
1192 "# time_since_start, offset node 0, offset node 1, ...\n");
1193 first_time_ = channel_timestamp.realtime_event_time;
1194 }
1195
1196 fprintf(offset_fp_, "%.9f",
1197 std::chrono::duration_cast<std::chrono::duration<double>>(
1198 channel_timestamp.realtime_event_time - first_time_)
1199 .count());
Austin Schuh2f8fd752020-09-01 22:38:28 -07001200 for (int i = 1; i < time_offset_matrix_.rows(); ++i) {
1201 fprintf(offset_fp_, ", %.9f",
1202 time_offset_matrix_(i, 0) +
1203 time_slope_matrix_(i, 0) *
1204 chrono::duration<double>(
1205 event_loop_factory_->distributed_now()
1206 .time_since_epoch())
1207 .count());
Austin Schuh8bd96322020-02-13 21:18:22 -08001208 }
1209 fprintf(offset_fp_, "\n");
1210 }
Austin Schuh8bd96322020-02-13 21:18:22 -08001211 }
1212
Austin Schuh15649d62019-12-28 16:36:38 -08001213 // If we have access to the factory, use it to fix the realtime time.
Austin Schuh858c9f32020-08-31 16:56:12 -07001214 state->SetRealtimeOffset(channel_timestamp.monotonic_event_time,
1215 channel_timestamp.realtime_event_time);
Austin Schuh15649d62019-12-28 16:36:38 -08001216
Austin Schuh2f8fd752020-09-01 22:38:28 -07001217 VLOG(1) << MaybeNodeName(state->event_loop()->node()) << "Sending "
1218 << channel_timestamp.monotonic_event_time;
1219 // TODO(austin): std::move channel_data in and make that efficient in
1220 // simulation.
Austin Schuh858c9f32020-08-31 16:56:12 -07001221 state->Send(channel_index, channel_data.message().data()->Data(),
1222 channel_data.message().data()->size(),
1223 channel_timestamp.monotonic_remote_time,
1224 channel_timestamp.realtime_remote_time,
1225 channel_timestamp.remote_queue_index);
Austin Schuh2f8fd752020-09-01 22:38:28 -07001226 } else if (state->at_end() && !ignore_missing_data_) {
Austin Schuh8bd96322020-02-13 21:18:22 -08001227 // We are at the end of the log file and found missing data. Finish
Austin Schuh2f8fd752020-09-01 22:38:28 -07001228 // reading the rest of the log file and call it quits. We don't want
1229 // to replay partial data.
Austin Schuh858c9f32020-08-31 16:56:12 -07001230 while (state->OldestMessageTime() != monotonic_clock::max_time) {
1231 bool update_time_dummy;
1232 state->PopOldest(&update_time_dummy);
Austin Schuh8bd96322020-02-13 21:18:22 -08001233 }
Austin Schuh2f8fd752020-09-01 22:38:28 -07001234 } else {
1235 CHECK(channel_data.message().data() == nullptr) << ": Nullptr";
Austin Schuh92547522019-12-28 14:33:43 -08001236 }
Austin Schuhe309d2a2019-11-29 13:25:21 -08001237 } else {
Austin Schuh6f3babe2020-01-26 20:34:50 -08001238 LOG(WARNING)
1239 << "Not sending data from before the start of the log file. "
1240 << channel_timestamp.monotonic_event_time.time_since_epoch().count()
1241 << " start " << monotonic_start_time().time_since_epoch().count()
1242 << " " << FlatbufferToJson(channel_data);
Austin Schuhe309d2a2019-11-29 13:25:21 -08001243 }
1244
Austin Schuh858c9f32020-08-31 16:56:12 -07001245 const monotonic_clock::time_point next_time = state->OldestMessageTime();
Austin Schuh6f3babe2020-01-26 20:34:50 -08001246 if (next_time != monotonic_clock::max_time) {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001247 VLOG(1) << "Scheduling " << MaybeNodeName(state->event_loop()->node())
1248 << "wakeup for " << next_time << " ("
1249 << state->ToDistributedClock(next_time)
1250 << " distributed), now is " << state->monotonic_now();
Austin Schuh858c9f32020-08-31 16:56:12 -07001251 state->Setup(next_time);
James Kuszmaul314f1672020-01-03 20:02:08 -08001252 } else {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001253 VLOG(1) << MaybeNodeName(state->event_loop()->node())
1254 << "No next message, scheduling shutdown";
1255 // Set up a timer to fire immediately after now so this node can shut
1256 // down. If we don't do this, then the senders waiting on the message we
1257 // just read will never get called.
Austin Schuheecb9282020-01-08 17:43:30 -08001258 if (event_loop_factory_ != nullptr) {
Austin Schuh858c9f32020-08-31 16:56:12 -07001259 state->Setup(monotonic_now + event_loop_factory_->send_delay() +
1260 std::chrono::nanoseconds(1));
Austin Schuheecb9282020-01-08 17:43:30 -08001261 }
Austin Schuhe309d2a2019-11-29 13:25:21 -08001262 }
Austin Schuh8bd96322020-02-13 21:18:22 -08001263
Austin Schuh2f8fd752020-09-01 22:38:28 -07001264 // Once we make this call, the current time changes. So do everything
1265 // which involves time before changing it. That especially includes
1266 // sending the message.
1267 if (update_time) {
1268 VLOG(1) << MaybeNodeName(state->event_loop()->node())
1269 << "updating offsets";
1270
1271 std::vector<aos::monotonic_clock::time_point> before_times;
1272 before_times.resize(states_.size());
1273 std::transform(states_.begin(), states_.end(), before_times.begin(),
1274 [](const std::unique_ptr<State> &state) {
1275 return state->monotonic_now();
1276 });
1277
1278 for (size_t i = 0; i < states_.size(); ++i) {
1279 VLOG(1) << MaybeNodeName(
1280 states_[i]->event_loop()->node())
1281 << "before " << states_[i]->monotonic_now();
1282 }
1283
Austin Schuh8bd96322020-02-13 21:18:22 -08001284 UpdateOffsets();
Austin Schuh2f8fd752020-09-01 22:38:28 -07001285 VLOG(1) << MaybeNodeName(state->event_loop()->node()) << "Now is now "
1286 << state->monotonic_now();
1287
1288 for (size_t i = 0; i < states_.size(); ++i) {
1289 VLOG(1) << MaybeNodeName(
1290 states_[i]->event_loop()->node())
1291 << "after " << states_[i]->monotonic_now();
1292 }
1293
1294 // TODO(austin): We should be perfect.
1295 const std::chrono::nanoseconds kTolerance{3};
1296 if (!FLAGS_skip_order_validation) {
1297 CHECK_GE(next_time, state->monotonic_now())
1298 << ": Time skipped the next event.";
1299
1300 for (size_t i = 0; i < states_.size(); ++i) {
1301 CHECK_GE(states_[i]->monotonic_now(), before_times[i] - kTolerance)
1302 << ": Time changed too much on node "
1303 << MaybeNodeName(states_[i]->event_loop()->node());
1304 CHECK_LE(states_[i]->monotonic_now(), before_times[i] + kTolerance)
1305 << ": Time changed too much on node "
1306 << states_[i]->event_loop()->node()->name()->string_view();
1307 }
1308 } else {
1309 if (next_time < state->monotonic_now()) {
1310 LOG(WARNING) << "Check failed: next_time >= "
1311 "state->monotonic_now() ("
1312 << next_time << " vs. " << state->monotonic_now()
1313 << "): Time skipped the next event.";
1314 }
1315 for (size_t i = 0; i < states_.size(); ++i) {
1316 if (states_[i]->monotonic_now() < before_times[i] - kTolerance) {
1317 LOG(WARNING) << "Check failed: "
1318 "states_[i]->monotonic_now() "
1319 ">= before_times[i] - kTolerance ("
1320 << states_[i]->monotonic_now() << " vs. "
1321 << before_times[i] - kTolerance
1322 << ") : Time changed too much on node "
1323 << MaybeNodeName(states_[i]->event_loop()->node());
1324 }
1325 if (states_[i]->monotonic_now() > before_times[i] + kTolerance) {
1326 LOG(WARNING) << "Check failed: "
1327 "states_[i]->monotonic_now() "
1328 "<= before_times[i] + kTolerance ("
1329 << states_[i]->monotonic_now() << " vs. "
1330 << before_times[i] + kTolerance
1331 << ") : Time changed too much on node "
1332 << MaybeNodeName(states_[i]->event_loop()->node());
1333 }
1334 }
1335 }
Austin Schuh8bd96322020-02-13 21:18:22 -08001336 }
Austin Schuh2f8fd752020-09-01 22:38:28 -07001337
1338 VLOG(1) << MaybeNodeName(state->event_loop()->node()) << "Done sending at "
1339 << state->event_loop()->context().monotonic_event_time << " now "
1340 << state->monotonic_now();
Austin Schuh858c9f32020-08-31 16:56:12 -07001341 }));
Austin Schuhe309d2a2019-11-29 13:25:21 -08001342
Austin Schuh6f3babe2020-01-26 20:34:50 -08001343 ++live_nodes_;
1344
Austin Schuh858c9f32020-08-31 16:56:12 -07001345 if (state->OldestMessageTime() != monotonic_clock::max_time) {
1346 event_loop->OnRun([state]() { state->Setup(state->OldestMessageTime()); });
Austin Schuhe309d2a2019-11-29 13:25:21 -08001347 }
1348}
1349
1350void LogReader::Deregister() {
James Kuszmaul84ff3e52020-01-03 19:48:53 -08001351 // Make sure that things get destroyed in the correct order, rather than
1352 // relying on getting the order correct in the class definition.
Austin Schuh8bd96322020-02-13 21:18:22 -08001353 for (std::unique_ptr<State> &state : states_) {
Austin Schuh858c9f32020-08-31 16:56:12 -07001354 state->Deregister();
Austin Schuhe309d2a2019-11-29 13:25:21 -08001355 }
Austin Schuh92547522019-12-28 14:33:43 -08001356
James Kuszmaul84ff3e52020-01-03 19:48:53 -08001357 event_loop_factory_unique_ptr_.reset();
1358 event_loop_factory_ = nullptr;
Austin Schuhe309d2a2019-11-29 13:25:21 -08001359}
1360
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -08001361void LogReader::RemapLoggedChannel(std::string_view name, std::string_view type,
1362 std::string_view add_prefix) {
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -08001363 for (size_t ii = 0; ii < logged_configuration()->channels()->size(); ++ii) {
1364 const Channel *const channel = logged_configuration()->channels()->Get(ii);
1365 if (channel->name()->str() == name &&
1366 channel->type()->string_view() == type) {
1367 CHECK_EQ(0u, remapped_channels_.count(ii))
1368 << "Already remapped channel "
1369 << configuration::CleanedChannelToString(channel);
1370 remapped_channels_[ii] = std::string(add_prefix) + std::string(name);
1371 VLOG(1) << "Remapping channel "
1372 << configuration::CleanedChannelToString(channel)
1373 << " to have name " << remapped_channels_[ii];
Austin Schuh6331ef92020-01-07 18:28:09 -08001374 MakeRemappedConfig();
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -08001375 return;
1376 }
1377 }
1378 LOG(FATAL) << "Unable to locate channel with name " << name << " and type "
1379 << type;
1380}
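
// Example usage (a sketch with a hypothetical channel name and type; the
// add_prefix argument is prepended verbatim to the logged name, so the
// replayed data below would come out on "/original/camera"):
//
//   LogReader reader(...);  // constructed from the log files to replay
//   reader.RemapLoggedChannel("/camera", "aos.examples.Ping", "/original");
//   reader.Register();
//
// After Register(), the logged "/camera" messages are replayed on
// "/original/camera", leaving "/camera" free for live senders.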
1381
1382void LogReader::MakeRemappedConfig() {
Austin Schuh8bd96322020-02-13 21:18:22 -08001383 for (std::unique_ptr<State> &state : states_) {
Austin Schuh6aa77be2020-02-22 21:06:40 -08001384 if (state) {
Austin Schuh858c9f32020-08-31 16:56:12 -07001385 CHECK(!state->event_loop())
Austin Schuh6aa77be2020-02-22 21:06:40 -08001386 << ": Can't change the mapping after the events are scheduled.";
1387 }
Austin Schuh6f3babe2020-01-26 20:34:50 -08001388 }
Austin Schuhac0771c2020-01-07 18:36:30 -08001389
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -08001390 // If no remapping occurred and we are using the original config, then there
1391 // is nothing interesting to do here.
1392 if (remapped_channels_.empty() && replay_configuration_ == nullptr) {
Austin Schuh6f3babe2020-01-26 20:34:50 -08001393 remapped_configuration_ = logged_configuration();
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -08001394 return;
1395 }
1396 // Config to copy Channel definitions from. Use the specified
1397 // replay_configuration_ if it has been provided.
1398 const Configuration *const base_config = replay_configuration_ == nullptr
1399 ? logged_configuration()
1400 : replay_configuration_;
1401 // The remapped config will be identical to the base_config, except that it
1402 // will have a bunch of extra channels in the channel list, which are exact
1403 // copies of the remapped channels, but with different names.
1404 // Because the flatbuffers API is a pain to work with, this requires a bit of
1405 // a song-and-dance to get copied over.
1406 // The order of operations is to:
1407 // 1) Make a flatbuffer builder for a config that will just contain a list of
1408 // the new channels that we want to add.
1409 // 2) For each channel that we are remapping:
1410 // a) Make a buffer/builder and construct into it a Channel table that only
1411 // contains the new name for the channel.
1412 // b) Merge the new channel with just the name into the channel that we are
1413 // trying to copy, built in the flatbuffer builder made in 1. This gives
1414 // us the new channel definition that we need.
1415 // 3) Using this list of offsets, build the Configuration of just new
1416 // Channels.
1417 // 4) Merge the Configuration with the new Channels into the base_config.
1418 // 5) Call MergeConfiguration() on that result to give MergeConfiguration a
1419 // chance to sanitize the config.
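// As a concrete (hypothetical) example: remapping a logged channel
// {"name": "/camera", "type": "aos.examples.Ping"} with an add_prefix of
// "/original" leaves the base channel untouched and appends an otherwise
// identical copy named "/original/camera", which is the channel the
// replayed data will be sent on.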
1420
1421 // This is the builder that we use for the config containing all the new
1422 // channels.
1423 flatbuffers::FlatBufferBuilder new_config_fbb;
Austin Schuhd7b15da2020-02-17 15:06:11 -08001424 new_config_fbb.ForceDefaults(true);
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -08001425 std::vector<flatbuffers::Offset<Channel>> channel_offsets;
1426 for (auto &pair : remapped_channels_) {
1427 // This is the builder that we use for creating the Channel with just the
1428 // new name.
1429 flatbuffers::FlatBufferBuilder new_name_fbb;
Austin Schuhd7b15da2020-02-17 15:06:11 -08001430 new_name_fbb.ForceDefaults(true);
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -08001431 const flatbuffers::Offset<flatbuffers::String> name_offset =
1432 new_name_fbb.CreateString(pair.second);
1433 ChannelBuilder new_name_builder(new_name_fbb);
1434 new_name_builder.add_name(name_offset);
1435 new_name_fbb.Finish(new_name_builder.Finish());
1436 const FlatbufferDetachedBuffer<Channel> new_name = new_name_fbb.Release();
Austin Schuh2f8fd752020-09-01 22:38:28 -07001437 // Retrieve the channel that we want to copy, confirming that it is
1438 // actually present in base_config.
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -08001439 const Channel *const base_channel = CHECK_NOTNULL(configuration::GetChannel(
1440 base_config, logged_configuration()->channels()->Get(pair.first), "",
1441 nullptr));
1442 // Actually create the new channel and put it into the vector of Offsets
1443 // that we will use to create the new Configuration.
1444 channel_offsets.emplace_back(MergeFlatBuffers<Channel>(
1445 reinterpret_cast<const flatbuffers::Table *>(base_channel),
1446 reinterpret_cast<const flatbuffers::Table *>(&new_name.message()),
1447 &new_config_fbb));
1448 }
1449 // Create the Configuration containing the new channels that we want to add.
Austin Schuhfa895892020-01-07 20:07:41 -08001450 const auto new_name_vector_offsets =
1451 new_config_fbb.CreateVector(channel_offsets);
James Kuszmaulc7bbb3e2020-01-03 20:01:00 -08001452 ConfigurationBuilder new_config_builder(new_config_fbb);
1453 new_config_builder.add_channels(new_name_vector_offsets);
1454 new_config_fbb.Finish(new_config_builder.Finish());
1455 const FlatbufferDetachedBuffer<Configuration> new_name_config =
1456 new_config_fbb.Release();
1457 // Merge the new channels configuration into the base_config, giving us the
1458 // remapped configuration.
1459 remapped_configuration_buffer_ =
1460 std::make_unique<FlatbufferDetachedBuffer<Configuration>>(
1461 MergeFlatBuffers<Configuration>(base_config,
1462 &new_name_config.message()));
1463 // Call MergeConfiguration to deal with sanitizing the config.
1464 remapped_configuration_buffer_ =
1465 std::make_unique<FlatbufferDetachedBuffer<Configuration>>(
1466 configuration::MergeConfiguration(*remapped_configuration_buffer_));
1467
1468 remapped_configuration_ = &remapped_configuration_buffer_->message();
1469}
1470
Austin Schuh6f3babe2020-01-26 20:34:50 -08001471const Channel *LogReader::RemapChannel(const EventLoop *event_loop,
1472 const Channel *channel) {
1473 std::string_view channel_name = channel->name()->string_view();
1474 std::string_view channel_type = channel->type()->string_view();
1475 const int channel_index =
1476 configuration::ChannelIndex(logged_configuration(), channel);
1477 // If the channel is remapped, find the correct channel name to use.
1478 if (remapped_channels_.count(channel_index) > 0) {
Austin Schuhee711052020-08-24 16:06:09 -07001479 VLOG(3) << "Got remapped channel on "
Austin Schuh6f3babe2020-01-26 20:34:50 -08001480 << configuration::CleanedChannelToString(channel);
1481 channel_name = remapped_channels_[channel_index];
1482 }
1483
Austin Schuhee711052020-08-24 16:06:09 -07001484 VLOG(2) << "Going to remap channel " << channel_name << " " << channel_type;
Austin Schuh6f3babe2020-01-26 20:34:50 -08001485 const Channel *remapped_channel = configuration::GetChannel(
1486 event_loop->configuration(), channel_name, channel_type,
1487 event_loop->name(), event_loop->node());
1488
1489 CHECK(remapped_channel != nullptr)
1490 << ": Unable to send {\"name\": \"" << channel_name << "\", \"type\": \""
1491 << channel_type << "\"} because it is not in the provided configuration.";
1492
1493 return remapped_channel;
1494}
1495
Austin Schuh858c9f32020-08-31 16:56:12 -07001496LogReader::State::State(std::unique_ptr<ChannelMerger> channel_merger)
1497 : channel_merger_(std::move(channel_merger)) {}
1498
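// Binds this State to its node's NodeEventLoopFactory and creates the
// "log_reader" EventLoop that the replayed messages will be sent from.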
1499EventLoop *LogReader::State::SetNodeEventLoopFactory(
1500 NodeEventLoopFactory *node_event_loop_factory) {
1501 node_event_loop_factory_ = node_event_loop_factory;
1502 event_loop_unique_ptr_ =
1503 node_event_loop_factory_->MakeEventLoop("log_reader");
1504 return event_loop_unique_ptr_.get();
1505}
1506
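// Sizes the per-channel sender, timestamp-filter, and target-factory tables
// to match the number of channels in the logged configuration.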
1507void LogReader::State::SetChannelCount(size_t count) {
1508 channels_.resize(count);
1509 filters_.resize(count);
1510 channel_target_event_loop_factory_.resize(count);
1511}
1512
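// Records everything needed to replay one channel: the sender to publish
// on, the clock-offset filter to sample for forwarded messages (may be
// null), and the NodeEventLoopFactory for the channel's target node.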
1513void LogReader::State::SetChannel(
1514 size_t channel, std::unique_ptr<RawSender> sender,
Austin Schuh2f8fd752020-09-01 22:38:28 -07001515 message_bridge::NoncausalOffsetEstimator *filter,
Austin Schuh858c9f32020-08-31 16:56:12 -07001516 NodeEventLoopFactory *channel_target_event_loop_factory) {
1517 channels_[channel] = std::move(sender);
1518 filters_[channel] = filter;
1519 channel_target_event_loop_factory_[channel] =
1520 channel_target_event_loop_factory;
1521}
1522
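// Removes and returns the oldest queued message: its delivery timestamps,
// channel index, and payload. *update_time is set when popping this message
// consumed a sample from the clock-offset filter, signalling that the
// replay loop needs to re-solve the node time offsets after sending it.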
1523std::tuple<TimestampMerger::DeliveryTimestamp, int,
1524 FlatbufferVector<MessageHeader>>
1525LogReader::State::PopOldest(bool *update_time) {
1526 CHECK_GT(sorted_messages_.size(), 0u);
1527
1528 std::tuple<TimestampMerger::DeliveryTimestamp, int,
Austin Schuh2f8fd752020-09-01 22:38:28 -07001529 FlatbufferVector<MessageHeader>,
1530 message_bridge::NoncausalOffsetEstimator *>
Austin Schuh858c9f32020-08-31 16:56:12 -07001531 result = std::move(sorted_messages_.front());
Austin Schuh2f8fd752020-09-01 22:38:28 -07001532 VLOG(2) << MaybeNodeName(event_loop_->node()) << "PopOldest Popping "
Austin Schuh858c9f32020-08-31 16:56:12 -07001533 << std::get<0>(result).monotonic_event_time;
1534 sorted_messages_.pop_front();
1535 SeedSortedMessages();
1536
Austin Schuh2f8fd752020-09-01 22:38:28 -07001537 if (std::get<3>(result) != nullptr) {
1538 *update_time = std::get<3>(result)->Pop(
1539 event_loop_->node(), std::get<0>(result).monotonic_event_time);
1540 } else {
1541 *update_time = false;
1542 }
Austin Schuh858c9f32020-08-31 16:56:12 -07001543 return std::make_tuple(std::get<0>(result), std::get<1>(result),
1544 std::move(std::get<2>(result)));
1545}
1546
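// Returns the monotonic time of the next message to replay on this node:
// the front of the presorted queue if it is non-empty, otherwise whatever
// the ChannelMerger has next, or monotonic_clock::max_time once the node is
// out of data.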
1547monotonic_clock::time_point LogReader::State::OldestMessageTime() const {
1548 if (sorted_messages_.size() > 0) {
Austin Schuh2f8fd752020-09-01 22:38:28 -07001549 VLOG(2) << MaybeNodeName(event_loop_->node()) << "oldest message at "
Austin Schuh858c9f32020-08-31 16:56:12 -07001550 << std::get<0>(sorted_messages_.front()).monotonic_event_time;
1551 return std::get<0>(sorted_messages_.front()).monotonic_event_time;
1552 }
1553
1554 return channel_merger_->OldestMessageTime();
1555}
1556
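// Pulls messages out of the ChannelMerger into sorted_messages_ until
// roughly two seconds of data (and at least up to the log start) is
// buffered, sampling the clock-offset filter for every message that carries
// forwarding timestamps along the way.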
1557void LogReader::State::SeedSortedMessages() {
1558 const aos::monotonic_clock::time_point end_queue_time =
1559 (sorted_messages_.size() > 0
1560 ? std::get<0>(sorted_messages_.front()).monotonic_event_time
1561 : channel_merger_->monotonic_start_time()) +
1562 std::chrono::seconds(2);
1563
1564 while (true) {
1565 if (channel_merger_->OldestMessageTime() == monotonic_clock::max_time) {
1566 return;
1567 }
1568 if (sorted_messages_.size() > 0) {
1569 // Stop placing sorted messages on the list once we have 2 seconds
1570 // queued up (but queue at least until the log starts).
1571 if (end_queue_time <
1572 std::get<0>(sorted_messages_.back()).monotonic_event_time) {
1573 return;
1574 }
1575 }
1576
1577 TimestampMerger::DeliveryTimestamp channel_timestamp;
1578 int channel_index;
1579 FlatbufferVector<MessageHeader> channel_data =
1580 FlatbufferVector<MessageHeader>::Empty();
1581
Austin Schuh2f8fd752020-09-01 22:38:28 -07001582 message_bridge::NoncausalOffsetEstimator *filter = nullptr;
1583
Austin Schuh858c9f32020-08-31 16:56:12 -07001584 std::tie(channel_timestamp, channel_index, channel_data) =
1585 channel_merger_->PopOldest();
1586
Austin Schuh2f8fd752020-09-01 22:38:28 -07001587 // Skip any messages without forwarding information.
1588 if (channel_timestamp.monotonic_remote_time != monotonic_clock::min_time) {
1589 // Got a forwarding timestamp!
1590 filter = filters_[channel_index];
1591
1592 CHECK(filter != nullptr);
1593
1594 // Call the correct method depending on whether we are the forward or
1595 // reverse direction here.
1596 filter->Sample(event_loop_->node(),
1597 channel_timestamp.monotonic_event_time,
1598 channel_timestamp.monotonic_remote_time);
1599 }
Austin Schuh858c9f32020-08-31 16:56:12 -07001600 sorted_messages_.emplace_back(channel_timestamp, channel_index,
Austin Schuh2f8fd752020-09-01 22:38:28 -07001601 std::move(channel_data), filter);
Austin Schuh858c9f32020-08-31 16:56:12 -07001602 }
1603}
1604
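// Drops the per-channel senders, the replay event loop, and the factory
// pointers so this State no longer references the simulated event loops.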
1605void LogReader::State::Deregister() {
1606 for (size_t i = 0; i < channels_.size(); ++i) {
1607 channels_[i].reset();
1608 }
1609 event_loop_unique_ptr_.reset();
1610 event_loop_ = nullptr;
1611 timer_handler_ = nullptr;
1612 node_event_loop_factory_ = nullptr;
1613}
1614
Austin Schuhe309d2a2019-11-29 13:25:21 -08001615} // namespace logger
1616} // namespace aos