#include "aos/events/logging/logfile_utils.h"

#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/uio.h>

#include <algorithm>
#include <climits>

#include "absl/strings/escaping.h"
#include "aos/configuration.h"
#include "aos/flatbuffer_merge.h"
#include "aos/util/file.h"
#include "flatbuffers/flatbuffers.h"
#include "gflags/gflags.h"
#include "glog/logging.h"

#if defined(__x86_64__)
#define ENABLE_LZMA 1
#elif defined(__aarch64__)
#define ENABLE_LZMA 1
#else
#define ENABLE_LZMA 0
#endif

#if ENABLE_LZMA
#include "aos/events/logging/lzma_encoder.h"
#endif

DEFINE_int32(flush_size, 128000,
             "Number of outstanding bytes to allow before flushing to disk.");
DEFINE_double(
    flush_period, 5.0,
    "Max time to let data sit in the queue before flushing in seconds.");

DEFINE_double(
    max_out_of_order, -1,
    "If set, this overrides the max out of order duration for a log file.");

namespace aos::logger {

namespace chrono = std::chrono;

DetachedBufferWriter::DetachedBufferWriter(
    std::string_view filename, std::unique_ptr<DetachedBufferEncoder> encoder)
    : filename_(filename), encoder_(std::move(encoder)) {
  if (!util::MkdirPIfSpace(filename, 0777)) {
    ran_out_of_space_ = true;
  } else {
    fd_ = open(std::string(filename).c_str(),
               O_RDWR | O_CLOEXEC | O_CREAT | O_EXCL, 0774);
    if (fd_ == -1 && errno == ENOSPC) {
      ran_out_of_space_ = true;
    } else {
      PCHECK(fd_ != -1) << ": Failed to open " << filename << " for writing";
      VLOG(1) << "Opened " << filename << " for writing";
    }
  }
}

DetachedBufferWriter::~DetachedBufferWriter() {
  Close();
  if (ran_out_of_space_) {
    CHECK(acknowledge_ran_out_of_space_)
        << ": Unacknowledged out of disk space, log file was not completed";
  }
}

DetachedBufferWriter::DetachedBufferWriter(DetachedBufferWriter &&other) {
  *this = std::move(other);
}

// When other is destroyed "soon" (which it should be because we're getting an
// rvalue reference to it), it will flush etc all the data we have queued up
// (because that data will then be its data).
DetachedBufferWriter &DetachedBufferWriter::operator=(
    DetachedBufferWriter &&other) {
  std::swap(filename_, other.filename_);
  std::swap(encoder_, other.encoder_);
  std::swap(fd_, other.fd_);
  std::swap(ran_out_of_space_, other.ran_out_of_space_);
  std::swap(acknowledge_ran_out_of_space_, other.acknowledge_ran_out_of_space_);
  std::swap(iovec_, other.iovec_);
  std::swap(max_write_time_, other.max_write_time_);
  std::swap(max_write_time_bytes_, other.max_write_time_bytes_);
  std::swap(max_write_time_messages_, other.max_write_time_messages_);
  std::swap(total_write_time_, other.total_write_time_);
  std::swap(total_write_count_, other.total_write_count_);
  std::swap(total_write_messages_, other.total_write_messages_);
  std::swap(total_write_bytes_, other.total_write_bytes_);
  return *this;
}

void DetachedBufferWriter::QueueSpan(absl::Span<const uint8_t> span) {
  if (ran_out_of_space_) {
    // We don't want any later data to be written after space becomes
    // available, so refuse to write anything more once we've dropped data
    // because we ran out of space.
    VLOG(1) << "Ignoring span: " << span.size();
    return;
  }

  aos::monotonic_clock::time_point now;
  if (encoder_->may_bypass() && span.size() > 4096u) {
    // Over this threshold, we'll assume it's cheaper to add an extra
    // syscall to write the data immediately instead of copying it to
    // enqueue.

    // First, flush everything.
    while (encoder_->queue_size() > 0u) {
      Flush();
    }

    // Then, write it directly.
    const auto start = aos::monotonic_clock::now();
    const ssize_t written = write(fd_, span.data(), span.size());
    const auto end = aos::monotonic_clock::now();
    HandleWriteReturn(written, span.size());
    UpdateStatsForWrite(end - start, written, 1);
    now = end;
  } else {
    encoder_->Encode(CopySpanAsDetachedBuffer(span));
    now = aos::monotonic_clock::now();
  }

  FlushAtThreshold(now);
}

void DetachedBufferWriter::Close() {
  if (fd_ == -1) {
    return;
  }
  encoder_->Finish();
  while (encoder_->queue_size() > 0) {
    Flush();
  }
  if (close(fd_) == -1) {
    if (errno == ENOSPC) {
      ran_out_of_space_ = true;
    } else {
      PLOG(ERROR) << "Closing log file failed";
    }
  }
  fd_ = -1;
  VLOG(1) << "Closed " << filename_;
}
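
// Rough usage sketch for DetachedBufferWriter (illustrative only; the encoder
// type and call sequence here are assumptions based on this file, not a
// documented API):
//
//   DetachedBufferWriter writer("/tmp/log_part0.bfbs",
//                               std::make_unique<DummyEncoder>());
//   writer.QueueSpan(size_prefixed_flatbuffer_span);
//   writer.Close();
//
// QueueSpan either hands the span to the encoder or, for large spans when the
// encoder may be bypassed, writes it straight to disk; FlushAtThreshold then
// decides when queued, encoded data actually reaches the filesystem.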

void DetachedBufferWriter::Flush() {
  if (ran_out_of_space_) {
    // We don't want any later data to be written after space becomes available,
    // so refuse to write anything more once we've dropped data because we ran
    // out of space.
    if (encoder_) {
      VLOG(1) << "Ignoring queue: " << encoder_->queue().size();
      encoder_->Clear(encoder_->queue().size());
    } else {
      VLOG(1) << "No queue to ignore";
    }
    return;
  }

  const auto queue = encoder_->queue();
  if (queue.empty()) {
    return;
  }

  iovec_.clear();
  const size_t iovec_size = std::min<size_t>(queue.size(), IOV_MAX);
  iovec_.resize(iovec_size);
  size_t counted_size = 0;
  for (size_t i = 0; i < iovec_size; ++i) {
    iovec_[i].iov_base = const_cast<uint8_t *>(queue[i].data());
    iovec_[i].iov_len = queue[i].size();
    counted_size += iovec_[i].iov_len;
  }

  const auto start = aos::monotonic_clock::now();
  const ssize_t written = writev(fd_, iovec_.data(), iovec_.size());
  const auto end = aos::monotonic_clock::now();
  HandleWriteReturn(written, counted_size);

  encoder_->Clear(iovec_size);

  UpdateStatsForWrite(end - start, written, iovec_size);
}

void DetachedBufferWriter::HandleWriteReturn(ssize_t write_return,
                                             size_t write_size) {
  if (write_return == -1 && errno == ENOSPC) {
    ran_out_of_space_ = true;
    return;
  }
  PCHECK(write_return >= 0) << ": write failed";
  if (write_return < static_cast<ssize_t>(write_size)) {
    // Sometimes this happens instead of ENOSPC. On a real filesystem, this
    // never seems to happen in any other case. If we ever want to log to a
    // socket, this will happen more often. However, until we get there, we'll
    // just assume it means we ran out of space.
    ran_out_of_space_ = true;
    return;
  }
}

void DetachedBufferWriter::UpdateStatsForWrite(
    aos::monotonic_clock::duration duration, ssize_t written, int iovec_size) {
  if (duration > max_write_time_) {
    max_write_time_ = duration;
    max_write_time_bytes_ = written;
    max_write_time_messages_ = iovec_size;
  }
  total_write_time_ += duration;
  ++total_write_count_;
  total_write_messages_ += iovec_size;
  total_write_bytes_ += written;
}

void DetachedBufferWriter::FlushAtThreshold(
    aos::monotonic_clock::time_point now) {
  if (ran_out_of_space_) {
    // We don't want any later data to be written after space becomes available,
    // so refuse to write anything more once we've dropped data because we ran
    // out of space.
    if (encoder_) {
      VLOG(1) << "Ignoring queue: " << encoder_->queue().size();
      encoder_->Clear(encoder_->queue().size());
    } else {
      VLOG(1) << "No queue to ignore";
    }
    return;
  }

  // We don't want to flush the first time through. Otherwise we will flush
  // while the log file header may still be compressing, defeating any
  // parallelism and queueing there.
  if (last_flush_time_ == aos::monotonic_clock::min_time) {
    last_flush_time_ = now;
  }

  // Flush if we are at the max number of iovs per writev, because there's no
  // point queueing up any more data in memory. Also flush once we have enough
  // data queued up or if it has been long enough.
  while (encoder_->queued_bytes() > static_cast<size_t>(FLAGS_flush_size) ||
         encoder_->queue_size() >= IOV_MAX ||
         now > last_flush_time_ +
                   chrono::duration_cast<chrono::nanoseconds>(
                       chrono::duration<double>(FLAGS_flush_period))) {
    last_flush_time_ = now;
    Flush();
  }
}

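// PackMessage serializes one logged message into a MessageHeader flatbuffer.
// Roughly: kLogMessage and kLogRemoteMessage store the data plus local or
// remote send times respectively, kLogMessageAndDeliveryTime stores the data
// plus both local and remote times, and kLogDeliveryTimeOnly stores just the
// timestamps with no data.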
flatbuffers::Offset<MessageHeader> PackMessage(
    flatbuffers::FlatBufferBuilder *fbb, const Context &context,
    int channel_index, LogType log_type) {
  flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data_offset;

  switch (log_type) {
    case LogType::kLogMessage:
    case LogType::kLogMessageAndDeliveryTime:
    case LogType::kLogRemoteMessage:
      data_offset = fbb->CreateVector(
          static_cast<const uint8_t *>(context.data), context.size);
      break;

    case LogType::kLogDeliveryTimeOnly:
      break;
  }

  MessageHeader::Builder message_header_builder(*fbb);
  message_header_builder.add_channel_index(channel_index);

  switch (log_type) {
    case LogType::kLogRemoteMessage:
      message_header_builder.add_queue_index(context.remote_queue_index);
      message_header_builder.add_monotonic_sent_time(
          context.monotonic_remote_time.time_since_epoch().count());
      message_header_builder.add_realtime_sent_time(
          context.realtime_remote_time.time_since_epoch().count());
      break;

    case LogType::kLogMessage:
    case LogType::kLogMessageAndDeliveryTime:
    case LogType::kLogDeliveryTimeOnly:
      message_header_builder.add_queue_index(context.queue_index);
      message_header_builder.add_monotonic_sent_time(
          context.monotonic_event_time.time_since_epoch().count());
      message_header_builder.add_realtime_sent_time(
          context.realtime_event_time.time_since_epoch().count());
      break;
  }

  switch (log_type) {
    case LogType::kLogMessage:
    case LogType::kLogRemoteMessage:
      message_header_builder.add_data(data_offset);
      break;

    case LogType::kLogMessageAndDeliveryTime:
      message_header_builder.add_data(data_offset);
      [[fallthrough]];

    case LogType::kLogDeliveryTimeOnly:
      message_header_builder.add_monotonic_remote_time(
          context.monotonic_remote_time.time_since_epoch().count());
      message_header_builder.add_realtime_remote_time(
          context.realtime_remote_time.time_since_epoch().count());
      message_header_builder.add_remote_queue_index(context.remote_queue_index);
      break;
  }

  return message_header_builder.Finish();
}

SpanReader::SpanReader(std::string_view filename) : filename_(filename) {
  static const std::string_view kXz = ".xz";
  if (filename.substr(filename.size() - kXz.size()) == kXz) {
#if ENABLE_LZMA
    decoder_ = std::make_unique<LzmaDecoder>(filename);
#else
    LOG(FATAL) << "Reading xz-compressed files not supported on this platform";
#endif
  } else {
    decoder_ = std::make_unique<DummyDecoder>(filename);
  }
}

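// Each record in a log file is a size-prefixed flatbuffer: a uoffset_t length
// followed by that many bytes of message. PeekMessage returns a span covering
// the prefix plus the message without consuming it, pulling more blocks from
// the decoder as needed; ConsumeMessage then advances past it.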
absl::Span<const uint8_t> SpanReader::PeekMessage() {
  // Make sure we have enough for the size.
  if (data_.size() - consumed_data_ < sizeof(flatbuffers::uoffset_t)) {
    if (!ReadBlock()) {
      return absl::Span<const uint8_t>();
    }
  }

  // Now make sure we have enough for the message.
  const size_t data_size =
      flatbuffers::GetPrefixedSize(data_.data() + consumed_data_) +
      sizeof(flatbuffers::uoffset_t);
  if (data_size == sizeof(flatbuffers::uoffset_t)) {
    LOG(ERROR) << "Size of data is zero. Log file end is corrupted, skipping.";
    LOG(ERROR) << " Rest of log file is "
               << absl::BytesToHexString(std::string_view(
                      reinterpret_cast<const char *>(data_.data() +
                                                     consumed_data_),
                      data_.size() - consumed_data_));
    return absl::Span<const uint8_t>();
  }
  while (data_.size() < consumed_data_ + data_size) {
    if (!ReadBlock()) {
      return absl::Span<const uint8_t>();
    }
  }

  // And return the span without consuming it; ConsumeMessage() advances past
  // the data.
  const uint8_t *data_ptr = data_.data() + consumed_data_;

  return absl::Span<const uint8_t>(data_ptr, data_size);
}

void SpanReader::ConsumeMessage() {
  consumed_data_ +=
      flatbuffers::GetPrefixedSize(data_.data() + consumed_data_) +
      sizeof(flatbuffers::uoffset_t);
}

absl::Span<const uint8_t> SpanReader::ReadMessage() {
  absl::Span<const uint8_t> result = PeekMessage();
  if (result != absl::Span<const uint8_t>()) {
    ConsumeMessage();
  }
  return result;
}

bool SpanReader::ReadBlock() {
  // This is the amount of data we grab at a time. Doing larger chunks minimizes
  // syscalls and helps decompressors batch things more efficiently.
  constexpr size_t kReadSize = 256 * 1024;

  // Strip off any unused data at the front.
  if (consumed_data_ != 0) {
    data_.erase_front(consumed_data_);
    consumed_data_ = 0;
  }

  const size_t starting_size = data_.size();

  // This should automatically grow the backing store. It won't shrink if we
  // get a small chunk later. This reduces allocations when we want to append
  // more data.
  data_.resize(starting_size + kReadSize);

  const size_t count =
      decoder_->Read(data_.begin() + starting_size, data_.end());
  data_.resize(starting_size + count);
  if (count == 0) {
    return false;
  }

  return true;
}

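// ReadHeader pulls just the log file header out of a single part file, or
// returns nullopt if the file is empty, truncated, or fails verification.
// A small sketch of inspecting a header (illustrative; the field access is an
// assumption about LogFileHeader, not something this file defines):
//
//   if (auto header = ReadHeader("/tmp/log_part0.bfbs")) {
//     LOG(INFO) << FlatbufferToJson(header->message().node());
//   }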
std::optional<SizePrefixedFlatbufferVector<LogFileHeader>> ReadHeader(
    std::string_view filename) {
  SpanReader span_reader(filename);
  absl::Span<const uint8_t> config_data = span_reader.ReadMessage();

  // Make sure something was read.
  if (config_data == absl::Span<const uint8_t>()) {
    return std::nullopt;
  }

  // And copy the config so we have it forever, removing the size prefix.
  SizePrefixedFlatbufferVector<LogFileHeader> result(config_data);
  if (!result.Verify()) {
    return std::nullopt;
  }
  return result;
}

std::optional<SizePrefixedFlatbufferVector<MessageHeader>> ReadNthMessage(
    std::string_view filename, size_t n) {
  SpanReader span_reader(filename);
  absl::Span<const uint8_t> data_span = span_reader.ReadMessage();
  for (size_t i = 0; i < n + 1; ++i) {
    data_span = span_reader.ReadMessage();

    // Make sure something was read.
    if (data_span == absl::Span<const uint8_t>()) {
      return std::nullopt;
    }
  }

  // And copy the message so we have it forever, removing the size prefix.
  SizePrefixedFlatbufferVector<MessageHeader> result(data_span);
  if (!result.Verify()) {
    return std::nullopt;
  }
  return result;
}

MessageReader::MessageReader(std::string_view filename)
    : span_reader_(filename),
      raw_log_file_header_(
          SizePrefixedFlatbufferVector<LogFileHeader>::Empty()) {
  // Make sure we have enough to read the size.
  absl::Span<const uint8_t> header_data = span_reader_.ReadMessage();

  // Make sure something was read.
  CHECK(header_data != absl::Span<const uint8_t>())
      << ": Failed to read header from: " << filename;

  // And copy the header data so we have it forever.
  ResizeableBuffer header_data_copy;
  header_data_copy.resize(header_data.size());
  memcpy(header_data_copy.data(), header_data.begin(), header_data_copy.size());
  raw_log_file_header_ =
      SizePrefixedFlatbufferVector<LogFileHeader>(std::move(header_data_copy));

  max_out_of_order_duration_ =
      FLAGS_max_out_of_order > 0
          ? chrono::duration_cast<chrono::nanoseconds>(
                chrono::duration<double>(FLAGS_max_out_of_order))
          : chrono::nanoseconds(log_file_header()->max_out_of_order_duration());

  VLOG(1) << "Opened " << filename << " as node "
          << FlatbufferToJson(log_file_header()->node());
}

std::optional<SizePrefixedFlatbufferVector<MessageHeader>>
MessageReader::ReadMessage() {
  absl::Span<const uint8_t> msg_data = span_reader_.ReadMessage();
  if (msg_data == absl::Span<const uint8_t>()) {
    return std::nullopt;
  }

  SizePrefixedFlatbufferVector<MessageHeader> result(msg_data);

  const monotonic_clock::time_point timestamp = monotonic_clock::time_point(
      chrono::nanoseconds(result.message().monotonic_sent_time()));

  newest_timestamp_ = std::max(newest_timestamp_, timestamp);
  VLOG(2) << "Read from " << filename() << " data " << FlatbufferToJson(result);
  return std::move(result);
}

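// PartsMessageReader chains MessageReaders across all the files in one
// LogParts, tracking the newest timestamp seen and CHECK-ing that messages
// never arrive more than max_out_of_order_duration() out of order once the
// log is past its start time.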
PartsMessageReader::PartsMessageReader(LogParts log_parts)
    : parts_(std::move(log_parts)), message_reader_(parts_.parts[0]) {}

std::optional<SizePrefixedFlatbufferVector<MessageHeader>>
PartsMessageReader::ReadMessage() {
  while (!done_) {
    std::optional<SizePrefixedFlatbufferVector<MessageHeader>> message =
        message_reader_.ReadMessage();
    if (message) {
      newest_timestamp_ = message_reader_.newest_timestamp();
      const monotonic_clock::time_point monotonic_sent_time(
          chrono::nanoseconds(message->message().monotonic_sent_time()));
      // TODO(austin): Does this work with startup? Might need to use the start
      // time.
      // TODO(austin): Does this work with startup when we don't know the remote
      // start time too? Look at one of those logs to compare.
      if (monotonic_sent_time >
          parts_.monotonic_start_time + max_out_of_order_duration()) {
        after_start_ = true;
      }
      if (after_start_) {
        CHECK_GE(monotonic_sent_time,
                 newest_timestamp_ - max_out_of_order_duration())
            << ": Max out of order of " << max_out_of_order_duration().count()
            << "ns exceeded. " << parts_ << ", start time is "
            << parts_.monotonic_start_time << " currently reading "
            << filename();
      }
      return message;
    }
    NextLog();
  }
  newest_timestamp_ = monotonic_clock::max_time;
  return std::nullopt;
}

void PartsMessageReader::NextLog() {
  if (next_part_index_ == parts_.parts.size()) {
    done_ = true;
    return;
  }
  message_reader_ = MessageReader(parts_.parts[next_part_index_]);
  ++next_part_index_;
}

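// Messages from the same boot sort by timestamp, then channel_index, then
// queue_index. Comparing messages from different boots is not meaningful, so
// the operators CHECK that both sides are from the same boot.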
bool Message::operator<(const Message &m2) const {
  CHECK_EQ(this->timestamp.boot, m2.timestamp.boot);

  if (this->timestamp.time < m2.timestamp.time) {
    return true;
  } else if (this->timestamp.time > m2.timestamp.time) {
    return false;
  }

  if (this->channel_index < m2.channel_index) {
    return true;
  } else if (this->channel_index > m2.channel_index) {
    return false;
  }

  return this->queue_index < m2.queue_index;
}

bool Message::operator>=(const Message &m2) const { return !(*this < m2); }
bool Message::operator==(const Message &m2) const {
  CHECK_EQ(this->timestamp.boot, m2.timestamp.boot);

  return timestamp.time == m2.timestamp.time &&
         channel_index == m2.channel_index && queue_index == m2.queue_index;
}

std::ostream &operator<<(std::ostream &os, const Message &m) {
  os << "{.channel_index=" << m.channel_index
     << ", .queue_index=" << m.queue_index << ", .timestamp=" << m.timestamp;
  if (m.data.Verify()) {
    os << ", .data="
       << aos::FlatbufferToJson(m.data,
                                {.multi_line = false, .max_vector_size = 1});
  }
  os << "}";
  return os;
}

std::ostream &operator<<(std::ostream &os, const TimestampedMessage &m) {
  os << "{.channel_index=" << m.channel_index
     << ", .queue_index=" << m.queue_index
     << ", .monotonic_event_time=" << m.monotonic_event_time
     << ", .realtime_event_time=" << m.realtime_event_time;
  if (m.remote_queue_index != 0xffffffff) {
    os << ", .remote_queue_index=" << m.remote_queue_index;
  }
  if (m.monotonic_remote_time != BootTimestamp::min_time()) {
    os << ", .monotonic_remote_time=" << m.monotonic_remote_time;
  }
  if (m.realtime_remote_time != realtime_clock::min_time) {
    os << ", .realtime_remote_time=" << m.realtime_remote_time;
  }
  if (m.monotonic_timestamp_time != BootTimestamp::min_time()) {
    os << ", .monotonic_timestamp_time=" << m.monotonic_timestamp_time;
  }
  if (m.data.Verify()) {
    os << ", .data="
       << aos::FlatbufferToJson(m.data,
                                {.multi_line = false, .max_vector_size = 1});
  }
  os << "}";
  return os;
}

LogPartsSorter::LogPartsSorter(LogParts log_parts)
    : parts_message_reader_(log_parts) {}

Message *LogPartsSorter::Front() {
  // Queue up data until enough data has been queued that the front message is
  // sorted enough to be safe to pop. This may do nothing, so we should make
  // sure the nothing path is checked quickly.
  if (sorted_until() != monotonic_clock::max_time) {
    while (true) {
      if (!messages_.empty() &&
          messages_.begin()->timestamp.time < sorted_until() &&
          sorted_until() >= monotonic_start_time()) {
        break;
      }

      std::optional<SizePrefixedFlatbufferVector<MessageHeader>> m =
          parts_message_reader_.ReadMessage();
      // No data left, sorted forever, work through what is left.
      if (!m) {
        sorted_until_ = monotonic_clock::max_time;
        break;
      }

      messages_.insert(Message{
          .channel_index = m.value().message().channel_index(),
          .queue_index = m.value().message().queue_index(),
          .timestamp =
              BootTimestamp{
                  .boot = parts().boot_count,
                  .time = monotonic_clock::time_point(std::chrono::nanoseconds(
                      m.value().message().monotonic_sent_time()))},
          .data = std::move(m.value())});

      // Now, update sorted_until_ to match the new message.
      if (parts_message_reader_.newest_timestamp() >
          monotonic_clock::min_time +
              parts_message_reader_.max_out_of_order_duration()) {
        sorted_until_ = parts_message_reader_.newest_timestamp() -
                        parts_message_reader_.max_out_of_order_duration();
      } else {
        sorted_until_ = monotonic_clock::min_time;
      }
    }
  }

  // Now that we have enough data queued, return a pointer to the oldest piece
  // of data if it exists.
  if (messages_.empty()) {
    last_message_time_ = monotonic_clock::max_time;
    return nullptr;
  }

  CHECK_GE(messages_.begin()->timestamp.time, last_message_time_)
      << DebugString() << " reading " << parts_message_reader_.filename();
  last_message_time_ = messages_.begin()->timestamp.time;
  return &(*messages_.begin());
}

void LogPartsSorter::PopFront() { messages_.erase(messages_.begin()); }

std::string LogPartsSorter::DebugString() const {
  std::stringstream ss;
  ss << "messages: [\n";
  int count = 0;
  bool no_dots = true;
  for (const Message &m : messages_) {
    if (count < 15 || count > static_cast<int>(messages_.size()) - 15) {
      ss << m << "\n";
    } else if (no_dots) {
      ss << "...\n";
      no_dots = false;
    }
    ++count;
  }
  ss << "] <- " << parts_message_reader_.filename();
  return ss.str();
}

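// NodeMerger merges the sorted streams from all the parts recorded for a
// single node and boot. Front() returns the oldest message across all the
// sorters and drops duplicate copies of messages which were logged by more
// than one logger, preferring the copy that has a monotonic_timestamp_time.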
NodeMerger::NodeMerger(std::vector<LogParts> parts) {
  CHECK_GE(parts.size(), 1u);
  // Enforce that we are sorting things only from a single node from a single
  // boot.
  const std::string_view part0_node = parts[0].node;
  const std::string_view part0_source_boot_uuid = parts[0].source_boot_uuid;
  for (size_t i = 1; i < parts.size(); ++i) {
    CHECK_EQ(part0_node, parts[i].node) << ": Can't merge different nodes.";
    CHECK_EQ(part0_source_boot_uuid, parts[i].source_boot_uuid)
        << ": Can't merge different boots.";
  }

  node_ = configuration::GetNodeIndex(parts[0].config.get(), part0_node);

  for (LogParts &part : parts) {
    parts_sorters_.emplace_back(std::move(part));
  }

  monotonic_start_time_ = monotonic_clock::max_time;
  realtime_start_time_ = realtime_clock::max_time;
  for (const LogPartsSorter &parts_sorter : parts_sorters_) {
    if (parts_sorter.monotonic_start_time() < monotonic_start_time_) {
      monotonic_start_time_ = parts_sorter.monotonic_start_time();
      realtime_start_time_ = parts_sorter.realtime_start_time();
    }
  }
}

std::vector<const LogParts *> NodeMerger::Parts() const {
  std::vector<const LogParts *> p;
  p.reserve(parts_sorters_.size());
  for (const LogPartsSorter &parts_sorter : parts_sorters_) {
    p.emplace_back(&parts_sorter.parts());
  }
  return p;
}

Message *NodeMerger::Front() {
  // Return the current Front if we have one, otherwise go compute one.
  if (current_ != nullptr) {
    Message *result = current_->Front();
    CHECK_GE(result->timestamp.time, last_message_time_);
    return result;
  }

  // Otherwise, do a simple search for the oldest message, deduplicating any
  // duplicates.
  Message *oldest = nullptr;
  sorted_until_ = monotonic_clock::max_time;
  for (LogPartsSorter &parts_sorter : parts_sorters_) {
    Message *m = parts_sorter.Front();
    if (!m) {
      sorted_until_ = std::min(sorted_until_, parts_sorter.sorted_until());
      continue;
    }
    if (oldest == nullptr || *m < *oldest) {
      oldest = m;
      current_ = &parts_sorter;
    } else if (*m == *oldest) {
      // Found a duplicate. If there is a choice, we want the one which has the
      // timestamp time.
      if (!m->data.message().has_monotonic_timestamp_time()) {
        parts_sorter.PopFront();
      } else if (!oldest->data.message().has_monotonic_timestamp_time()) {
        current_->PopFront();
        current_ = &parts_sorter;
        oldest = m;
      } else {
        CHECK_EQ(m->data.message().monotonic_timestamp_time(),
                 oldest->data.message().monotonic_timestamp_time());
        parts_sorter.PopFront();
      }
    }

    // PopFront may change this, so compute it down here.
    sorted_until_ = std::min(sorted_until_, parts_sorter.sorted_until());
  }

  if (oldest) {
    CHECK_GE(oldest->timestamp.time, last_message_time_);
    last_message_time_ = oldest->timestamp.time;
  } else {
    last_message_time_ = monotonic_clock::max_time;
  }

  // Return the oldest message found. This will be nullptr if nothing was
  // found, indicating there is nothing left.
  return oldest;
}

void NodeMerger::PopFront() {
  CHECK(current_ != nullptr) << "Popping before calling Front()";
  current_->PopFront();
  current_ = nullptr;
}

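// BootMerger splits the parts up by boot_count and owns one NodeMerger per
// boot. Messages are returned for boot 0 first, then boot 1, and so on, so a
// reboot appears as a strictly later stream.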
BootMerger::BootMerger(std::vector<LogParts> files) {
  std::vector<std::vector<LogParts>> boots;

  // Now, we need to split things out by boot.
  for (size_t i = 0; i < files.size(); ++i) {
    LOG(INFO) << "Trying file " << i;
    const size_t boot_count = files[i].boot_count;
    LOG(INFO) << "Boot count " << boot_count;
    if (boot_count + 1 > boots.size()) {
      boots.resize(boot_count + 1);
    }
    boots[boot_count].emplace_back(std::move(files[i]));
  }

  node_mergers_.reserve(boots.size());
  for (size_t i = 0; i < boots.size(); ++i) {
    LOG(INFO) << "Boot " << i;
    for (auto &p : boots[i]) {
      LOG(INFO) << "Part " << p;
    }
    node_mergers_.emplace_back(
        std::make_unique<NodeMerger>(std::move(boots[i])));
  }
}

Message *BootMerger::Front() {
  Message *result = node_mergers_[index_]->Front();

  if (result != nullptr) {
    return result;
  }

  if (index_ + 1u == node_mergers_.size()) {
    // At the end of the last node merger, just return.
    return nullptr;
  } else {
    ++index_;
    return Front();
  }
}

void BootMerger::PopFront() { node_mergers_[index_]->PopFront(); }

std::vector<const LogParts *> BootMerger::Parts() const {
  std::vector<const LogParts *> results;
  for (const std::unique_ptr<NodeMerger> &node_merger : node_mergers_) {
    std::vector<const LogParts *> node_parts = node_merger->Parts();

    results.insert(results.end(), std::make_move_iterator(node_parts.begin()),
                   std::make_move_iterator(node_parts.end()));
  }

  return results;
}

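// TimestampMapper takes the merged stream for one node and matches each
// forwarded-message timestamp against the actual data logged on the sending
// node, producing TimestampedMessages. A rough multi-node usage sketch
// (illustrative; the variable names are assumptions, not part of this API):
//
//   TimestampMapper pi1_mapper(std::move(pi1_parts));
//   TimestampMapper pi2_mapper(std::move(pi2_parts));
//   pi1_mapper.AddPeer(&pi2_mapper);
//   pi2_mapper.AddPeer(&pi1_mapper);
//   while (TimestampedMessage *m = pi1_mapper.Front()) {
//     // ... consume *m ...
//     pi1_mapper.PopFront();
//   }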
TimestampMapper::TimestampMapper(std::vector<LogParts> parts)
    : boot_merger_(std::move(parts)),
      timestamp_callback_([](TimestampedMessage *) {}) {
  for (const LogParts *part : boot_merger_.Parts()) {
    if (!configuration_) {
      configuration_ = part->config;
    } else {
      CHECK_EQ(configuration_.get(), part->config.get());
    }
  }
  const Configuration *config = configuration_.get();
  // Only fill out nodes_data_ if there are nodes. Otherwise everything gets
  // pretty simple.
  if (configuration::MultiNode(config)) {
    nodes_data_.resize(config->nodes()->size());
    const Node *my_node = config->nodes()->Get(node());
    for (size_t node_index = 0; node_index < nodes_data_.size(); ++node_index) {
      const Node *node = config->nodes()->Get(node_index);
      NodeData *node_data = &nodes_data_[node_index];
      node_data->channels.resize(config->channels()->size());
      // We should save the channel if it is delivered to the node represented
      // by the NodeData, but not sent by that node. That combo means it is
      // forwarded.
      size_t channel_index = 0;
      node_data->any_delivered = false;
      for (const Channel *channel : *config->channels()) {
        node_data->channels[channel_index].delivered =
            configuration::ChannelIsReadableOnNode(channel, node) &&
            configuration::ChannelIsSendableOnNode(channel, my_node) &&
            (my_node != node);
        node_data->any_delivered = node_data->any_delivered ||
                                   node_data->channels[channel_index].delivered;
        ++channel_index;
      }
    }

    for (const Channel *channel : *config->channels()) {
      source_node_.emplace_back(configuration::GetNodeIndex(
          config, channel->source_node()->string_view()));
    }
  }
}

void TimestampMapper::AddPeer(TimestampMapper *timestamp_mapper) {
  CHECK(configuration::MultiNode(configuration()));
  CHECK_NE(timestamp_mapper->node(), node());
  CHECK_LT(timestamp_mapper->node(), nodes_data_.size());

  NodeData *node_data = &nodes_data_[timestamp_mapper->node()];
  // Only set it if this node delivers to the peer timestamp_mapper. Otherwise
  // we could needlessly save data.
  if (node_data->any_delivered) {
    VLOG(1) << "Registering on node " << node() << " for peer node "
            << timestamp_mapper->node();
    CHECK(timestamp_mapper->nodes_data_[node()].peer == nullptr);

    timestamp_mapper->nodes_data_[node()].peer = this;
  }
}

void TimestampMapper::QueueMessage(Message *m) {
  matched_messages_.emplace_back(TimestampedMessage{
      .channel_index = m->channel_index,
      .queue_index = m->queue_index,
      .monotonic_event_time = m->timestamp,
      .realtime_event_time = aos::realtime_clock::time_point(
          std::chrono::nanoseconds(m->data.message().realtime_sent_time())),
      .remote_queue_index = 0xffffffff,
      .monotonic_remote_time = BootTimestamp::min_time(),
      .realtime_remote_time = realtime_clock::min_time,
      .monotonic_timestamp_time = BootTimestamp::min_time(),
      .data = std::move(m->data)});
}

TimestampedMessage *TimestampMapper::Front() {
  // No need to fetch anything new. A previous message still exists.
  switch (first_message_) {
    case FirstMessage::kNeedsUpdate:
      break;
    case FirstMessage::kInMessage:
      return &matched_messages_.front();
    case FirstMessage::kNullptr:
      return nullptr;
  }

  if (matched_messages_.empty()) {
    if (!QueueMatched()) {
      first_message_ = FirstMessage::kNullptr;
      return nullptr;
    }
  }
  first_message_ = FirstMessage::kInMessage;
  return &matched_messages_.front();
}

bool TimestampMapper::QueueMatched() {
  if (nodes_data_.empty()) {
    // Simple path. We are single node, so there are no timestamps to match!
    CHECK_EQ(messages_.size(), 0u);
    Message *m = boot_merger_.Front();
    if (!m) {
      return false;
    }
    // Enqueue this message into matched_messages_ so we have a place to
    // associate remote timestamps, and return it.
    QueueMessage(m);

    CHECK_GE(matched_messages_.back().monotonic_event_time, last_message_time_);
    last_message_time_ = matched_messages_.back().monotonic_event_time;

    // We are a thin wrapper around boot_merger_. Call it directly.
    boot_merger_.PopFront();
    timestamp_callback_(&matched_messages_.back());
    return true;
  }

  // We need to only add messages to the list so they get processed for
  // messages which are delivered. Reuse the flow below which uses messages_
  // by just adding the new message to messages_ and continuing.
  if (messages_.empty()) {
    if (!Queue()) {
      // Found nothing to add, we are out of data!
      return false;
    }

    // Now that it has been added (and cannibalized), forget about it upstream.
    boot_merger_.PopFront();
  }

  Message *m = &(messages_.front());

  if (source_node_[m->channel_index] == node()) {
    // From us, just forward it on, filling the remote data in as invalid.
    QueueMessage(m);
    CHECK_GE(matched_messages_.back().monotonic_event_time, last_message_time_);
    last_message_time_ = matched_messages_.back().monotonic_event_time;
    messages_.pop_front();
    timestamp_callback_(&matched_messages_.back());
    return true;
  } else {
    // Got a timestamp, find the matching remote data, match it, and return it.
    Message data = MatchingMessageFor(*m);

    // Return the data from the remote. The local message only has timestamp
    // info which isn't relevant anymore once extracted.
    matched_messages_.emplace_back(TimestampedMessage{
        .channel_index = m->channel_index,
        .queue_index = m->queue_index,
        .monotonic_event_time = m->timestamp,
        .realtime_event_time = aos::realtime_clock::time_point(
            std::chrono::nanoseconds(m->data.message().realtime_sent_time())),
        .remote_queue_index = m->data.message().remote_queue_index(),
        .monotonic_remote_time =
            // TODO(austin): 0 is wrong...
            {0, monotonic_clock::time_point(std::chrono::nanoseconds(
                    m->data.message().monotonic_remote_time()))},
        .realtime_remote_time = realtime_clock::time_point(
            std::chrono::nanoseconds(m->data.message().realtime_remote_time())),
        .monotonic_timestamp_time =
            {0, monotonic_clock::time_point(std::chrono::nanoseconds(
                    m->data.message().monotonic_timestamp_time()))},
        .data = std::move(data.data)});
    CHECK_GE(matched_messages_.back().monotonic_event_time, last_message_time_);
    last_message_time_ = matched_messages_.back().monotonic_event_time;
    // Since messages_ holds the data, drop it.
    messages_.pop_front();
    timestamp_callback_(&matched_messages_.back());
    return true;
  }
}

void TimestampMapper::QueueUntil(BootTimestamp queue_time) {
  while (last_message_time_ <= queue_time) {
    if (!QueueMatched()) {
      return;
    }
  }
}

void TimestampMapper::QueueFor(chrono::nanoseconds time_estimation_buffer) {
  // Note: queueing for time doesn't really work well across boots. So we just
  // assume that if you are using this, you only care about the current boot.
  //
  // TODO(austin): Is that the right concept?
  //
  // Make sure we have something queued first. This makes the end time
  // calculation simpler, and is typically what folks want regardless.
  if (matched_messages_.empty()) {
    if (!QueueMatched()) {
      return;
    }
  }

  const aos::monotonic_clock::time_point end_queue_time =
      std::max(monotonic_start_time(
                   matched_messages_.front().monotonic_event_time.boot),
               matched_messages_.front().monotonic_event_time.time) +
      time_estimation_buffer;

  // Place sorted messages on the list until we have
  // --time_estimation_buffer_seconds seconds queued up (but queue at least
  // until the log starts).
  while (end_queue_time >= last_message_time_.time) {
    if (!QueueMatched()) {
      return;
    }
  }
}

void TimestampMapper::PopFront() {
  CHECK(first_message_ != FirstMessage::kNeedsUpdate);
  first_message_ = FirstMessage::kNeedsUpdate;

  matched_messages_.pop_front();
}

Message TimestampMapper::MatchingMessageFor(const Message &message) {
  // Figure out what queue index we are looking for.
  CHECK(message.data.message().has_remote_queue_index());
  const uint32_t remote_queue_index =
      message.data.message().remote_queue_index();

  CHECK(message.data.message().has_monotonic_remote_time());
  CHECK(message.data.message().has_realtime_remote_time());

  const BootTimestamp monotonic_remote_time{
      .boot = 0,
      .time = monotonic_clock::time_point(std::chrono::nanoseconds(
          message.data.message().monotonic_remote_time()))};
  const realtime_clock::time_point realtime_remote_time(
      std::chrono::nanoseconds(message.data.message().realtime_remote_time()));

  TimestampMapper *peer = nodes_data_[source_node_[message.channel_index]].peer;

  // We only register the peers which we have data for. So, if we are being
  // asked to pull a timestamp from a peer which doesn't exist, return an empty
  // message.
  if (peer == nullptr) {
    // TODO(austin): Make sure the tests hit all these paths with a boot count
    // of 1...
    return Message{
        .channel_index = message.channel_index,
        .queue_index = remote_queue_index,
        .timestamp = monotonic_remote_time,
        .data = SizePrefixedFlatbufferVector<MessageHeader>::Empty()};
  }

  // The queue which will have the matching data, if available.
  std::deque<Message> *data_queue =
      &peer->nodes_data_[node()].channels[message.channel_index].messages;

  peer->QueueUnmatchedUntil(monotonic_remote_time);

  if (data_queue->empty()) {
    return Message{
        .channel_index = message.channel_index,
        .queue_index = remote_queue_index,
        .timestamp = monotonic_remote_time,
        .data = SizePrefixedFlatbufferVector<MessageHeader>::Empty()};
  }

  if (remote_queue_index < data_queue->front().queue_index ||
      remote_queue_index > data_queue->back().queue_index) {
    return Message{
        .channel_index = message.channel_index,
        .queue_index = remote_queue_index,
        .timestamp = monotonic_remote_time,
        .data = SizePrefixedFlatbufferVector<MessageHeader>::Empty()};
  }

  // The algorithm below is constant time with some assumptions. We need there
  // to be no missing messages in the data stream. This also assumes a queue
  // hasn't wrapped. That is conservative, but should let us get started.
  if (data_queue->back().queue_index - data_queue->front().queue_index + 1u ==
      data_queue->size()) {
    // Pull the data out and confirm that the timestamps match as expected.
    Message result = std::move(
        (*data_queue)[remote_queue_index - data_queue->front().queue_index]);

    CHECK_EQ(result.timestamp, monotonic_remote_time)
        << ": Queue index matches, but timestamp doesn't. Please investigate!";
    CHECK_EQ(realtime_clock::time_point(std::chrono::nanoseconds(
                 result.data.message().realtime_sent_time())),
             realtime_remote_time)
        << ": Queue index matches, but timestamp doesn't. Please investigate!";
    // Now drop the data off the front. We have deduplicated timestamps, so we
    // are done. And all the data is in order.
    data_queue->erase(data_queue->begin(),
                      data_queue->begin() + (1 + remote_queue_index -
                                             data_queue->front().queue_index));
    return result;
  } else {
    auto it = std::find_if(data_queue->begin(), data_queue->end(),
                           [remote_queue_index](const Message &m) {
                             return m.queue_index == remote_queue_index;
                           });
    if (it == data_queue->end()) {
      return Message{
          .channel_index = message.channel_index,
          .queue_index = remote_queue_index,
          .timestamp = monotonic_remote_time,
          .data = SizePrefixedFlatbufferVector<MessageHeader>::Empty()};
    }

    Message result = std::move(*it);

    CHECK_EQ(result.timestamp, monotonic_remote_time)
        << ": Queue index matches, but timestamp doesn't. Please investigate!";
    CHECK_EQ(realtime_clock::time_point(std::chrono::nanoseconds(
                 result.data.message().realtime_sent_time())),
             realtime_remote_time)
        << ": Queue index matches, but timestamp doesn't. Please investigate!";

    data_queue->erase(it);

    return result;
  }
}

void TimestampMapper::QueueUnmatchedUntil(BootTimestamp t) {
  if (queued_until_ > t) {
    return;
  }
  while (true) {
    if (!messages_.empty() && messages_.back().timestamp > t) {
      queued_until_ = std::max(queued_until_, messages_.back().timestamp);
      return;
    }

    if (!Queue()) {
      // Found nothing to add, we are out of data!
      queued_until_ = BootTimestamp::max_time();
      return;
    }

    // Now that it has been added (and cannibalized), forget about it upstream.
    boot_merger_.PopFront();
  }
}

bool TimestampMapper::Queue() {
  Message *m = boot_merger_.Front();
  if (m == nullptr) {
    return false;
  }
  for (NodeData &node_data : nodes_data_) {
    if (!node_data.any_delivered) continue;
    if (node_data.channels[m->channel_index].delivered) {
      // TODO(austin): This copies the data... Probably not worth stressing
      // about yet.
      // TODO(austin): Bound how big this can get. We tend not to send massive
      // data, so we can probably ignore this for a bit.
      node_data.channels[m->channel_index].messages.emplace_back(*m);
    }
  }

  messages_.emplace_back(std::move(*m));
  return true;
}

std::string TimestampMapper::DebugString() const {
  std::stringstream ss;
  ss << "node " << node() << " [\n";
  for (const Message &message : messages_) {
    ss << "  " << message << "\n";
  }
  ss << "] queued_until " << queued_until_;
  for (const NodeData &ns : nodes_data_) {
    if (ns.peer == nullptr) continue;
    ss << "\nnode " << ns.peer->node() << " remote_data [\n";
    size_t channel_index = 0;
    for (const NodeData::ChannelData &channel_data :
         ns.peer->nodes_data_[node()].channels) {
      if (channel_data.messages.empty()) {
        continue;
      }

      ss << "  channel " << channel_index << " [\n";
      for (const Message &m : channel_data.messages) {
        ss << "    " << m << "\n";
      }
      ss << "  ]\n";
      ++channel_index;
    }
    ss << "] queued_until " << ns.peer->queued_until_;
  }
  return ss.str();
}

std::string MaybeNodeName(const Node *node) {
  if (node != nullptr) {
    return node->name()->str() + " ";
  }
  return "";
}

}  // namespace aos::logger