#include "aos/events/logging/logfile_utils.h"

#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/uio.h>

#include <algorithm>
#include <climits>

#include "absl/strings/escaping.h"
#include "aos/configuration.h"
#include "aos/events/logging/snappy_encoder.h"
#include "aos/flatbuffer_merge.h"
#include "aos/util/file.h"
#include "flatbuffers/flatbuffers.h"
#include "gflags/gflags.h"
#include "glog/logging.h"

#if defined(__x86_64__)
#define ENABLE_LZMA (!__has_feature(memory_sanitizer))
#elif defined(__aarch64__)
#define ENABLE_LZMA (!__has_feature(memory_sanitizer))
#else
#define ENABLE_LZMA 0
#endif

#if ENABLE_LZMA
#include "aos/events/logging/lzma_encoder.h"
#endif
#if ENABLE_S3
#include "aos/events/logging/s3_fetcher.h"
#endif

DEFINE_int32(flush_size, 128 * 1024,
             "Number of outstanding bytes to allow before flushing to disk.");
DEFINE_double(
    flush_period, 5.0,
    "Max time to let data sit in the queue before flushing in seconds.");

DEFINE_double(
    max_network_delay, 1.0,
    "Max time to assume a message takes to cross the network before we are "
    "willing to drop it from our buffers and assume it didn't make it. "
    "Increasing this number can increase memory usage depending on the packet "
    "loss of your network or if the timestamps aren't logged for a message.");

DEFINE_double(
    max_out_of_order, -1,
    "If set, this overrides the max out of order duration for a log file.");

DEFINE_bool(workaround_double_headers, true,
            "Some old log files have two headers at the beginning. Use the "
            "last header as the actual header.");

DEFINE_bool(crash_on_corrupt_message, true,
            "When true, MessageReader will crash the first time a message "
            "with corrupted format is found. When false, the crash will be "
            "suppressed, and any remaining readable messages will be "
            "evaluated to present verified vs corrupted stats.");

DEFINE_bool(ignore_corrupt_messages, false,
            "When true, and crash_on_corrupt_message is false, then any "
            "corrupt message found by MessageReader will be silently ignored, "
            "providing access to all uncorrupted messages in a logfile.");

namespace aos::logger {
namespace {

namespace chrono = std::chrono;

template <typename T>
void PrintOptionalOrNull(std::ostream *os, const std::optional<T> &t) {
  if (t.has_value()) {
    *os << *t;
  } else {
    *os << "null";
  }
}
}  // namespace

DetachedBufferWriter::DetachedBufferWriter(std::string_view filename,
                                           std::unique_ptr<DataEncoder> encoder)
    : filename_(filename), encoder_(std::move(encoder)) {
  if (!util::MkdirPIfSpace(filename, 0777)) {
    ran_out_of_space_ = true;
  } else {
    fd_ = open(filename_.c_str(), O_RDWR | O_CLOEXEC | O_CREAT | O_EXCL, 0774);
    if (fd_ == -1 && errno == ENOSPC) {
      ran_out_of_space_ = true;
    } else {
      PCHECK(fd_ != -1) << ": Failed to open " << this->filename()
                        << " for writing";
      VLOG(1) << "Opened " << this->filename() << " for writing";
    }
  }
}

DetachedBufferWriter::~DetachedBufferWriter() {
  Close();
  if (ran_out_of_space_) {
    CHECK(acknowledge_ran_out_of_space_)
        << ": Unacknowledged out of disk space, log file was not completed";
  }
}

DetachedBufferWriter::DetachedBufferWriter(DetachedBufferWriter &&other) {
  *this = std::move(other);
}

// When other is destroyed "soon" (which it should be, because we're getting an
// rvalue reference to it), it will flush (among other things) all the data we
// have queued up (because that data will then be its data).
DetachedBufferWriter &DetachedBufferWriter::operator=(
    DetachedBufferWriter &&other) {
  std::swap(filename_, other.filename_);
  std::swap(encoder_, other.encoder_);
  std::swap(fd_, other.fd_);
  std::swap(ran_out_of_space_, other.ran_out_of_space_);
  std::swap(acknowledge_ran_out_of_space_, other.acknowledge_ran_out_of_space_);
  std::swap(iovec_, other.iovec_);
  std::swap(max_write_time_, other.max_write_time_);
  std::swap(max_write_time_bytes_, other.max_write_time_bytes_);
  std::swap(max_write_time_messages_, other.max_write_time_messages_);
  std::swap(total_write_time_, other.total_write_time_);
  std::swap(total_write_count_, other.total_write_count_);
  std::swap(total_write_messages_, other.total_write_messages_);
  std::swap(total_write_bytes_, other.total_write_bytes_);
  return *this;
}

void DetachedBufferWriter::CopyMessage(DataEncoder::Copier *copier,
                                       aos::monotonic_clock::time_point now) {
  if (ran_out_of_space_) {
    // We don't want any later data to be written after space becomes
    // available, so refuse to write anything more once we've dropped data
    // because we ran out of space.
    return;
  }

  if (!encoder_->HasSpace(copier->size())) {
    Flush();
    CHECK(encoder_->HasSpace(copier->size()));
  }

  encoder_->Encode(copier);
  FlushAtThreshold(now);
}

void DetachedBufferWriter::Close() {
  if (fd_ == -1) {
    return;
  }
  encoder_->Finish();
  while (encoder_->queue_size() > 0) {
    Flush();
  }
  if (close(fd_) == -1) {
    if (errno == ENOSPC) {
      ran_out_of_space_ = true;
    } else {
      PLOG(ERROR) << "Closing log file failed";
    }
  }
  fd_ = -1;
  VLOG(1) << "Closed " << filename();
}

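// Writes as many queued encoder buffers as possible (up to IOV_MAX per call)
// to disk with a single writev(), then updates the write statistics.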
void DetachedBufferWriter::Flush() {
  if (ran_out_of_space_) {
    // We don't want any later data to be written after space becomes
    // available, so refuse to write anything more once we've dropped data
    // because we ran out of space.
    if (encoder_) {
      VLOG(1) << "Ignoring queue: " << encoder_->queue().size();
      encoder_->Clear(encoder_->queue().size());
    } else {
      VLOG(1) << "No queue to ignore";
    }
    return;
  }

  const auto queue = encoder_->queue();
  if (queue.empty()) {
    return;
  }

  iovec_.clear();
  const size_t iovec_size = std::min<size_t>(queue.size(), IOV_MAX);
  iovec_.resize(iovec_size);
  size_t counted_size = 0;
  for (size_t i = 0; i < iovec_size; ++i) {
    iovec_[i].iov_base = const_cast<uint8_t *>(queue[i].data());
    iovec_[i].iov_len = queue[i].size();
    counted_size += iovec_[i].iov_len;
  }

  const auto start = aos::monotonic_clock::now();
  const ssize_t written = writev(fd_, iovec_.data(), iovec_.size());
  const auto end = aos::monotonic_clock::now();
  HandleWriteReturn(written, counted_size);

  encoder_->Clear(iovec_size);

  UpdateStatsForWrite(end - start, written, iovec_size);
}

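// Interprets the return value of writev().  ENOSPC and short writes are
// treated as running out of disk space; any other failure is fatal.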
void DetachedBufferWriter::HandleWriteReturn(ssize_t write_return,
                                             size_t write_size) {
  if (write_return == -1 && errno == ENOSPC) {
    ran_out_of_space_ = true;
    return;
  }
  PCHECK(write_return >= 0) << ": write failed";
  if (write_return < static_cast<ssize_t>(write_size)) {
    // Sometimes this happens instead of ENOSPC. On a real filesystem, this
    // never seems to happen in any other case. If we ever want to log to a
    // socket, this will happen more often. However, until we get there, we'll
    // just assume it means we ran out of space.
    ran_out_of_space_ = true;
    return;
  }
}

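// Accumulates per-write timing and size statistics, tracking the slowest write
// separately.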
void DetachedBufferWriter::UpdateStatsForWrite(
    aos::monotonic_clock::duration duration, ssize_t written, int iovec_size) {
  if (duration > max_write_time_) {
    max_write_time_ = duration;
    max_write_time_bytes_ = written;
    max_write_time_messages_ = iovec_size;
  }
  total_write_time_ += duration;
  ++total_write_count_;
  total_write_messages_ += iovec_size;
  total_write_bytes_ += written;
}

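// Flushes queued data once enough bytes have accumulated (--flush_size), the
// queue hits IOV_MAX buffers, or --flush_period has elapsed since the last
// flush.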
void DetachedBufferWriter::FlushAtThreshold(
    aos::monotonic_clock::time_point now) {
  if (ran_out_of_space_) {
    // We don't want any later data to be written after space becomes
    // available, so refuse to write anything more once we've dropped data
    // because we ran out of space.
    if (encoder_) {
      VLOG(1) << "Ignoring queue: " << encoder_->queue().size();
      encoder_->Clear(encoder_->queue().size());
    } else {
      VLOG(1) << "No queue to ignore";
    }
    return;
  }

  // We don't want to flush the first time through.  Otherwise we would flush
  // while the log file header might still be compressing, defeating any
  // parallelism and queueing there.
  if (last_flush_time_ == aos::monotonic_clock::min_time) {
    last_flush_time_ = now;
  }

  // Flush if we are at the max number of iovs per writev, because there's no
  // point queueing up any more data in memory.  Also flush once we have enough
  // data queued up or if it has been long enough.
  while (encoder_->queued_bytes() > static_cast<size_t>(FLAGS_flush_size) ||
         encoder_->queue_size() >= IOV_MAX ||
         now > last_flush_time_ +
                   chrono::duration_cast<chrono::nanoseconds>(
                       chrono::duration<double>(FLAGS_flush_period))) {
    last_flush_time_ = now;
    Flush();
  }
}

// Do the magic dance to convert the endianness of the data and append it to
// the buffer.
namespace {

// TODO(austin): Look at the generated code to see if building the header is
// efficient or not.
template <typename T>
uint8_t *Push(uint8_t *buffer, const T data) {
  const T endian_data = flatbuffers::EndianScalar<T>(data);
  std::memcpy(buffer, &endian_data, sizeof(T));
  return buffer + sizeof(T);
}

uint8_t *PushBytes(uint8_t *buffer, const void *data, size_t size) {
  std::memcpy(buffer, data, size);
  return buffer + size;
}

uint8_t *Pad(uint8_t *buffer, size_t padding) {
  std::memset(buffer, 0, padding);
  return buffer + padding;
}
}  // namespace

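// Builds a log MessageHeader flatbuffer from a RemoteMessage timestamp,
// using the same field order as PackMessage below.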
flatbuffers::Offset<MessageHeader> PackRemoteMessage(
    flatbuffers::FlatBufferBuilder *fbb,
    const message_bridge::RemoteMessage *msg, int channel_index,
    const aos::monotonic_clock::time_point monotonic_timestamp_time) {
  logger::MessageHeader::Builder message_header_builder(*fbb);
  // Note: this must match the same order as MessageBridgeServer and
  // PackMessage.  We want identical headers to have identical
  // on-the-wire formats to make comparing them easier.

  message_header_builder.add_channel_index(channel_index);

  message_header_builder.add_queue_index(msg->queue_index());
  message_header_builder.add_monotonic_sent_time(msg->monotonic_sent_time());
  message_header_builder.add_realtime_sent_time(msg->realtime_sent_time());

  message_header_builder.add_monotonic_remote_time(
      msg->monotonic_remote_time());
  message_header_builder.add_realtime_remote_time(msg->realtime_remote_time());
  message_header_builder.add_remote_queue_index(msg->remote_queue_index());

  message_header_builder.add_monotonic_timestamp_time(
      monotonic_timestamp_time.time_since_epoch().count());

  return message_header_builder.Finish();
}

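// Hand-serializes the same MessageHeader directly into buffer, byte for byte.
// The offset comments below document the expected wire layout for an example
// message.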
size_t PackRemoteMessageInline(
    uint8_t *buffer, const message_bridge::RemoteMessage *msg,
    int channel_index,
    const aos::monotonic_clock::time_point monotonic_timestamp_time) {
  const flatbuffers::uoffset_t message_size = PackRemoteMessageSize();

  // clang-format off
  // header:
  // +0x00 | 5C 00 00 00 | UOffset32 | 0x0000005C (92) Loc: +0x5C | size prefix
  buffer = Push<flatbuffers::uoffset_t>(
      buffer, message_size - sizeof(flatbuffers::uoffset_t));
  // +0x04 | 20 00 00 00 | UOffset32 | 0x00000020 (32) Loc: +0x24 | offset to root table `aos.logger.MessageHeader`
  buffer = Push<flatbuffers::uoffset_t>(buffer, 0x20);
  //
  // padding:
  // +0x08 | 00 00 00 00 00 00 | uint8_t[6] | ...... | padding
  buffer = Pad(buffer, 6);
  //
  // vtable (aos.logger.MessageHeader):
  // +0x0E | 16 00 | uint16_t | 0x0016 (22) | size of this vtable
  buffer = Push<flatbuffers::voffset_t>(buffer, 0x16);
  // +0x10 | 3C 00 | uint16_t | 0x003C (60) | size of referring table
  buffer = Push<flatbuffers::voffset_t>(buffer, 0x3c);
  // +0x12 | 38 00 | VOffset16 | 0x0038 (56) | offset to field `channel_index` (id: 0)
  buffer = Push<flatbuffers::voffset_t>(buffer, 0x38);
  // +0x14 | 2C 00 | VOffset16 | 0x002C (44) | offset to field `monotonic_sent_time` (id: 1)
  buffer = Push<flatbuffers::voffset_t>(buffer, 0x2c);
  // +0x16 | 24 00 | VOffset16 | 0x0024 (36) | offset to field `realtime_sent_time` (id: 2)
  buffer = Push<flatbuffers::voffset_t>(buffer, 0x24);
  // +0x18 | 34 00 | VOffset16 | 0x0034 (52) | offset to field `queue_index` (id: 3)
  buffer = Push<flatbuffers::voffset_t>(buffer, 0x34);
  // +0x1A | 00 00 | VOffset16 | 0x0000 (0) | offset to field `data` (id: 4) <null> (Vector)
  buffer = Push<flatbuffers::voffset_t>(buffer, 0x00);
  // +0x1C | 1C 00 | VOffset16 | 0x001C (28) | offset to field `monotonic_remote_time` (id: 5)
  buffer = Push<flatbuffers::voffset_t>(buffer, 0x1c);
  // +0x1E | 14 00 | VOffset16 | 0x0014 (20) | offset to field `realtime_remote_time` (id: 6)
  buffer = Push<flatbuffers::voffset_t>(buffer, 0x14);
  // +0x20 | 10 00 | VOffset16 | 0x0010 (16) | offset to field `remote_queue_index` (id: 7)
  buffer = Push<flatbuffers::voffset_t>(buffer, 0x10);
  // +0x22 | 04 00 | VOffset16 | 0x0004 (4) | offset to field `monotonic_timestamp_time` (id: 8)
  buffer = Push<flatbuffers::voffset_t>(buffer, 0x04);
  //
  // root_table (aos.logger.MessageHeader):
  // +0x24 | 16 00 00 00 | SOffset32 | 0x00000016 (22) Loc: +0x0E | offset to vtable
  buffer = Push<flatbuffers::uoffset_t>(buffer, 0x16);
  // +0x28 | F6 0B D8 11 A4 A8 B1 71 | int64_t | 0x71B1A8A411D80BF6 (8192514619791117302) | table field `monotonic_timestamp_time` (Long)
  buffer = Push<int64_t>(buffer,
                         monotonic_timestamp_time.time_since_epoch().count());
  // +0x30 | 00 00 00 00 | uint8_t[4] | .... | padding
  // TODO(austin): Can we re-arrange the order to ditch the padding?
  // (Answer is yes, but what is the impact elsewhere?  It will change the
  // binary format)
  buffer = Pad(buffer, 4);
  // +0x34 | 75 00 00 00 | uint32_t | 0x00000075 (117) | table field `remote_queue_index` (UInt)
  buffer = Push<uint32_t>(buffer, msg->remote_queue_index());
  // +0x38 | AA B0 43 0A 35 BE FA D2 | int64_t | 0xD2FABE350A43B0AA (-3244071446552268630) | table field `realtime_remote_time` (Long)
  buffer = Push<int64_t>(buffer, msg->realtime_remote_time());
  // +0x40 | D5 40 30 F3 C1 A7 26 1D | int64_t | 0x1D26A7C1F33040D5 (2100550727665467605) | table field `monotonic_remote_time` (Long)
  buffer = Push<int64_t>(buffer, msg->monotonic_remote_time());
  // +0x48 | 5B 25 32 A1 4A E8 46 CA | int64_t | 0xCA46E84AA132255B (-3871151422448720549) | table field `realtime_sent_time` (Long)
  buffer = Push<int64_t>(buffer, msg->realtime_sent_time());
  // +0x50 | 49 7D 45 1F 8C 36 6B A3 | int64_t | 0xA36B368C1F457D49 (-6671178447571288759) | table field `monotonic_sent_time` (Long)
  buffer = Push<int64_t>(buffer, msg->monotonic_sent_time());
  // +0x58 | 33 00 00 00 | uint32_t | 0x00000033 (51) | table field `queue_index` (UInt)
  buffer = Push<uint32_t>(buffer, msg->queue_index());
  // +0x5C | 76 00 00 00 | uint32_t | 0x00000076 (118) | table field `channel_index` (UInt)
  buffer = Push<uint32_t>(buffer, channel_index);
  // clang-format on

  return message_size;
}

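// Packs the message described by context into a MessageHeader flatbuffer using
// fbb, including the data vector only for log types that record it.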
flatbuffers::Offset<MessageHeader> PackMessage(
    flatbuffers::FlatBufferBuilder *fbb, const Context &context,
    int channel_index, LogType log_type) {
  flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data_offset;

  switch (log_type) {
    case LogType::kLogMessage:
    case LogType::kLogMessageAndDeliveryTime:
    case LogType::kLogRemoteMessage:
      // Since the timestamps are 8 byte aligned, we are going to end up adding
      // padding in the middle of the message to pad everything out to 8 byte
      // alignment.  That's rather wasteful.  To make things efficient to mmap
      // while reading uncompressed logs, we'd actually rather the message be
      // aligned.  So, force 8 byte alignment (enough to preserve alignment
      // inside the nested message so that we can read it without moving it)
      // here.
      fbb->ForceVectorAlignment(context.size, sizeof(uint8_t), 8);
      data_offset = fbb->CreateVector(
          static_cast<const uint8_t *>(context.data), context.size);
      break;

    case LogType::kLogDeliveryTimeOnly:
      break;
  }

  MessageHeader::Builder message_header_builder(*fbb);
  message_header_builder.add_channel_index(channel_index);

  // These are split out into very explicit serialization calls because the
  // order here changes the order things are written out on the wire, and we
  // want to control and understand it here.  Changing the order can increase
  // the amount of padding bytes in the middle.
  //
  // It is also easier to follow...  And doesn't actually make things much
  // bigger.
  switch (log_type) {
    case LogType::kLogRemoteMessage:
      message_header_builder.add_queue_index(context.remote_queue_index);
      message_header_builder.add_data(data_offset);
      message_header_builder.add_monotonic_sent_time(
          context.monotonic_remote_time.time_since_epoch().count());
      message_header_builder.add_realtime_sent_time(
          context.realtime_remote_time.time_since_epoch().count());
      break;

    case LogType::kLogDeliveryTimeOnly:
      message_header_builder.add_queue_index(context.queue_index);
      message_header_builder.add_monotonic_sent_time(
          context.monotonic_event_time.time_since_epoch().count());
      message_header_builder.add_realtime_sent_time(
          context.realtime_event_time.time_since_epoch().count());
      message_header_builder.add_monotonic_remote_time(
          context.monotonic_remote_time.time_since_epoch().count());
      message_header_builder.add_realtime_remote_time(
          context.realtime_remote_time.time_since_epoch().count());
      message_header_builder.add_remote_queue_index(context.remote_queue_index);
      break;

    case LogType::kLogMessage:
      message_header_builder.add_queue_index(context.queue_index);
      message_header_builder.add_data(data_offset);
      message_header_builder.add_monotonic_sent_time(
          context.monotonic_event_time.time_since_epoch().count());
      message_header_builder.add_realtime_sent_time(
          context.realtime_event_time.time_since_epoch().count());
      break;

    case LogType::kLogMessageAndDeliveryTime:
      message_header_builder.add_queue_index(context.queue_index);
      message_header_builder.add_remote_queue_index(context.remote_queue_index);
      message_header_builder.add_monotonic_sent_time(
          context.monotonic_event_time.time_since_epoch().count());
      message_header_builder.add_realtime_sent_time(
          context.realtime_event_time.time_since_epoch().count());
      message_header_builder.add_monotonic_remote_time(
          context.monotonic_remote_time.time_since_epoch().count());
      message_header_builder.add_realtime_remote_time(
          context.realtime_remote_time.time_since_epoch().count());
      message_header_builder.add_data(data_offset);
      break;
  }

  return message_header_builder.Finish();
}

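// Returns the serialized size of everything except the data vector (length
// word and contents) for the given log type.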
flatbuffers::uoffset_t PackMessageHeaderSize(LogType log_type) {
  switch (log_type) {
    case LogType::kLogMessage:
      return
          // Root table size + offset.
          sizeof(flatbuffers::uoffset_t) * 2 +
          // 6 padding bytes to pad the header out properly.
          6 +
          // vtable header (size + size of table)
          sizeof(flatbuffers::voffset_t) * 2 +
          // offsets to all the fields.
          sizeof(flatbuffers::voffset_t) * 5 +
          // pointer to vtable
          sizeof(flatbuffers::soffset_t) +
          // pointer to data
          sizeof(flatbuffers::uoffset_t) +
          // realtime_sent_time, monotonic_sent_time
          sizeof(int64_t) * 2 +
          // queue_index, channel_index
          sizeof(uint32_t) * 2;

    case LogType::kLogDeliveryTimeOnly:
      return
          // Root table size + offset.
          sizeof(flatbuffers::uoffset_t) * 2 +
          // 4 padding bytes to pad the header out properly.
          4 +
          // vtable header (size + size of table)
          sizeof(flatbuffers::voffset_t) * 2 +
          // offsets to all the fields.
          sizeof(flatbuffers::voffset_t) * 8 +
          // pointer to vtable
          sizeof(flatbuffers::soffset_t) +
          // remote_queue_index
          sizeof(uint32_t) +
          // realtime_remote_time, monotonic_remote_time, realtime_sent_time,
          // monotonic_sent_time
          sizeof(int64_t) * 4 +
          // queue_index, channel_index
          sizeof(uint32_t) * 2;

    case LogType::kLogMessageAndDeliveryTime:
      return
          // Root table size + offset.
          sizeof(flatbuffers::uoffset_t) * 2 +
          // 4 padding bytes to pad the header out properly.
          4 +
          // vtable header (size + size of table)
          sizeof(flatbuffers::voffset_t) * 2 +
          // offsets to all the fields.
          sizeof(flatbuffers::voffset_t) * 8 +
          // pointer to vtable
          sizeof(flatbuffers::soffset_t) +
          // pointer to data
          sizeof(flatbuffers::uoffset_t) +
          // realtime_remote_time, monotonic_remote_time, realtime_sent_time,
          // monotonic_sent_time
          sizeof(int64_t) * 4 +
          // remote_queue_index, queue_index, channel_index
          sizeof(uint32_t) * 3;

    case LogType::kLogRemoteMessage:
      return
          // Root table size + offset.
          sizeof(flatbuffers::uoffset_t) * 2 +
          // 6 padding bytes to pad the header out properly.
          6 +
          // vtable header (size + size of table)
          sizeof(flatbuffers::voffset_t) * 2 +
          // offsets to all the fields.
          sizeof(flatbuffers::voffset_t) * 5 +
          // pointer to vtable
          sizeof(flatbuffers::soffset_t) +
          // realtime_sent_time, monotonic_sent_time
          sizeof(int64_t) * 2 +
          // pointer to data
          sizeof(flatbuffers::uoffset_t) +
          // queue_index, channel_index
          sizeof(uint32_t) * 2;
  }
  LOG(FATAL);
}

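// Returns the total serialized size, including the size prefix and the data
// vector padded out to 8 byte alignment.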
flatbuffers::uoffset_t PackMessageSize(LogType log_type, size_t data_size) {
  static_assert(sizeof(flatbuffers::uoffset_t) == 4u,
                "Update size logic please.");
  const flatbuffers::uoffset_t aligned_data_length =
      ((data_size + 7) & 0xfffffff8u);
  switch (log_type) {
    case LogType::kLogDeliveryTimeOnly:
      return PackMessageHeaderSize(log_type);

    case LogType::kLogMessage:
    case LogType::kLogMessageAndDeliveryTime:
    case LogType::kLogRemoteMessage:
      return PackMessageHeaderSize(log_type) +
             // Vector...
             sizeof(flatbuffers::uoffset_t) + aligned_data_length;
  }
  LOG(FATAL);
}

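// Hand-serializes a MessageHeader for context directly into buffer.  Each case
// mirrors the layout produced by PackMessage for that log type; the annotated
// offsets below describe an example message.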
size_t PackMessageInline(uint8_t *buffer, const Context &context,
                         int channel_index, LogType log_type) {
  // TODO(austin): Figure out how to copy directly from shared memory instead
  // of first into the fetcher's memory and then into here.  That would save a
  // lot of memory.
  const flatbuffers::uoffset_t message_size =
      PackMessageSize(log_type, context.size);

  buffer = Push<flatbuffers::uoffset_t>(
      buffer, message_size - sizeof(flatbuffers::uoffset_t));

  // Pack all the data in.  This is brittle but easy to change.  Use the
  // InlinePackMessage.Equivilent unit test to verify everything matches.
  switch (log_type) {
    case LogType::kLogMessage:
      // clang-format off
      // header:
      // +0x00 | 4C 00 00 00 | UOffset32 | 0x0000004C (76) Loc: +0x4C | size prefix
      // +0x04 | 18 00 00 00 | UOffset32 | 0x00000018 (24) Loc: +0x1C | offset to root table `aos.logger.MessageHeader`
      buffer = Push<flatbuffers::uoffset_t>(buffer, 0x18);
      //
      // padding:
      // +0x08 | 00 00 00 00 00 00 | uint8_t[6] | ...... | padding
      buffer = Pad(buffer, 6);
      //
      // vtable (aos.logger.MessageHeader):
      // +0x0E | 0E 00 | uint16_t | 0x000E (14) | size of this vtable
      buffer = Push<flatbuffers::voffset_t>(buffer, 0xe);
      // +0x10 | 20 00 | uint16_t | 0x0020 (32) | size of referring table
      buffer = Push<flatbuffers::voffset_t>(buffer, 0x20);
      // +0x12 | 1C 00 | VOffset16 | 0x001C (28) | offset to field `channel_index` (id: 0)
      buffer = Push<flatbuffers::voffset_t>(buffer, 0x1c);
      // +0x14 | 0C 00 | VOffset16 | 0x000C (12) | offset to field `monotonic_sent_time` (id: 1)
      buffer = Push<flatbuffers::voffset_t>(buffer, 0x0c);
      // +0x16 | 04 00 | VOffset16 | 0x0004 (4) | offset to field `realtime_sent_time` (id: 2)
      buffer = Push<flatbuffers::voffset_t>(buffer, 0x04);
      // +0x18 | 18 00 | VOffset16 | 0x0018 (24) | offset to field `queue_index` (id: 3)
      buffer = Push<flatbuffers::voffset_t>(buffer, 0x18);
      // +0x1A | 14 00 | VOffset16 | 0x0014 (20) | offset to field `data` (id: 4)
      buffer = Push<flatbuffers::voffset_t>(buffer, 0x14);
      //
      // root_table (aos.logger.MessageHeader):
      // +0x1C | 0E 00 00 00 | SOffset32 | 0x0000000E (14) Loc: +0x0E | offset to vtable
      buffer = Push<flatbuffers::uoffset_t>(buffer, 0x0e);
      // +0x20 | B2 E4 EF 89 19 7D 7F 6F | int64_t | 0x6F7F7D1989EFE4B2 (8034277808894108850) | table field `realtime_sent_time` (Long)
      buffer = Push<int64_t>(buffer, context.realtime_event_time.time_since_epoch().count());
      // +0x28 | 86 8D 92 65 FC 79 74 2B | int64_t | 0x2B7479FC65928D86 (3131261765872160134) | table field `monotonic_sent_time` (Long)
      buffer = Push<int64_t>(buffer, context.monotonic_event_time.time_since_epoch().count());
      // +0x30 | 0C 00 00 00 | UOffset32 | 0x0000000C (12) Loc: +0x3C | offset to field `data` (vector)
      buffer = Push<flatbuffers::uoffset_t>(buffer, 0x0c);
      // +0x34 | 86 00 00 00 | uint32_t | 0x00000086 (134) | table field `queue_index` (UInt)
      buffer = Push<uint32_t>(buffer, context.queue_index);
      // +0x38 | 71 00 00 00 | uint32_t | 0x00000071 (113) | table field `channel_index` (UInt)
      buffer = Push<uint32_t>(buffer, channel_index);
      //
      // vector (aos.logger.MessageHeader.data):
      // +0x3C | 0E 00 00 00 | uint32_t | 0x0000000E (14) | length of vector (# items)
      buffer = Push<flatbuffers::uoffset_t>(buffer, context.size);
      // +0x40 | FF | uint8_t | 0xFF (255) | value[0]
      // +0x41 | B8 | uint8_t | 0xB8 (184) | value[1]
      // +0x42 | EE | uint8_t | 0xEE (238) | value[2]
      // +0x43 | 00 | uint8_t | 0x00 (0) | value[3]
      // +0x44 | 20 | uint8_t | 0x20 (32) | value[4]
      // +0x45 | 4D | uint8_t | 0x4D (77) | value[5]
      // +0x46 | FF | uint8_t | 0xFF (255) | value[6]
      // +0x47 | 25 | uint8_t | 0x25 (37) | value[7]
      // +0x48 | 3C | uint8_t | 0x3C (60) | value[8]
      // +0x49 | 17 | uint8_t | 0x17 (23) | value[9]
      // +0x4A | 65 | uint8_t | 0x65 (101) | value[10]
      // +0x4B | 2F | uint8_t | 0x2F (47) | value[11]
      // +0x4C | 63 | uint8_t | 0x63 (99) | value[12]
      // +0x4D | 58 | uint8_t | 0x58 (88) | value[13]
      buffer = PushBytes(buffer, context.data, context.size);
      //
      // padding:
      // +0x4E | 00 00 | uint8_t[2] | .. | padding
      buffer = Pad(buffer, ((context.size + 7) & 0xfffffff8u) - context.size);
      // clang-format on
      break;

    case LogType::kLogDeliveryTimeOnly:
      // clang-format off
      // header:
      // +0x00 | 4C 00 00 00 | UOffset32 | 0x0000004C (76) Loc: +0x4C | size prefix
      // +0x04 | 1C 00 00 00 | UOffset32 | 0x0000001C (28) Loc: +0x20 | offset to root table `aos.logger.MessageHeader`
      buffer = Push<flatbuffers::uoffset_t>(buffer, 0x1c);
      //
      // padding:
      // +0x08 | 00 00 00 00 | uint8_t[4] | .... | padding
      buffer = Pad(buffer, 4);
      //
      // vtable (aos.logger.MessageHeader):
      // +0x0C | 14 00 | uint16_t | 0x0014 (20) | size of this vtable
      buffer = Push<flatbuffers::voffset_t>(buffer, 0x14);
      // +0x0E | 30 00 | uint16_t | 0x0030 (48) | size of referring table
      buffer = Push<flatbuffers::voffset_t>(buffer, 0x30);
      // +0x10 | 2C 00 | VOffset16 | 0x002C (44) | offset to field `channel_index` (id: 0)
      buffer = Push<flatbuffers::voffset_t>(buffer, 0x2c);
      // +0x12 | 20 00 | VOffset16 | 0x0020 (32) | offset to field `monotonic_sent_time` (id: 1)
      buffer = Push<flatbuffers::voffset_t>(buffer, 0x20);
      // +0x14 | 18 00 | VOffset16 | 0x0018 (24) | offset to field `realtime_sent_time` (id: 2)
      buffer = Push<flatbuffers::voffset_t>(buffer, 0x18);
      // +0x16 | 28 00 | VOffset16 | 0x0028 (40) | offset to field `queue_index` (id: 3)
      buffer = Push<flatbuffers::voffset_t>(buffer, 0x28);
      // +0x18 | 00 00 | VOffset16 | 0x0000 (0) | offset to field `data` (id: 4) <null> (Vector)
      buffer = Push<flatbuffers::voffset_t>(buffer, 0x00);
      // +0x1A | 10 00 | VOffset16 | 0x0010 (16) | offset to field `monotonic_remote_time` (id: 5)
      buffer = Push<flatbuffers::voffset_t>(buffer, 0x10);
      // +0x1C | 08 00 | VOffset16 | 0x0008 (8) | offset to field `realtime_remote_time` (id: 6)
      buffer = Push<flatbuffers::voffset_t>(buffer, 0x08);
      // +0x1E | 04 00 | VOffset16 | 0x0004 (4) | offset to field `remote_queue_index` (id: 7)
      buffer = Push<flatbuffers::voffset_t>(buffer, 0x04);
      //
      // root_table (aos.logger.MessageHeader):
      // +0x20 | 14 00 00 00 | SOffset32 | 0x00000014 (20) Loc: +0x0C | offset to vtable
      buffer = Push<flatbuffers::uoffset_t>(buffer, 0x14);
      // +0x24 | 69 00 00 00 | uint32_t | 0x00000069 (105) | table field `remote_queue_index` (UInt)
      buffer = Push<uint32_t>(buffer, context.remote_queue_index);
      // +0x28 | C6 85 F1 AB 83 B5 CD EB | int64_t | 0xEBCDB583ABF185C6 (-1455307527440726586) | table field `realtime_remote_time` (Long)
      buffer = Push<int64_t>(buffer, context.realtime_remote_time.time_since_epoch().count());
      // +0x30 | 47 24 D3 97 1E 42 2D 99 | int64_t | 0x992D421E97D32447 (-7409193112790948793) | table field `monotonic_remote_time` (Long)
      buffer = Push<int64_t>(buffer, context.monotonic_remote_time.time_since_epoch().count());
      // +0x38 | C8 B9 A7 AB 79 F2 CD 60 | int64_t | 0x60CDF279ABA7B9C8 (6975498002251626952) | table field `realtime_sent_time` (Long)
      buffer = Push<int64_t>(buffer, context.realtime_event_time.time_since_epoch().count());
      // +0x40 | EA 8F 2A 0F AF 01 7A AB | int64_t | 0xAB7A01AF0F2A8FEA (-6090553694679822358) | table field `monotonic_sent_time` (Long)
      buffer = Push<int64_t>(buffer, context.monotonic_event_time.time_since_epoch().count());
      // +0x48 | F5 00 00 00 | uint32_t | 0x000000F5 (245) | table field `queue_index` (UInt)
      buffer = Push<uint32_t>(buffer, context.queue_index);
      // +0x4C | 88 00 00 00 | uint32_t | 0x00000088 (136) | table field `channel_index` (UInt)
      buffer = Push<uint32_t>(buffer, channel_index);

      // clang-format on
      break;

    case LogType::kLogMessageAndDeliveryTime:
      // clang-format off
      // header:
      // +0x00 | 5C 00 00 00 | UOffset32 | 0x0000005C (92) Loc: +0x5C | size prefix
      // +0x04 | 1C 00 00 00 | UOffset32 | 0x0000001C (28) Loc: +0x20 | offset to root table `aos.logger.MessageHeader`
      buffer = Push<flatbuffers::uoffset_t>(buffer, 0x1c);
      //
      // padding:
      // +0x08 | 00 00 00 00 | uint8_t[4] | .... | padding
      buffer = Pad(buffer, 4);
      //
      // vtable (aos.logger.MessageHeader):
      // +0x0C | 14 00 | uint16_t | 0x0014 (20) | size of this vtable
      buffer = Push<flatbuffers::voffset_t>(buffer, 0x14);
      // +0x0E | 34 00 | uint16_t | 0x0034 (52) | size of referring table
      buffer = Push<flatbuffers::voffset_t>(buffer, 0x34);
      // +0x10 | 30 00 | VOffset16 | 0x0030 (48) | offset to field `channel_index` (id: 0)
      buffer = Push<flatbuffers::voffset_t>(buffer, 0x30);
      // +0x12 | 20 00 | VOffset16 | 0x0020 (32) | offset to field `monotonic_sent_time` (id: 1)
      buffer = Push<flatbuffers::voffset_t>(buffer, 0x20);
      // +0x14 | 18 00 | VOffset16 | 0x0018 (24) | offset to field `realtime_sent_time` (id: 2)
      buffer = Push<flatbuffers::voffset_t>(buffer, 0x18);
      // +0x16 | 2C 00 | VOffset16 | 0x002C (44) | offset to field `queue_index` (id: 3)
      buffer = Push<flatbuffers::voffset_t>(buffer, 0x2c);
      // +0x18 | 04 00 | VOffset16 | 0x0004 (4) | offset to field `data` (id: 4)
      buffer = Push<flatbuffers::voffset_t>(buffer, 0x04);
      // +0x1A | 10 00 | VOffset16 | 0x0010 (16) | offset to field `monotonic_remote_time` (id: 5)
      buffer = Push<flatbuffers::voffset_t>(buffer, 0x10);
      // +0x1C | 08 00 | VOffset16 | 0x0008 (8) | offset to field `realtime_remote_time` (id: 6)
      buffer = Push<flatbuffers::voffset_t>(buffer, 0x08);
      // +0x1E | 28 00 | VOffset16 | 0x0028 (40) | offset to field `remote_queue_index` (id: 7)
      buffer = Push<flatbuffers::voffset_t>(buffer, 0x28);
      //
      // root_table (aos.logger.MessageHeader):
      // +0x20 | 14 00 00 00 | SOffset32 | 0x00000014 (20) Loc: +0x0C | offset to vtable
      buffer = Push<flatbuffers::uoffset_t>(buffer, 0x14);
      // +0x24 | 30 00 00 00 | UOffset32 | 0x00000030 (48) Loc: +0x54 | offset to field `data` (vector)
      buffer = Push<flatbuffers::uoffset_t>(buffer, 0x30);
      // +0x28 | C4 C8 87 BF 40 6C 1F 29 | int64_t | 0x291F6C40BF87C8C4 (2963206105180129476) | table field `realtime_remote_time` (Long)
      buffer = Push<int64_t>(buffer, context.realtime_remote_time.time_since_epoch().count());
      // +0x30 | 0F 00 26 FD D2 6D C0 1F | int64_t | 0x1FC06DD2FD26000F (2287949363661897743) | table field `monotonic_remote_time` (Long)
      buffer = Push<int64_t>(buffer, context.monotonic_remote_time.time_since_epoch().count());
      // +0x38 | 29 75 09 C0 73 73 BF 88 | int64_t | 0x88BF7373C0097529 (-8593022623019338455) | table field `realtime_sent_time` (Long)
      buffer = Push<int64_t>(buffer, context.realtime_event_time.time_since_epoch().count());
      // +0x40 | 6D 8A AE 04 50 25 9C E9 | int64_t | 0xE99C255004AE8A6D (-1613373540899321235) | table field `monotonic_sent_time` (Long)
      buffer = Push<int64_t>(buffer, context.monotonic_event_time.time_since_epoch().count());
      // +0x48 | 47 00 00 00 | uint32_t | 0x00000047 (71) | table field `remote_queue_index` (UInt)
      buffer = Push<uint32_t>(buffer, context.remote_queue_index);
      // +0x4C | 4C 00 00 00 | uint32_t | 0x0000004C (76) | table field `queue_index` (UInt)
      buffer = Push<uint32_t>(buffer, context.queue_index);
      // +0x50 | 72 00 00 00 | uint32_t | 0x00000072 (114) | table field `channel_index` (UInt)
      buffer = Push<uint32_t>(buffer, channel_index);
      //
      // vector (aos.logger.MessageHeader.data):
      // +0x54 | 07 00 00 00 | uint32_t | 0x00000007 (7) | length of vector (# items)
      buffer = Push<flatbuffers::uoffset_t>(buffer, context.size);
      // +0x58 | B1 | uint8_t | 0xB1 (177) | value[0]
      // +0x59 | 4A | uint8_t | 0x4A (74) | value[1]
      // +0x5A | 50 | uint8_t | 0x50 (80) | value[2]
      // +0x5B | 24 | uint8_t | 0x24 (36) | value[3]
      // +0x5C | AF | uint8_t | 0xAF (175) | value[4]
      // +0x5D | C8 | uint8_t | 0xC8 (200) | value[5]
      // +0x5E | D5 | uint8_t | 0xD5 (213) | value[6]
      buffer = PushBytes(buffer, context.data, context.size);
      //
      // padding:
      // +0x5F | 00 | uint8_t[1] | . | padding
      buffer = Pad(buffer, ((context.size + 7) & 0xfffffff8u) - context.size);
      // clang-format on

      break;

    case LogType::kLogRemoteMessage:
      // This is the message we need to recreate.
      //
      // clang-format off
      // header:
      // +0x00 | 5C 00 00 00 | UOffset32 | 0x0000005C (92) Loc: +0x5C | size prefix
      // +0x04 | 18 00 00 00 | UOffset32 | 0x00000018 (24) Loc: +0x1C | offset to root table `aos.logger.MessageHeader`
      buffer = Push<flatbuffers::uoffset_t>(buffer, 0x18);
      //
      // padding:
      // +0x08 | 00 00 00 00 00 00 | uint8_t[6] | ...... | padding
      buffer = Pad(buffer, 6);
      //
      // vtable (aos.logger.MessageHeader):
      // +0x0E | 0E 00 | uint16_t | 0x000E (14) | size of this vtable
      buffer = Push<flatbuffers::voffset_t>(buffer, 0x0e);
      // +0x10 | 20 00 | uint16_t | 0x0020 (32) | size of referring table
      buffer = Push<flatbuffers::voffset_t>(buffer, 0x20);
      // +0x12 | 1C 00 | VOffset16 | 0x001C (28) | offset to field `channel_index` (id: 0)
      buffer = Push<flatbuffers::voffset_t>(buffer, 0x1c);
      // +0x14 | 0C 00 | VOffset16 | 0x000C (12) | offset to field `monotonic_sent_time` (id: 1)
      buffer = Push<flatbuffers::voffset_t>(buffer, 0x0c);
      // +0x16 | 04 00 | VOffset16 | 0x0004 (4) | offset to field `realtime_sent_time` (id: 2)
      buffer = Push<flatbuffers::voffset_t>(buffer, 0x04);
      // +0x18 | 18 00 | VOffset16 | 0x0018 (24) | offset to field `queue_index` (id: 3)
      buffer = Push<flatbuffers::voffset_t>(buffer, 0x18);
      // +0x1A | 14 00 | VOffset16 | 0x0014 (20) | offset to field `data` (id: 4)
      buffer = Push<flatbuffers::voffset_t>(buffer, 0x14);
      //
      // root_table (aos.logger.MessageHeader):
      // +0x1C | 0E 00 00 00 | SOffset32 | 0x0000000E (14) Loc: +0x0E | offset to vtable
      buffer = Push<flatbuffers::uoffset_t>(buffer, 0x0E);
      // +0x20 | D8 96 32 1A A0 D3 23 BB | int64_t | 0xBB23D3A01A3296D8 (-4961889679844403496) | table field `realtime_sent_time` (Long)
      buffer = Push<int64_t>(buffer, context.realtime_remote_time.time_since_epoch().count());
      // +0x28 | 2E 5D 23 B3 BE 84 CF C2 | int64_t | 0xC2CF84BEB3235D2E (-4409159555588334290) | table field `monotonic_sent_time` (Long)
      buffer = Push<int64_t>(buffer, context.monotonic_remote_time.time_since_epoch().count());
      // +0x30 | 0C 00 00 00 | UOffset32 | 0x0000000C (12) Loc: +0x3C | offset to field `data` (vector)
      buffer = Push<flatbuffers::uoffset_t>(buffer, 0x0C);
      // +0x34 | 69 00 00 00 | uint32_t | 0x00000069 (105) | table field `queue_index` (UInt)
      buffer = Push<uint32_t>(buffer, context.remote_queue_index);
      // +0x38 | F3 00 00 00 | uint32_t | 0x000000F3 (243) | table field `channel_index` (UInt)
      buffer = Push<uint32_t>(buffer, channel_index);
      //
      // vector (aos.logger.MessageHeader.data):
      // +0x3C | 1A 00 00 00 | uint32_t | 0x0000001A (26) | length of vector (# items)
      buffer = Push<flatbuffers::uoffset_t>(buffer, context.size);
      // +0x40 | 38 | uint8_t | 0x38 (56) | value[0]
      // +0x41 | 1A | uint8_t | 0x1A (26) | value[1]
      // ...
      // +0x58 | 90 | uint8_t | 0x90 (144) | value[24]
      // +0x59 | 92 | uint8_t | 0x92 (146) | value[25]
      buffer = PushBytes(buffer, context.data, context.size);
      //
      // padding:
      // +0x5A | 00 00 00 00 00 00 | uint8_t[6] | ...... | padding
      buffer = Pad(buffer, ((context.size + 7) & 0xfffffff8u) - context.size);
      // clang-format on
  }

  return message_size;
}

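// Stacks up the appropriate decoders for the file: an S3 fetcher or plain file
// reader as the base, optionally wrapped in an xz or snappy decompressor based
// on the filename extension.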
SpanReader::SpanReader(std::string_view filename, bool quiet)
    : filename_(filename) {
  static constexpr std::string_view kS3 = "s3:";
  if (filename.substr(0, kS3.size()) == kS3) {
#if ENABLE_S3
    decoder_ = std::make_unique<S3Fetcher>(filename);
#else
    LOG(FATAL) << "Reading files from S3 not supported on this platform";
#endif
  } else {
    decoder_ = std::make_unique<DummyDecoder>(filename);
  }

  static constexpr std::string_view kXz = ".xz";
  static constexpr std::string_view kSnappy = SnappyDecoder::kExtension;
  if (filename.substr(filename.size() - kXz.size()) == kXz) {
#if ENABLE_LZMA
    decoder_ =
        std::make_unique<ThreadedLzmaDecoder>(std::move(decoder_), quiet);
#else
    (void)quiet;
    LOG(FATAL) << "Reading xz-compressed files not supported on this platform";
#endif
  } else if (filename.substr(filename.size() - kSnappy.size()) == kSnappy) {
    decoder_ = std::make_unique<SnappyDecoder>(std::move(decoder_));
  }
}

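// Returns a span pointing at the next size-prefixed message without consuming
// it, reading more blocks as needed.  Returns an empty span at end of file or
// on a corrupted length prefix.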
absl::Span<const uint8_t> SpanReader::PeekMessage() {
  // Make sure we have enough for the size.
  if (data_.size() - consumed_data_ < sizeof(flatbuffers::uoffset_t)) {
    if (!ReadBlock()) {
      return absl::Span<const uint8_t>();
    }
  }

  // Now make sure we have enough for the message.
  const size_t data_size =
      flatbuffers::GetPrefixedSize(data_.data() + consumed_data_) +
      sizeof(flatbuffers::uoffset_t);
  if (data_size == sizeof(flatbuffers::uoffset_t)) {
    LOG(ERROR) << "Size of data is zero.  Log file end is corrupted, skipping.";
    LOG(ERROR) << "  Rest of log file is "
               << absl::BytesToHexString(std::string_view(
                      reinterpret_cast<const char *>(data_.data() +
                                                     consumed_data_),
                      data_.size() - consumed_data_));
    return absl::Span<const uint8_t>();
  }
  while (data_.size() < consumed_data_ + data_size) {
    if (!ReadBlock()) {
      return absl::Span<const uint8_t>();
    }
  }

  // And return it, without consuming the data.
  const uint8_t *data_ptr = data_.data() + consumed_data_;

  return absl::Span<const uint8_t>(data_ptr, data_size);
}

void SpanReader::ConsumeMessage() {
  size_t consumed_size =
      flatbuffers::GetPrefixedSize(data_.data() + consumed_data_) +
      sizeof(flatbuffers::uoffset_t);
  consumed_data_ += consumed_size;
  total_consumed_ += consumed_size;
}

absl::Span<const uint8_t> SpanReader::ReadMessage() {
  absl::Span<const uint8_t> result = PeekMessage();
  if (!result.empty()) {
    ConsumeMessage();
  } else {
    is_finished_ = true;
  }
  return result;
}

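// Reads the next chunk from the decoder into data_, discarding already
// consumed bytes first.  Returns false once the decoder is out of data.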
bool SpanReader::ReadBlock() {
  // This is the amount of data we grab at a time.  Doing larger chunks
  // minimizes syscalls and helps decompressors batch things more efficiently.
  constexpr size_t kReadSize = 256 * 1024;

  // Strip off any unused data at the front.
  if (consumed_data_ != 0) {
    data_.erase_front(consumed_data_);
    consumed_data_ = 0;
  }

  const size_t starting_size = data_.size();

  // This should automatically grow the backing store.  It won't shrink if we
  // get a small chunk later.  This reduces allocations when we want to append
  // more data.
  data_.resize(starting_size + kReadSize);

  const size_t count =
      decoder_->Read(data_.begin() + starting_size, data_.end());
  data_.resize(starting_size + count);
  if (count == 0) {
    return false;
  }

  total_read_ += count;

  return true;
}

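// Reads and verifies the log file header from the span reader.  Some old log
// files contain a duplicate header; when the workaround flag is set, the last
// valid header wins.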
std::optional<SizePrefixedFlatbufferVector<LogFileHeader>> ReadHeader(
    SpanReader *span_reader) {
  absl::Span<const uint8_t> config_data = span_reader->ReadMessage();

  // Make sure something was read.
  if (config_data.empty()) {
    return std::nullopt;
  }

  // And copy the config so we have it forever, removing the size prefix.
  SizePrefixedFlatbufferVector<LogFileHeader> result(config_data);
  if (!result.Verify()) {
    return std::nullopt;
  }

  // We only know of busted headers in the versions of the log file header
  // *before* the logger_sha1 field was added.  At some point before that, the
  // logic to track when a header has been written was rewritten in such a way
  // that it can't happen anymore.  We've seen some logs where the body parses
  // as a header recently, so the simple solution of always looking is failing
  // us.
  if (FLAGS_workaround_double_headers && !result.message().has_logger_sha1()) {
    while (true) {
      absl::Span<const uint8_t> maybe_header_data = span_reader->PeekMessage();
      if (maybe_header_data.empty()) {
        break;
      }

      aos::SizePrefixedFlatbufferSpan<aos::logger::LogFileHeader> maybe_header(
          maybe_header_data);
      if (maybe_header.Verify()) {
        LOG(WARNING) << "Found duplicate LogFileHeader in "
                     << span_reader->filename();
        ResizeableBuffer header_data_copy;
        header_data_copy.resize(maybe_header_data.size());
        memcpy(header_data_copy.data(), maybe_header_data.begin(),
               header_data_copy.size());
        result = SizePrefixedFlatbufferVector<LogFileHeader>(
            std::move(header_data_copy));

        span_reader->ConsumeMessage();
      } else {
        break;
      }
    }
  }
  return result;
}

std::optional<SizePrefixedFlatbufferVector<LogFileHeader>> ReadHeader(
    std::string_view filename) {
  SpanReader span_reader(filename);
  return ReadHeader(&span_reader);
}

std::optional<SizePrefixedFlatbufferVector<MessageHeader>> ReadNthMessage(
    std::string_view filename, size_t n) {
  SpanReader span_reader(filename);
  absl::Span<const uint8_t> data_span = span_reader.ReadMessage();
  for (size_t i = 0; i < n + 1; ++i) {
    data_span = span_reader.ReadMessage();

    // Make sure something was read.
    if (data_span.empty()) {
      return std::nullopt;
    }
  }

  // And copy the message so we have it forever, removing the size prefix.
  SizePrefixedFlatbufferVector<MessageHeader> result(data_span);
  if (!result.Verify()) {
    return std::nullopt;
  }
  return result;
}

MessageReader::MessageReader(std::string_view filename)
    : span_reader_(filename),
      raw_log_file_header_(
          SizePrefixedFlatbufferVector<LogFileHeader>::Empty()) {
  set_crash_on_corrupt_message_flag(FLAGS_crash_on_corrupt_message);
  set_ignore_corrupt_messages_flag(FLAGS_ignore_corrupt_messages);

  std::optional<SizePrefixedFlatbufferVector<LogFileHeader>>
      raw_log_file_header = ReadHeader(&span_reader_);

  // Make sure something was read.
  CHECK(raw_log_file_header) << ": Failed to read header from: " << filename;

  raw_log_file_header_ = std::move(*raw_log_file_header);

  CHECK(raw_log_file_header_.Verify()) << "Log file header is corrupted";

  total_verified_before_ = span_reader_.TotalConsumed();

  max_out_of_order_duration_ =
      FLAGS_max_out_of_order > 0
          ? chrono::duration_cast<chrono::nanoseconds>(
                chrono::duration<double>(FLAGS_max_out_of_order))
          : chrono::nanoseconds(log_file_header()->max_out_of_order_duration());

  VLOG(1) << "Opened " << filename << " as node "
          << FlatbufferToJson(log_file_header()->node());
}

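// Reads the next message, tracking corruption statistics.  Depending on the
// corruption flags this either CHECK-fails, silently skips ahead to the next
// verifiable message, or scans the remainder of the file just to accumulate
// corruption statistics.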
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001064std::shared_ptr<UnpackedMessageHeader> MessageReader::ReadMessage() {
Austin Schuh05b70472020-01-01 17:11:17 -08001065 absl::Span<const uint8_t> msg_data = span_reader_.ReadMessage();
James Kuszmaul9776b392023-01-14 14:08:08 -08001066 if (msg_data.empty()) {
Brian Smarttea913d42021-12-10 15:02:38 -08001067 if (is_corrupted()) {
1068 LOG(ERROR) << "Total corrupted volumes: before = "
1069 << total_verified_before_
1070 << " | corrupted = " << total_corrupted_
1071 << " | during = " << total_verified_during_
1072 << " | after = " << total_verified_after_ << std::endl;
1073 }
1074
1075 if (span_reader_.IsIncomplete()) {
Austin Schuh60e77942022-05-16 17:48:24 -07001076 LOG(ERROR) << "Unable to access some messages in " << filename() << " : "
1077 << span_reader_.TotalRead() << " bytes read, "
Brian Smarttea913d42021-12-10 15:02:38 -08001078 << span_reader_.TotalConsumed() << " bytes usable."
1079 << std::endl;
1080 }
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001081 return nullptr;
Austin Schuh05b70472020-01-01 17:11:17 -08001082 }
1083
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001084 SizePrefixedFlatbufferSpan<MessageHeader> msg(msg_data);
Brian Smarttea913d42021-12-10 15:02:38 -08001085
1086 if (crash_on_corrupt_message_flag_) {
1087 CHECK(msg.Verify()) << "Corrupted message at offset "
Austin Schuh60e77942022-05-16 17:48:24 -07001088 << total_verified_before_ << " found within "
1089 << filename()
Brian Smarttea913d42021-12-10 15:02:38 -08001090 << "; set --nocrash_on_corrupt_message to see summary;"
1091 << " also set --ignore_corrupt_messages to process"
1092 << " anyway";
1093
1094 } else if (!msg.Verify()) {
Austin Schuh60e77942022-05-16 17:48:24 -07001095 LOG(ERROR) << "Corrupted message at offset " << total_verified_before_
Brian Smarttea913d42021-12-10 15:02:38 -08001096 << " from " << filename() << std::endl;
1097
1098 total_corrupted_ += msg_data.size();
1099
1100 while (true) {
1101 absl::Span<const uint8_t> msg_data = span_reader_.ReadMessage();
1102
James Kuszmaul9776b392023-01-14 14:08:08 -08001103 if (msg_data.empty()) {
Brian Smarttea913d42021-12-10 15:02:38 -08001104 if (!ignore_corrupt_messages_flag_) {
1105 LOG(ERROR) << "Total corrupted volumes: before = "
1106 << total_verified_before_
1107 << " | corrupted = " << total_corrupted_
1108 << " | during = " << total_verified_during_
1109 << " | after = " << total_verified_after_ << std::endl;
1110
1111 if (span_reader_.IsIncomplete()) {
1112 LOG(ERROR) << "Unable to access some messages in " << filename()
1113 << " : " << span_reader_.TotalRead() << " bytes read, "
1114 << span_reader_.TotalConsumed() << " bytes usable."
1115 << std::endl;
1116 }
1117 return nullptr;
1118 }
1119 break;
1120 }
1121
1122 SizePrefixedFlatbufferSpan<MessageHeader> next_msg(msg_data);
1123
1124 if (!next_msg.Verify()) {
1125 total_corrupted_ += msg_data.size();
1126 total_verified_during_ += total_verified_after_;
1127 total_verified_after_ = 0;
1128
1129 } else {
1130 total_verified_after_ += msg_data.size();
1131 if (ignore_corrupt_messages_flag_) {
1132 msg = next_msg;
1133 break;
1134 }
1135 }
1136 }
1137 }
1138
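  // Account for this message's bytes: before the first corruption they count
  // towards total_verified_before_, afterwards towards total_verified_after_.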
1139 if (is_corrupted()) {
1140 total_verified_after_ += msg_data.size();
1141 } else {
1142 total_verified_before_ += msg_data.size();
1143 }
Austin Schuh05b70472020-01-01 17:11:17 -08001144
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001145 auto result = UnpackedMessageHeader::MakeMessage(msg.message());
Austin Schuh0e8db662021-07-06 10:43:47 -07001146
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001147 const monotonic_clock::time_point timestamp = result->monotonic_sent_time;
Austin Schuh05b70472020-01-01 17:11:17 -08001148
1149 newest_timestamp_ = std::max(newest_timestamp_, timestamp);
Austin Schuhd1873292021-11-18 15:35:30 -08001150
1151 if (VLOG_IS_ON(3)) {
1152 VLOG(3) << "Read from " << filename() << " data " << FlatbufferToJson(msg);
1153 } else if (VLOG_IS_ON(2)) {
1154 SizePrefixedFlatbufferVector<MessageHeader> msg_copy = msg;
1155 msg_copy.mutable_message()->clear_data();
1156 VLOG(2) << "Read from " << filename() << " data "
1157 << FlatbufferToJson(msg_copy);
1158 }
1159
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001160 return result;
1161}
1162
1163std::shared_ptr<UnpackedMessageHeader> UnpackedMessageHeader::MakeMessage(
1164 const MessageHeader &message) {
1165 const size_t data_size = message.has_data() ? message.data()->size() : 0;
1166
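  // Allocate the UnpackedMessageHeader and an (aligned) copy of the message
  // data in a single block.  The object is constructed with placement new
  // below and released through DestroyAndFree.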
1167 UnpackedMessageHeader *const unpacked_message =
1168 reinterpret_cast<UnpackedMessageHeader *>(
1169 malloc(sizeof(UnpackedMessageHeader) + data_size +
1170 kChannelDataAlignment - 1));
1171
1172 CHECK(message.has_channel_index());
1173 CHECK(message.has_monotonic_sent_time());
1174
1175 absl::Span<uint8_t> span;
1176 if (data_size > 0) {
1177 span =
1178 absl::Span<uint8_t>(reinterpret_cast<uint8_t *>(RoundChannelData(
1179 &unpacked_message->actual_data[0], data_size)),
1180 data_size);
1181 }
1182
Austin Schuh826e6ce2021-11-18 20:33:10 -08001183 std::optional<aos::monotonic_clock::time_point> monotonic_remote_time;
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001184 if (message.has_monotonic_remote_time()) {
Austin Schuh826e6ce2021-11-18 20:33:10 -08001185 monotonic_remote_time = aos::monotonic_clock::time_point(
1186 std::chrono::nanoseconds(message.monotonic_remote_time()));
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001187 }
1188 std::optional<realtime_clock::time_point> realtime_remote_time;
1189 if (message.has_realtime_remote_time()) {
1190 realtime_remote_time = realtime_clock::time_point(
1191 chrono::nanoseconds(message.realtime_remote_time()));
1192 }
1193
1194 std::optional<uint32_t> remote_queue_index;
1195 if (message.has_remote_queue_index()) {
1196 remote_queue_index = message.remote_queue_index();
1197 }
1198
James Kuszmaul9776b392023-01-14 14:08:08 -08001199 new (unpacked_message) UnpackedMessageHeader(
1200 message.channel_index(),
1201 monotonic_clock::time_point(
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001202 chrono::nanoseconds(message.monotonic_sent_time())),
James Kuszmaul9776b392023-01-14 14:08:08 -08001203 realtime_clock::time_point(
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001204 chrono::nanoseconds(message.realtime_sent_time())),
James Kuszmaul9776b392023-01-14 14:08:08 -08001205 message.queue_index(), monotonic_remote_time, realtime_remote_time,
1206 remote_queue_index,
1207 monotonic_clock::time_point(
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001208 std::chrono::nanoseconds(message.monotonic_timestamp_time())),
James Kuszmaul9776b392023-01-14 14:08:08 -08001209 message.has_monotonic_timestamp_time(), span);
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001210
1211 if (data_size > 0) {
1212 memcpy(span.data(), message.data()->data(), data_size);
1213 }
1214
1215 return std::shared_ptr<UnpackedMessageHeader>(unpacked_message,
1216 &DestroyAndFree);
Austin Schuh05b70472020-01-01 17:11:17 -08001217}
1218
Austin Schuhc41603c2020-10-11 16:17:37 -07001219PartsMessageReader::PartsMessageReader(LogParts log_parts)
Austin Schuh48507722021-07-17 17:29:24 -07001220 : parts_(std::move(log_parts)), message_reader_(parts_.parts[0]) {
Brian Silvermanfee16972021-09-14 12:06:38 -07001221 if (parts_.parts.size() >= 2) {
1222 next_message_reader_.emplace(parts_.parts[1]);
1223 }
Austin Schuh48507722021-07-17 17:29:24 -07001224 ComputeBootCounts();
1225}
1226
1227void PartsMessageReader::ComputeBootCounts() {
1228 boot_counts_.assign(configuration::NodesCount(parts_.config.get()),
1229 std::nullopt);
1230
1231 // We have 3 vintages of log files with different amounts of information.
1232 if (log_file_header()->has_boot_uuids()) {
1233 // The new hotness with the boots explicitly listed out. We can use the log
1234 // file header to compute the boot count of all relevant nodes.
1235 CHECK_EQ(log_file_header()->boot_uuids()->size(), boot_counts_.size());
1236 size_t node_index = 0;
1237 for (const flatbuffers::String *boot_uuid :
1238 *log_file_header()->boot_uuids()) {
1239 CHECK(parts_.boots);
1240 if (boot_uuid->size() != 0) {
1241 auto it = parts_.boots->boot_count_map.find(boot_uuid->str());
1242 if (it != parts_.boots->boot_count_map.end()) {
1243 boot_counts_[node_index] = it->second;
1244 }
1245 } else if (parts().boots->boots[node_index].size() == 1u) {
1246 boot_counts_[node_index] = 0;
1247 }
1248 ++node_index;
1249 }
1250 } else {
1251 // Older multi-node logs which are guaranteed to have UUIDs logged, or
1252 // single node log files with boot UUIDs in the header. We only know how to
1253 // order certain boots in certain circumstances.
1254 if (configuration::MultiNode(parts_.config.get()) || parts_.boots) {
1255 for (size_t node_index = 0; node_index < boot_counts_.size();
1256 ++node_index) {
1257 CHECK(parts_.boots);
1258 if (parts().boots->boots[node_index].size() == 1u) {
1259 boot_counts_[node_index] = 0;
1260 }
1261 }
1262 } else {
1263 // Really old single node logs without any UUIDs. They can't reboot.
1264 CHECK_EQ(boot_counts_.size(), 1u);
1265 boot_counts_[0] = 0u;
1266 }
1267 }
1268}
Austin Schuhc41603c2020-10-11 16:17:37 -07001269
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001270std::shared_ptr<UnpackedMessageHeader> PartsMessageReader::ReadMessage() {
Austin Schuhc41603c2020-10-11 16:17:37 -07001271 while (!done_) {
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001272 std::shared_ptr<UnpackedMessageHeader> message =
Austin Schuhc41603c2020-10-11 16:17:37 -07001273 message_reader_.ReadMessage();
1274 if (message) {
1275 newest_timestamp_ = message_reader_.newest_timestamp();
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001276 const monotonic_clock::time_point monotonic_sent_time =
1277 message->monotonic_sent_time;
1278
1279 // TODO(austin): Does this work with startup? Might need to use the
1280 // start time.
1281 // TODO(austin): Does this work with startup when we don't know the
1282 // remote start time too? Look at one of those logs to compare.
Austin Schuh315b96b2020-12-11 21:21:12 -08001283 if (monotonic_sent_time >
1284 parts_.monotonic_start_time + max_out_of_order_duration()) {
1285 after_start_ = true;
1286 }
1287 if (after_start_) {
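        // Once we are past startup, a message older than
        // newest_timestamp_ - max_out_of_order_duration() violates the max
        // out of order guarantee the log was written with, so fail loudly.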
Austin Schuhb000de62020-12-03 22:00:40 -08001288 CHECK_GE(monotonic_sent_time,
1289 newest_timestamp_ - max_out_of_order_duration())
Austin Schuha040c3f2021-02-13 16:09:07 -08001290 << ": Max out of order of " << max_out_of_order_duration().count()
1291 << "ns exceeded. " << parts_ << ", start time is "
Austin Schuh315b96b2020-12-11 21:21:12 -08001292 << parts_.monotonic_start_time << " currently reading "
1293 << filename();
Austin Schuhb000de62020-12-03 22:00:40 -08001294 }
Austin Schuhc41603c2020-10-11 16:17:37 -07001295 return message;
1296 }
1297 NextLog();
1298 }
Austin Schuh32f68492020-11-08 21:45:51 -08001299 newest_timestamp_ = monotonic_clock::max_time;
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001300 return nullptr;
Austin Schuhc41603c2020-10-11 16:17:37 -07001301}
1302
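// Advances message_reader_ to the next log part.  next_message_reader_ is
// kept one part ahead of message_reader_; once the final part has been
// consumed, done_ is set instead.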
1303void PartsMessageReader::NextLog() {
1304 if (next_part_index_ == parts_.parts.size()) {
Brian Silvermanfee16972021-09-14 12:06:38 -07001305 CHECK(!next_message_reader_);
Austin Schuhc41603c2020-10-11 16:17:37 -07001306 done_ = true;
1307 return;
1308 }
Brian Silvermanfee16972021-09-14 12:06:38 -07001309 CHECK(next_message_reader_);
1310 message_reader_ = std::move(*next_message_reader_);
Austin Schuh48507722021-07-17 17:29:24 -07001311 ComputeBootCounts();
Brian Silvermanfee16972021-09-14 12:06:38 -07001312 if (next_part_index_ + 1 < parts_.parts.size()) {
1313 next_message_reader_.emplace(parts_.parts[next_part_index_ + 1]);
1314 } else {
1315 next_message_reader_.reset();
1316 }
Austin Schuhc41603c2020-10-11 16:17:37 -07001317 ++next_part_index_;
1318}
1319
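// Messages sort by timestamp, then channel index, then queue index.
// Comparisons are only meaningful within a single boot, which the CHECKs
// below enforce.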
Austin Schuh1be0ce42020-11-29 22:43:26 -08001320bool Message::operator<(const Message &m2) const {
Austin Schuh2dc8c7d2021-07-01 17:41:28 -07001321 CHECK_EQ(this->timestamp.boot, m2.timestamp.boot);
Austin Schuhf16ef6a2021-06-30 21:48:17 -07001322
Austin Schuh2dc8c7d2021-07-01 17:41:28 -07001323 if (this->timestamp.time < m2.timestamp.time) {
Austin Schuh1be0ce42020-11-29 22:43:26 -08001324 return true;
Austin Schuh2dc8c7d2021-07-01 17:41:28 -07001325 } else if (this->timestamp.time > m2.timestamp.time) {
Austin Schuh1be0ce42020-11-29 22:43:26 -08001326 return false;
1327 }
1328
1329 if (this->channel_index < m2.channel_index) {
1330 return true;
1331 } else if (this->channel_index > m2.channel_index) {
1332 return false;
1333 }
1334
1335 return this->queue_index < m2.queue_index;
1336}
1337
1338bool Message::operator>=(const Message &m2) const { return !(*this < m2); }
Austin Schuh8f52ed52020-11-30 23:12:39 -08001339bool Message::operator==(const Message &m2) const {
Austin Schuh2dc8c7d2021-07-01 17:41:28 -07001340 CHECK_EQ(this->timestamp.boot, m2.timestamp.boot);
Austin Schuhf16ef6a2021-06-30 21:48:17 -07001341
Austin Schuh2dc8c7d2021-07-01 17:41:28 -07001342 return timestamp.time == m2.timestamp.time &&
1343 channel_index == m2.channel_index && queue_index == m2.queue_index;
Austin Schuh8f52ed52020-11-30 23:12:39 -08001344}
Austin Schuh1be0ce42020-11-29 22:43:26 -08001345
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001346std::ostream &operator<<(std::ostream &os, const UnpackedMessageHeader &m) {
1347 os << "{.channel_index=" << m.channel_index
1348 << ", .monotonic_sent_time=" << m.monotonic_sent_time
1349 << ", .realtime_sent_time=" << m.realtime_sent_time
1350 << ", .queue_index=" << m.queue_index;
1351 if (m.monotonic_remote_time) {
Austin Schuh826e6ce2021-11-18 20:33:10 -08001352 os << ", .monotonic_remote_time=" << *m.monotonic_remote_time;
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001353 }
1354 os << ", .realtime_remote_time=";
1355 PrintOptionalOrNull(&os, m.realtime_remote_time);
1356 os << ", .remote_queue_index=";
1357 PrintOptionalOrNull(&os, m.remote_queue_index);
1358 if (m.has_monotonic_timestamp_time) {
1359 os << ", .monotonic_timestamp_time=" << m.monotonic_timestamp_time;
1360 }
Austin Schuh22cf7862022-09-19 19:09:42 -07001361 os << "}";
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001362 return os;
1363}
1364
Austin Schuh1be0ce42020-11-29 22:43:26 -08001365std::ostream &operator<<(std::ostream &os, const Message &m) {
1366 os << "{.channel_index=" << m.channel_index
Austin Schuh2dc8c7d2021-07-01 17:41:28 -07001367 << ", .queue_index=" << m.queue_index << ", .timestamp=" << m.timestamp;
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001368 if (m.data != nullptr) {
Austin Schuh826e6ce2021-11-18 20:33:10 -08001369 if (m.data->remote_queue_index.has_value()) {
1370 os << ", .remote_queue_index=" << *m.data->remote_queue_index;
1371 }
1372 if (m.data->monotonic_remote_time.has_value()) {
1373 os << ", .monotonic_remote_time=" << *m.data->monotonic_remote_time;
1374 }
Austin Schuhfb1b3292021-11-16 21:20:15 -08001375 os << ", .data=" << m.data;
Austin Schuhd2f96102020-12-01 20:27:29 -08001376 }
1377 os << "}";
1378 return os;
1379}
1380
1381std::ostream &operator<<(std::ostream &os, const TimestampedMessage &m) {
1382 os << "{.channel_index=" << m.channel_index
1383 << ", .queue_index=" << m.queue_index
1384 << ", .monotonic_event_time=" << m.monotonic_event_time
1385 << ", .realtime_event_time=" << m.realtime_event_time;
Austin Schuh58646e22021-08-23 23:51:46 -07001386 if (m.remote_queue_index != BootQueueIndex::Invalid()) {
Austin Schuhd2f96102020-12-01 20:27:29 -08001387 os << ", .remote_queue_index=" << m.remote_queue_index;
1388 }
Austin Schuh2dc8c7d2021-07-01 17:41:28 -07001389 if (m.monotonic_remote_time != BootTimestamp::min_time()) {
Austin Schuhd2f96102020-12-01 20:27:29 -08001390 os << ", .monotonic_remote_time=" << m.monotonic_remote_time;
1391 }
1392 if (m.realtime_remote_time != realtime_clock::min_time) {
1393 os << ", .realtime_remote_time=" << m.realtime_remote_time;
1394 }
Austin Schuh2dc8c7d2021-07-01 17:41:28 -07001395 if (m.monotonic_timestamp_time != BootTimestamp::min_time()) {
Austin Schuh8bf1e632021-01-02 22:41:04 -08001396 os << ", .monotonic_timestamp_time=" << m.monotonic_timestamp_time;
1397 }
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001398 if (m.data != nullptr) {
1399 os << ", .data=" << *m.data;
Austin Schuh22cf7862022-09-19 19:09:42 -07001400 } else {
1401 os << ", .data=nullptr";
Austin Schuhd2f96102020-12-01 20:27:29 -08001402 }
1403 os << "}";
Austin Schuh1be0ce42020-11-29 22:43:26 -08001404 return os;
1405}
1406
Austin Schuh4b5c22a2020-11-30 22:58:43 -08001407LogPartsSorter::LogPartsSorter(LogParts log_parts)
Austin Schuh48507722021-07-17 17:29:24 -07001408 : parts_message_reader_(log_parts),
1409 source_node_index_(configuration::SourceNodeIndex(parts().config.get())) {
1410}
Austin Schuh4b5c22a2020-11-30 22:58:43 -08001411
1412Message *LogPartsSorter::Front() {
1413 // Queue up data until enough has been queued that the front message is
1414 // sorted enough to be safe to pop. This may do nothing, so we should make
1415 // sure the no-op path is cheap to check.
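  // The invariant: once sorted_until_ has advanced past a message's
  // timestamp, the underlying reader can no longer produce anything older, so
  // that message is safe to hand out.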
1416 if (sorted_until() != monotonic_clock::max_time) {
1417 while (true) {
Austin Schuh48507722021-07-17 17:29:24 -07001418 if (!messages_.empty() &&
1419 messages_.begin()->timestamp.time < sorted_until() &&
Austin Schuhb000de62020-12-03 22:00:40 -08001420 sorted_until() >= monotonic_start_time()) {
Austin Schuh4b5c22a2020-11-30 22:58:43 -08001421 break;
1422 }
1423
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001424 std::shared_ptr<UnpackedMessageHeader> m =
Austin Schuh4b5c22a2020-11-30 22:58:43 -08001425 parts_message_reader_.ReadMessage();
1426 // No data left; we are sorted forever. Work through what is left.
1427 if (!m) {
1428 sorted_until_ = monotonic_clock::max_time;
1429 break;
1430 }
1431
Austin Schuh48507722021-07-17 17:29:24 -07001432 size_t monotonic_timestamp_boot = 0;
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001433 if (m->has_monotonic_timestamp_time) {
Austin Schuh48507722021-07-17 17:29:24 -07001434 monotonic_timestamp_boot = parts().logger_boot_count;
1435 }
1436 size_t monotonic_remote_boot = 0xffffff;
1437
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001438 if (m->monotonic_remote_time.has_value()) {
Austin Schuh60e77942022-05-16 17:48:24 -07001439 const Node *node =
1440 parts().config->nodes()->Get(source_node_index_[m->channel_index]);
milind-ua50344f2021-08-25 18:22:20 -07001441
Austin Schuh48507722021-07-17 17:29:24 -07001442 std::optional<size_t> boot = parts_message_reader_.boot_count(
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001443 source_node_index_[m->channel_index]);
milind-ua50344f2021-08-25 18:22:20 -07001444 CHECK(boot) << ": Failed to find boot for node " << MaybeNodeName(node)
Austin Schuh60e77942022-05-16 17:48:24 -07001445 << ", with index " << source_node_index_[m->channel_index];
Austin Schuh48507722021-07-17 17:29:24 -07001446 monotonic_remote_boot = *boot;
1447 }
1448
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001449 messages_.insert(
1450 Message{.channel_index = m->channel_index,
1451 .queue_index = BootQueueIndex{.boot = parts().boot_count,
1452 .index = m->queue_index},
1453 .timestamp = BootTimestamp{.boot = parts().boot_count,
1454 .time = m->monotonic_sent_time},
1455 .monotonic_remote_boot = monotonic_remote_boot,
1456 .monotonic_timestamp_boot = monotonic_timestamp_boot,
1457 .data = std::move(m)});
Austin Schuh4b5c22a2020-11-30 22:58:43 -08001458
1459 // Now, update sorted_until_ to match the new message.
1460 if (parts_message_reader_.newest_timestamp() >
1461 monotonic_clock::min_time +
1462 parts_message_reader_.max_out_of_order_duration()) {
1463 sorted_until_ = parts_message_reader_.newest_timestamp() -
1464 parts_message_reader_.max_out_of_order_duration();
1465 } else {
1466 sorted_until_ = monotonic_clock::min_time;
1467 }
1468 }
1469 }
1470
1471 // Now that we have enough data queued, return a pointer to the oldest piece
1472 // of data if it exists.
1473 if (messages_.empty()) {
Austin Schuhb000de62020-12-03 22:00:40 -08001474 last_message_time_ = monotonic_clock::max_time;
Austin Schuh4b5c22a2020-11-30 22:58:43 -08001475 return nullptr;
1476 }
1477
Austin Schuh2dc8c7d2021-07-01 17:41:28 -07001478 CHECK_GE(messages_.begin()->timestamp.time, last_message_time_)
Austin Schuh315b96b2020-12-11 21:21:12 -08001479 << DebugString() << " reading " << parts_message_reader_.filename();
Austin Schuh2dc8c7d2021-07-01 17:41:28 -07001480 last_message_time_ = messages_.begin()->timestamp.time;
Austin Schuh4b5c22a2020-11-30 22:58:43 -08001481 return &(*messages_.begin());
1482}
1483
1484void LogPartsSorter::PopFront() { messages_.erase(messages_.begin()); }
1485
1486std::string LogPartsSorter::DebugString() const {
1487 std::stringstream ss;
1488 ss << "messages: [\n";
Austin Schuh315b96b2020-12-11 21:21:12 -08001489 int count = 0;
1490 bool no_dots = true;
Austin Schuh4b5c22a2020-11-30 22:58:43 -08001491 for (const Message &m : messages_) {
Austin Schuh315b96b2020-12-11 21:21:12 -08001492 if (count < 15 || count > static_cast<int>(messages_.size()) - 15) {
1493 ss << m << "\n";
1494 } else if (no_dots) {
1495 ss << "...\n";
1496 no_dots = false;
1497 }
1498 ++count;
Austin Schuh4b5c22a2020-11-30 22:58:43 -08001499 }
1500 ss << "] <- " << parts_message_reader_.filename();
1501 return ss.str();
1502}
1503
Austin Schuhd2f96102020-12-01 20:27:29 -08001504NodeMerger::NodeMerger(std::vector<LogParts> parts) {
1505 CHECK_GE(parts.size(), 1u);
Austin Schuh715adc12021-06-29 22:07:39 -07001506 // Enforce that we are sorting things only from a single node from a single
1507 // boot.
1508 const std::string_view part0_node = parts[0].node;
1509 const std::string_view part0_source_boot_uuid = parts[0].source_boot_uuid;
Austin Schuhd2f96102020-12-01 20:27:29 -08001510 for (size_t i = 1; i < parts.size(); ++i) {
1511 CHECK_EQ(part0_node, parts[i].node) << ": Can't merge different nodes.";
Austin Schuh715adc12021-06-29 22:07:39 -07001512 CHECK_EQ(part0_source_boot_uuid, parts[i].source_boot_uuid)
1513 << ": Can't merge different boots.";
Austin Schuhd2f96102020-12-01 20:27:29 -08001514 }
Austin Schuh715adc12021-06-29 22:07:39 -07001515
1516 node_ = configuration::GetNodeIndex(parts[0].config.get(), part0_node);
1517
Austin Schuhd2f96102020-12-01 20:27:29 -08001518 for (LogParts &part : parts) {
1519 parts_sorters_.emplace_back(std::move(part));
1520 }
1521
Austin Schuhd2f96102020-12-01 20:27:29 -08001522 monotonic_start_time_ = monotonic_clock::max_time;
Austin Schuh9dc42612021-09-20 20:41:29 -07001523 realtime_start_time_ = realtime_clock::min_time;
Austin Schuhd2f96102020-12-01 20:27:29 -08001524 for (const LogPartsSorter &parts_sorter : parts_sorters_) {
Sanjay Narayanan9896c752021-09-01 16:16:48 -07001525 // We want to capture the earliest meaningful start time here. The start
1526 // time defaults to min_time when there's no meaningful value to report, so
1527 // let's ignore those.
Austin Schuh9dc42612021-09-20 20:41:29 -07001528 if (parts_sorter.monotonic_start_time() != monotonic_clock::min_time) {
1529 bool accept = false;
1530 // We want to prioritize start times from the logger node. Really, we
1531 // want to prioritize start times with a valid realtime_clock time. So,
1532 // if we have a start time without a RT clock, prefer a start time with a
1533 // RT clock, even if it is later.
1534 if (parts_sorter.realtime_start_time() != realtime_clock::min_time) {
1535 // We've got a good one. See if the current start time has a good RT
1536 // clock, or if we should use this one instead.
1537 if (parts_sorter.monotonic_start_time() < monotonic_start_time_) {
1538 accept = true;
1539 } else if (realtime_start_time_ == realtime_clock::min_time) {
1540 // The previous start time doesn't have a good RT time, so it is very
1541 // likely the start time from a remote part file. We just found a
1542 // better start time with a real RT time, so switch to that instead.
1543 accept = true;
1544 }
1545 } else if (realtime_start_time_ == realtime_clock::min_time) {
1546 // We don't have a RT time, so take the oldest.
1547 if (parts_sorter.monotonic_start_time() < monotonic_start_time_) {
1548 accept = true;
1549 }
1550 }
1551
1552 if (accept) {
1553 monotonic_start_time_ = parts_sorter.monotonic_start_time();
1554 realtime_start_time_ = parts_sorter.realtime_start_time();
1555 }
Austin Schuhd2f96102020-12-01 20:27:29 -08001556 }
1557 }
Sanjay Narayanan9896c752021-09-01 16:16:48 -07001558
1559 // If there was no meaningful start time reported, just use min_time.
1560 if (monotonic_start_time_ == monotonic_clock::max_time) {
1561 monotonic_start_time_ = monotonic_clock::min_time;
1562 realtime_start_time_ = realtime_clock::min_time;
1563 }
Austin Schuhd2f96102020-12-01 20:27:29 -08001564}
Austin Schuh8f52ed52020-11-30 23:12:39 -08001565
Austin Schuh0ca51f32020-12-25 21:51:45 -08001566std::vector<const LogParts *> NodeMerger::Parts() const {
1567 std::vector<const LogParts *> p;
1568 p.reserve(parts_sorters_.size());
1569 for (const LogPartsSorter &parts_sorter : parts_sorters_) {
1570 p.emplace_back(&parts_sorter.parts());
1571 }
1572 return p;
1573}
1574
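// Returns the oldest message across all of this node's parts sorters,
// deduplicating identical messages that show up in more than one part file.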
Austin Schuh8f52ed52020-11-30 23:12:39 -08001575Message *NodeMerger::Front() {
1576 // Return the current Front if we have one, otherwise go compute one.
1577 if (current_ != nullptr) {
Austin Schuhb000de62020-12-03 22:00:40 -08001578 Message *result = current_->Front();
Austin Schuh2dc8c7d2021-07-01 17:41:28 -07001579 CHECK_GE(result->timestamp.time, last_message_time_);
Austin Schuhb000de62020-12-03 22:00:40 -08001580 return result;
Austin Schuh8f52ed52020-11-30 23:12:39 -08001581 }
1582
1583 // Otherwise, do a simple search for the oldest message, deduplicating any
1584 // duplicates.
1585 Message *oldest = nullptr;
1586 sorted_until_ = monotonic_clock::max_time;
Austin Schuhd2f96102020-12-01 20:27:29 -08001587 for (LogPartsSorter &parts_sorter : parts_sorters_) {
1588 Message *m = parts_sorter.Front();
Austin Schuh8f52ed52020-11-30 23:12:39 -08001589 if (!m) {
Austin Schuhd2f96102020-12-01 20:27:29 -08001590 sorted_until_ = std::min(sorted_until_, parts_sorter.sorted_until());
Austin Schuh8f52ed52020-11-30 23:12:39 -08001591 continue;
1592 }
1593 if (oldest == nullptr || *m < *oldest) {
1594 oldest = m;
Austin Schuhd2f96102020-12-01 20:27:29 -08001595 current_ = &parts_sorter;
Austin Schuh8f52ed52020-11-30 23:12:39 -08001596 } else if (*m == *oldest) {
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001597 // Found a duplicate. If there is a choice, we want the one which has
1598 // the timestamp time.
1599 if (!m->data->has_monotonic_timestamp_time) {
Austin Schuh8bf1e632021-01-02 22:41:04 -08001600 parts_sorter.PopFront();
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001601 } else if (!oldest->data->has_monotonic_timestamp_time) {
Austin Schuh8bf1e632021-01-02 22:41:04 -08001602 current_->PopFront();
1603 current_ = &parts_sorter;
1604 oldest = m;
1605 } else {
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001606 CHECK_EQ(m->data->monotonic_timestamp_time,
1607 oldest->data->monotonic_timestamp_time);
Austin Schuh8bf1e632021-01-02 22:41:04 -08001608 parts_sorter.PopFront();
1609 }
Austin Schuh8f52ed52020-11-30 23:12:39 -08001610 }
1611
1612 // PopFront may change this, so compute it down here.
Austin Schuhd2f96102020-12-01 20:27:29 -08001613 sorted_until_ = std::min(sorted_until_, parts_sorter.sorted_until());
Austin Schuh8f52ed52020-11-30 23:12:39 -08001614 }
1615
Austin Schuhb000de62020-12-03 22:00:40 -08001616 if (oldest) {
Austin Schuh2dc8c7d2021-07-01 17:41:28 -07001617 CHECK_GE(oldest->timestamp.time, last_message_time_);
1618 last_message_time_ = oldest->timestamp.time;
Austin Schuh5dd22842021-11-17 16:09:39 -08001619 monotonic_oldest_time_ =
1620 std::min(monotonic_oldest_time_, oldest->timestamp.time);
Austin Schuhb000de62020-12-03 22:00:40 -08001621 } else {
1622 last_message_time_ = monotonic_clock::max_time;
1623 }
1624
Austin Schuh8f52ed52020-11-30 23:12:39 -08001625 // Return the oldest message found. This will be nullptr if nothing was
1626 // found, indicating there is nothing left.
1627 return oldest;
1628}
1629
1630void NodeMerger::PopFront() {
1631 CHECK(current_ != nullptr) << "Popping before calling Front()";
1632 current_->PopFront();
1633 current_ = nullptr;
1634}
1635
Austin Schuhf16ef6a2021-06-30 21:48:17 -07001636BootMerger::BootMerger(std::vector<LogParts> files) {
1637 std::vector<std::vector<LogParts>> boots;
1638
1639 // Now, we need to split things out by boot.
1640 for (size_t i = 0; i < files.size(); ++i) {
Austin Schuhf16ef6a2021-06-30 21:48:17 -07001641 const size_t boot_count = files[i].boot_count;
Austin Schuhf16ef6a2021-06-30 21:48:17 -07001642 if (boot_count + 1 > boots.size()) {
1643 boots.resize(boot_count + 1);
1644 }
1645 boots[boot_count].emplace_back(std::move(files[i]));
1646 }
1647
1648 node_mergers_.reserve(boots.size());
1649 for (size_t i = 0; i < boots.size(); ++i) {
Austin Schuh48507722021-07-17 17:29:24 -07001650 VLOG(2) << "Boot " << i;
Austin Schuhf16ef6a2021-06-30 21:48:17 -07001651 for (auto &p : boots[i]) {
Austin Schuh48507722021-07-17 17:29:24 -07001652 VLOG(2) << "Part " << p;
Austin Schuhf16ef6a2021-06-30 21:48:17 -07001653 }
1654 node_mergers_.emplace_back(
1655 std::make_unique<NodeMerger>(std::move(boots[i])));
1656 }
1657}
1658
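// Returns messages boot by boot: once the NodeMerger for the current boot is
// exhausted, advance to the next boot's merger.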
1659Message *BootMerger::Front() {
1660 Message *result = node_mergers_[index_]->Front();
1661
1662 if (result != nullptr) {
1663 return result;
1664 }
1665
1666 if (index_ + 1u == node_mergers_.size()) {
1667 // At the end of the last node merger, just return.
1668 return nullptr;
1669 } else {
1670 ++index_;
1671 return Front();
1672 }
1673}
1674
1675void BootMerger::PopFront() { node_mergers_[index_]->PopFront(); }
1676
Austin Schuh2dc8c7d2021-07-01 17:41:28 -07001677std::vector<const LogParts *> BootMerger::Parts() const {
1678 std::vector<const LogParts *> results;
1679 for (const std::unique_ptr<NodeMerger> &node_merger : node_mergers_) {
1680 std::vector<const LogParts *> node_parts = node_merger->Parts();
1681
1682 results.insert(results.end(), std::make_move_iterator(node_parts.begin()),
1683 std::make_move_iterator(node_parts.end()));
1684 }
1685
1686 return results;
1687}
1688
Austin Schuhd2f96102020-12-01 20:27:29 -08001689TimestampMapper::TimestampMapper(std::vector<LogParts> parts)
Austin Schuh2dc8c7d2021-07-01 17:41:28 -07001690 : boot_merger_(std::move(parts)),
Austin Schuh79b30942021-01-24 22:32:21 -08001691 timestamp_callback_([](TimestampedMessage *) {}) {
Austin Schuh2dc8c7d2021-07-01 17:41:28 -07001692 for (const LogParts *part : boot_merger_.Parts()) {
Austin Schuh0ca51f32020-12-25 21:51:45 -08001693 if (!configuration_) {
1694 configuration_ = part->config;
1695 } else {
1696 CHECK_EQ(configuration_.get(), part->config.get());
1697 }
1698 }
1699 const Configuration *config = configuration_.get();
Austin Schuhd2f96102020-12-01 20:27:29 -08001700 // Only fill out nodes_data_ if there are nodes. Otherwise everything gets
1701 // pretty simple.
1702 if (configuration::MultiNode(config)) {
1703 nodes_data_.resize(config->nodes()->size());
1704 const Node *my_node = config->nodes()->Get(node());
1705 for (size_t node_index = 0; node_index < nodes_data_.size(); ++node_index) {
1706 const Node *node = config->nodes()->Get(node_index);
1707 NodeData *node_data = &nodes_data_[node_index];
1708 node_data->channels.resize(config->channels()->size());
1709 // We should save the channel if it is delivered to the node represented
1710 // by the NodeData, but not sent by that node. That combo means it is
1711 // forwarded.
1712 size_t channel_index = 0;
1713 node_data->any_delivered = false;
1714 for (const Channel *channel : *config->channels()) {
1715 node_data->channels[channel_index].delivered =
1716 configuration::ChannelIsReadableOnNode(channel, node) &&
Austin Schuhb3dbb6d2021-01-02 17:29:35 -08001717 configuration::ChannelIsSendableOnNode(channel, my_node) &&
1718 (my_node != node);
Austin Schuhd2f96102020-12-01 20:27:29 -08001719 node_data->any_delivered = node_data->any_delivered ||
1720 node_data->channels[channel_index].delivered;
Austin Schuh6a7358f2021-11-18 22:40:40 -08001721 if (node_data->channels[channel_index].delivered) {
1722 const Connection *connection =
1723 configuration::ConnectionToNode(channel, node);
1724 node_data->channels[channel_index].time_to_live =
1725 chrono::nanoseconds(connection->time_to_live());
1726 }
Austin Schuhd2f96102020-12-01 20:27:29 -08001727 ++channel_index;
1728 }
1729 }
1730
1731 for (const Channel *channel : *config->channels()) {
1732 source_node_.emplace_back(configuration::GetNodeIndex(
1733 config, channel->source_node()->string_view()));
1734 }
1735 }
1736}
1737
1738void TimestampMapper::AddPeer(TimestampMapper *timestamp_mapper) {
Austin Schuh0ca51f32020-12-25 21:51:45 -08001739 CHECK(configuration::MultiNode(configuration()));
Austin Schuhd2f96102020-12-01 20:27:29 -08001740 CHECK_NE(timestamp_mapper->node(), node());
1741 CHECK_LT(timestamp_mapper->node(), nodes_data_.size());
1742
1743 NodeData *node_data = &nodes_data_[timestamp_mapper->node()];
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001744 // Only set it if this node delivers to the peer timestamp_mapper. Otherwise
Austin Schuhd2f96102020-12-01 20:27:29 -08001745 // we could needlessly save data.
1746 if (node_data->any_delivered) {
Austin Schuh87dd3832021-01-01 23:07:31 -08001747 VLOG(1) << "Registering on node " << node() << " for peer node "
1748 << timestamp_mapper->node();
Austin Schuhd2f96102020-12-01 20:27:29 -08001749 CHECK(timestamp_mapper->nodes_data_[node()].peer == nullptr);
1750
1751 timestamp_mapper->nodes_data_[node()].peer = this;
Austin Schuh36c00932021-07-19 18:13:21 -07001752
1753 node_data->save_for_peer = true;
Austin Schuhd2f96102020-12-01 20:27:29 -08001754 }
1755}
1756
Austin Schuh79b30942021-01-24 22:32:21 -08001757void TimestampMapper::QueueMessage(Message *m) {
Austin Schuh60e77942022-05-16 17:48:24 -07001758 matched_messages_.emplace_back(
1759 TimestampedMessage{.channel_index = m->channel_index,
1760 .queue_index = m->queue_index,
1761 .monotonic_event_time = m->timestamp,
1762 .realtime_event_time = m->data->realtime_sent_time,
1763 .remote_queue_index = BootQueueIndex::Invalid(),
1764 .monotonic_remote_time = BootTimestamp::min_time(),
1765 .realtime_remote_time = realtime_clock::min_time,
1766 .monotonic_timestamp_time = BootTimestamp::min_time(),
1767 .data = std::move(m->data)});
Austin Schuhd2f96102020-12-01 20:27:29 -08001768}
1769
1770TimestampedMessage *TimestampMapper::Front() {
1771 // No need to fetch anything new. A previous message still exists.
1772 switch (first_message_) {
1773 case FirstMessage::kNeedsUpdate:
1774 break;
1775 case FirstMessage::kInMessage:
Austin Schuh79b30942021-01-24 22:32:21 -08001776 return &matched_messages_.front();
Austin Schuhd2f96102020-12-01 20:27:29 -08001777 case FirstMessage::kNullptr:
1778 return nullptr;
1779 }
1780
Austin Schuh79b30942021-01-24 22:32:21 -08001781 if (matched_messages_.empty()) {
1782 if (!QueueMatched()) {
1783 first_message_ = FirstMessage::kNullptr;
1784 return nullptr;
1785 }
1786 }
1787 first_message_ = FirstMessage::kInMessage;
1788 return &matched_messages_.front();
1789}
1790
1791bool TimestampMapper::QueueMatched() {
Eric Schmiedebergb38477e2022-12-02 16:08:04 -07001792 MatchResult result = MatchResult::kEndOfFile;
1793 do {
1794 result = MaybeQueueMatched();
1795 } while (result == MatchResult::kSkipped);
1796 return result == MatchResult::kQueued;
1797}
1798
1799bool TimestampMapper::CheckReplayChannelsAndMaybePop(
1800 const TimestampedMessage & /*message*/) {
1801 if (replay_channels_callback_ &&
1802 !replay_channels_callback_(matched_messages_.back())) {
1803 matched_messages_.pop_back();
1804 return true;
1805 }
1806 return false;
1807}
1808
1809TimestampMapper::MatchResult TimestampMapper::MaybeQueueMatched() {
Austin Schuhd2f96102020-12-01 20:27:29 -08001810 if (nodes_data_.empty()) {
1811 // Simple path. We are single node, so there are no timestamps to match!
1812 CHECK_EQ(messages_.size(), 0u);
Austin Schuh2dc8c7d2021-07-01 17:41:28 -07001813 Message *m = boot_merger_.Front();
Austin Schuhd2f96102020-12-01 20:27:29 -08001814 if (!m) {
Eric Schmiedebergb38477e2022-12-02 16:08:04 -07001815 return MatchResult::kEndOfFile;
Austin Schuhd2f96102020-12-01 20:27:29 -08001816 }
Austin Schuh79b30942021-01-24 22:32:21 -08001817 // Enqueue this message into matched_messages_ so we have a place to
1818 // associate remote timestamps, and return it.
1819 QueueMessage(m);
Austin Schuhd2f96102020-12-01 20:27:29 -08001820
Austin Schuh79b30942021-01-24 22:32:21 -08001821 CHECK_GE(matched_messages_.back().monotonic_event_time, last_message_time_);
1822 last_message_time_ = matched_messages_.back().monotonic_event_time;
1823
1824 // We are a thin wrapper around boot_merger_. Call it directly.
Austin Schuh2dc8c7d2021-07-01 17:41:28 -07001825 boot_merger_.PopFront();
Austin Schuh79b30942021-01-24 22:32:21 -08001826 timestamp_callback_(&matched_messages_.back());
Eric Schmiedebergb38477e2022-12-02 16:08:04 -07001827 if (CheckReplayChannelsAndMaybePop(matched_messages_.back())) {
1828 return MatchResult::kSkipped;
1829 }
1830 return MatchResult::kQueued;
Austin Schuhd2f96102020-12-01 20:27:29 -08001831 }
1832
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001833 // We need to add messages to the list so that the ones which are delivered
1834 // get processed. Reuse the flow below which uses messages_ by just adding
1835 // the new message to messages_ and continuing.
Austin Schuhd2f96102020-12-01 20:27:29 -08001836 if (messages_.empty()) {
1837 if (!Queue()) {
1838 // Found nothing to add, we are out of data!
Eric Schmiedebergb38477e2022-12-02 16:08:04 -07001839 return MatchResult::kEndOfFile;
Austin Schuhd2f96102020-12-01 20:27:29 -08001840 }
1841
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001842 // Now that it has been added (and cannibalized), forget about it
1843 // upstream.
Austin Schuh2dc8c7d2021-07-01 17:41:28 -07001844 boot_merger_.PopFront();
Austin Schuhd2f96102020-12-01 20:27:29 -08001845 }
1846
1847 Message *m = &(messages_.front());
1848
1849 if (source_node_[m->channel_index] == node()) {
1850 // From us, just forward it on, filling the remote data in as invalid.
Austin Schuh79b30942021-01-24 22:32:21 -08001851 QueueMessage(m);
1852 CHECK_GE(matched_messages_.back().monotonic_event_time, last_message_time_);
1853 last_message_time_ = matched_messages_.back().monotonic_event_time;
1854 messages_.pop_front();
1855 timestamp_callback_(&matched_messages_.back());
Eric Schmiedebergb38477e2022-12-02 16:08:04 -07001856 if (CheckReplayChannelsAndMaybePop(matched_messages_.back())) {
1857 return MatchResult::kSkipped;
1858 }
1859 return MatchResult::kQueued;
Austin Schuhd2f96102020-12-01 20:27:29 -08001860 } else {
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001861 // Got a timestamp, find the matching remote data, match it, and return
1862 // it.
Austin Schuhd2f96102020-12-01 20:27:29 -08001863 Message data = MatchingMessageFor(*m);
1864
1865 // Return the data from the remote. The local message only has timestamp
1866 // info which isn't relevant anymore once extracted.
Austin Schuh79b30942021-01-24 22:32:21 -08001867 matched_messages_.emplace_back(TimestampedMessage{
Austin Schuhd2f96102020-12-01 20:27:29 -08001868 .channel_index = m->channel_index,
1869 .queue_index = m->queue_index,
1870 .monotonic_event_time = m->timestamp,
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001871 .realtime_event_time = m->data->realtime_sent_time,
Austin Schuh58646e22021-08-23 23:51:46 -07001872 .remote_queue_index =
1873 BootQueueIndex{.boot = m->monotonic_remote_boot,
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001874 .index = m->data->remote_queue_index.value()},
1875 .monotonic_remote_time = {m->monotonic_remote_boot,
Austin Schuh826e6ce2021-11-18 20:33:10 -08001876 m->data->monotonic_remote_time.value()},
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001877 .realtime_remote_time = m->data->realtime_remote_time.value(),
1878 .monotonic_timestamp_time = {m->monotonic_timestamp_boot,
1879 m->data->monotonic_timestamp_time},
Austin Schuh79b30942021-01-24 22:32:21 -08001880 .data = std::move(data.data)});
1881 CHECK_GE(matched_messages_.back().monotonic_event_time, last_message_time_);
1882 last_message_time_ = matched_messages_.back().monotonic_event_time;
1883 // Since messages_ holds the data, drop it.
1884 messages_.pop_front();
1885 timestamp_callback_(&matched_messages_.back());
Eric Schmiedebergb38477e2022-12-02 16:08:04 -07001886 if (CheckReplayChannelsAndMaybePop(matched_messages_.back())) {
1887 return MatchResult::kSkipped;
1888 }
1889 return MatchResult::kQueued;
Austin Schuh79b30942021-01-24 22:32:21 -08001890 }
1891}
1892
Austin Schuh2dc8c7d2021-07-01 17:41:28 -07001893void TimestampMapper::QueueUntil(BootTimestamp queue_time) {
Austin Schuh79b30942021-01-24 22:32:21 -08001894 while (last_message_time_ <= queue_time) {
1895 if (!QueueMatched()) {
1896 return;
1897 }
Austin Schuhd2f96102020-12-01 20:27:29 -08001898 }
1899}
1900
Austin Schuhe639ea12021-01-25 13:00:22 -08001901void TimestampMapper::QueueFor(chrono::nanoseconds time_estimation_buffer) {
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001902 // Note: queueing for time doesn't really work well across boots. So we
1903 // just assume that if you are using this, you only care about the current
1904 // boot.
Austin Schuh2dc8c7d2021-07-01 17:41:28 -07001905 //
1906 // TODO(austin): Is that the right concept?
1907 //
Austin Schuhe639ea12021-01-25 13:00:22 -08001908 // Make sure we have something queued first. This makes the end time
1909 // calculation simpler, and is typically what folks want regardless.
1910 if (matched_messages_.empty()) {
1911 if (!QueueMatched()) {
1912 return;
1913 }
1914 }
1915
1916 const aos::monotonic_clock::time_point end_queue_time =
Austin Schuh2dc8c7d2021-07-01 17:41:28 -07001917 std::max(monotonic_start_time(
1918 matched_messages_.front().monotonic_event_time.boot),
1919 matched_messages_.front().monotonic_event_time.time) +
Austin Schuhe639ea12021-01-25 13:00:22 -08001920 time_estimation_buffer;
1921
1922 // Place sorted messages on the list until we have
1923 // --time_estimation_buffer_seconds seconds queued up (but queue at least
1924 // until the log starts).
Austin Schuh2dc8c7d2021-07-01 17:41:28 -07001925 while (end_queue_time >= last_message_time_.time) {
Austin Schuhe639ea12021-01-25 13:00:22 -08001926 if (!QueueMatched()) {
1927 return;
1928 }
1929 }
1930}
1931
Austin Schuhd2f96102020-12-01 20:27:29 -08001932void TimestampMapper::PopFront() {
1933 CHECK(first_message_ != FirstMessage::kNeedsUpdate);
Austin Schuh6a7358f2021-11-18 22:40:40 -08001934 last_popped_message_time_ = Front()->monotonic_event_time;
Austin Schuhd2f96102020-12-01 20:27:29 -08001935 first_message_ = FirstMessage::kNeedsUpdate;
1936
Austin Schuh79b30942021-01-24 22:32:21 -08001937 matched_messages_.pop_front();
Austin Schuhd2f96102020-12-01 20:27:29 -08001938}
1939
1940Message TimestampMapper::MatchingMessageFor(const Message &message) {
Austin Schuhd2f96102020-12-01 20:27:29 -08001941 // Figure out what queue index we are looking for.
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001942 CHECK_NOTNULL(message.data);
1943 CHECK(message.data->remote_queue_index.has_value());
Austin Schuh58646e22021-08-23 23:51:46 -07001944 const BootQueueIndex remote_queue_index =
1945 BootQueueIndex{.boot = message.monotonic_remote_boot,
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001946 .index = *message.data->remote_queue_index};
Austin Schuhd2f96102020-12-01 20:27:29 -08001947
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001948 CHECK(message.data->monotonic_remote_time.has_value());
1949 CHECK(message.data->realtime_remote_time.has_value());
Austin Schuhd2f96102020-12-01 20:27:29 -08001950
Austin Schuh2dc8c7d2021-07-01 17:41:28 -07001951 const BootTimestamp monotonic_remote_time{
Austin Schuh48507722021-07-17 17:29:24 -07001952 .boot = message.monotonic_remote_boot,
Austin Schuh826e6ce2021-11-18 20:33:10 -08001953 .time = message.data->monotonic_remote_time.value()};
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001954 const realtime_clock::time_point realtime_remote_time =
1955 *message.data->realtime_remote_time;
Austin Schuhd2f96102020-12-01 20:27:29 -08001956
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001957 TimestampMapper *peer =
1958 nodes_data_[source_node_[message.data->channel_index]].peer;
Austin Schuhfecf1d82020-12-19 16:57:28 -08001959
1960 // We only register the peers which we have data for. So, if we are being
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001961 // asked to pull a timestamp from a peer which doesn't exist, return an
1962 // empty message.
Austin Schuhfecf1d82020-12-19 16:57:28 -08001963 if (peer == nullptr) {
Austin Schuh2dc8c7d2021-07-01 17:41:28 -07001964 // TODO(austin): Make sure the tests hit all these paths with a boot count
1965 // of 1...
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001966 return Message{.channel_index = message.channel_index,
1967 .queue_index = remote_queue_index,
1968 .timestamp = monotonic_remote_time,
1969 .monotonic_remote_boot = 0xffffff,
1970 .monotonic_timestamp_boot = 0xffffff,
1971 .data = nullptr};
Austin Schuhfecf1d82020-12-19 16:57:28 -08001972 }
1973
1974 // The queue which will have the matching data, if available.
1975 std::deque<Message> *data_queue =
1976 &peer->nodes_data_[node()].channels[message.channel_index].messages;
1977
Austin Schuh79b30942021-01-24 22:32:21 -08001978 peer->QueueUnmatchedUntil(monotonic_remote_time);
Austin Schuhd2f96102020-12-01 20:27:29 -08001979
1980 if (data_queue->empty()) {
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07001981 return Message{.channel_index = message.channel_index,
1982 .queue_index = remote_queue_index,
1983 .timestamp = monotonic_remote_time,
1984 .monotonic_remote_boot = 0xffffff,
1985 .monotonic_timestamp_boot = 0xffffff,
1986 .data = nullptr};
Austin Schuhd2f96102020-12-01 20:27:29 -08001987 }
1988
Austin Schuhd2f96102020-12-01 20:27:29 -08001989 if (remote_queue_index < data_queue->front().queue_index ||
1990 remote_queue_index > data_queue->back().queue_index) {
Austin Schuh60e77942022-05-16 17:48:24 -07001991 return Message{.channel_index = message.channel_index,
1992 .queue_index = remote_queue_index,
1993 .timestamp = monotonic_remote_time,
1994 .monotonic_remote_boot = 0xffffff,
1995 .monotonic_timestamp_boot = 0xffffff,
1996 .data = nullptr};
Austin Schuhd2f96102020-12-01 20:27:29 -08001997 }
1998
Austin Schuh993ccb52020-12-12 15:59:32 -08001999 // The algorithm below is constant time with some assumptions. We need there
2000 // to be no missing messages in the data stream. This also assumes a queue
2001 // hasn't wrapped. That is conservative, but should let us get started.
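  // When the queue is contiguous (back index - front index + 1 == size) and
  // entirely on one boot, the matching message can be indexed directly at
  // remote_queue_index.index - front().queue_index.index.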
Austin Schuh58646e22021-08-23 23:51:46 -07002002 if (data_queue->back().queue_index.boot ==
2003 data_queue->front().queue_index.boot &&
2004 (data_queue->back().queue_index.index -
2005 data_queue->front().queue_index.index + 1u ==
2006 data_queue->size())) {
2007 CHECK_EQ(remote_queue_index.boot, data_queue->front().queue_index.boot);
Austin Schuh993ccb52020-12-12 15:59:32 -08002008 // Pull the data out and confirm that the timestamps match as expected.
Austin Schuh58646e22021-08-23 23:51:46 -07002009 //
2010 // TODO(austin): Move if not reliable.
2011 Message result = (*data_queue)[remote_queue_index.index -
2012 data_queue->front().queue_index.index];
Austin Schuh993ccb52020-12-12 15:59:32 -08002013
2014 CHECK_EQ(result.timestamp, monotonic_remote_time)
2015 << ": Queue index matches, but timestamp doesn't. Please investigate!";
Austin Schuh6a7358f2021-11-18 22:40:40 -08002016 CHECK_EQ(result.data->realtime_sent_time, realtime_remote_time)
Austin Schuh993ccb52020-12-12 15:59:32 -08002017 << ": Queue index matches, but timestamp doesn't. Please investigate!";
2018 // Now drop the data off the front. We have deduplicated timestamps, so we
2019 // are done. And all the data is in order.
Austin Schuh58646e22021-08-23 23:51:46 -07002020 data_queue->erase(
2021 data_queue->begin(),
2022 data_queue->begin() +
2023 (remote_queue_index.index - data_queue->front().queue_index.index));
Austin Schuh993ccb52020-12-12 15:59:32 -08002024 return result;
2025 } else {
Austin Schuh58646e22021-08-23 23:51:46 -07002026 // TODO(austin): Binary search.
2027 auto it = std::find_if(
2028 data_queue->begin(), data_queue->end(),
2029 [remote_queue_index,
2030 remote_boot = monotonic_remote_time.boot](const Message &m) {
2031 return m.queue_index == remote_queue_index &&
2032 m.timestamp.boot == remote_boot;
2033 });
Austin Schuh993ccb52020-12-12 15:59:32 -08002034 if (it == data_queue->end()) {
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07002035 return Message{.channel_index = message.channel_index,
2036 .queue_index = remote_queue_index,
2037 .timestamp = monotonic_remote_time,
2038 .monotonic_remote_boot = 0xffffff,
2039 .monotonic_timestamp_boot = 0xffffff,
2040 .data = nullptr};
Austin Schuh993ccb52020-12-12 15:59:32 -08002041 }
2042
2043 Message result = std::move(*it);
2044
2045 CHECK_EQ(result.timestamp, monotonic_remote_time)
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07002046 << ": Queue index matches, but timestamp doesn't. Please "
2047 "investigate!";
2048 CHECK_EQ(result.data->realtime_sent_time, realtime_remote_time)
2049 << ": Queue index matches, but timestamp doesn't. Please "
2050 "investigate!";
Austin Schuh993ccb52020-12-12 15:59:32 -08002051
Austin Schuhd6b1f4c2021-11-18 20:29:00 -08002052 // Erase everything up to this message. We want to keep 1 message in the
2053 // queue so we can handle reliable messages forwarded across boots.
2054 data_queue->erase(data_queue->begin(), it);
Austin Schuh993ccb52020-12-12 15:59:32 -08002055
2056 return result;
2057 }
Austin Schuhd2f96102020-12-01 20:27:29 -08002058}
2059
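// Queues messages from the boot merger into messages_ (and, via Queue(),
// saves copies for any peers) until the newest queued message is newer than
// t.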
Austin Schuh2dc8c7d2021-07-01 17:41:28 -07002060void TimestampMapper::QueueUnmatchedUntil(BootTimestamp t) {
Austin Schuhd2f96102020-12-01 20:27:29 -08002061 if (queued_until_ > t) {
2062 return;
2063 }
2064 while (true) {
2065 if (!messages_.empty() && messages_.back().timestamp > t) {
2066 queued_until_ = std::max(queued_until_, messages_.back().timestamp);
2067 return;
2068 }
2069
2070 if (!Queue()) {
2071 // Found nothing to add, we are out of data!
Austin Schuh2dc8c7d2021-07-01 17:41:28 -07002072 queued_until_ = BootTimestamp::max_time();
Austin Schuhd2f96102020-12-01 20:27:29 -08002073 return;
2074 }
2075
Tyler Chatowb7c6eba2021-07-28 14:43:23 -07002076 // Now that it has been added (and cannibalized), forget about it
2077 // upstream.
Austin Schuh2dc8c7d2021-07-01 17:41:28 -07002078 boot_merger_.PopFront();
Austin Schuhd2f96102020-12-01 20:27:29 -08002079 }
2080}
2081
2082bool TimestampMapper::Queue() {
Austin Schuh2dc8c7d2021-07-01 17:41:28 -07002083 Message *m = boot_merger_.Front();
Austin Schuhd2f96102020-12-01 20:27:29 -08002084 if (m == nullptr) {
2085 return false;
2086 }
2087 for (NodeData &node_data : nodes_data_) {
2088 if (!node_data.any_delivered) continue;
Austin Schuh36c00932021-07-19 18:13:21 -07002089 if (!node_data.save_for_peer) continue;
Austin Schuhd2f96102020-12-01 20:27:29 -08002090 if (node_data.channels[m->channel_index].delivered) {
Austin Schuh6a7358f2021-11-18 22:40:40 -08002091 // If we have data but no timestamps (a classic case is logs where the
2092 // timestamps didn't get logged), this queue can grow indefinitely. We don't
2093 // need to keep anything that is older than the last message returned.
2094
2095 // We have the time on the source node.
2096 // We care to wait until we have the time on the destination node.
2097 std::deque<Message> &messages =
2098 node_data.channels[m->channel_index].messages;
2099 // Max delay over the network is the TTL, so let's take the queue time and
2100 // add TTL to it. Don't forget any messages which are reliable until
2101 // someone can come up with a good reason to forget those too.
2102 if (node_data.channels[m->channel_index].time_to_live >
2103 chrono::nanoseconds(0)) {
2104 // We need to make *some* assumptions about network delay for this to
2105 // work. We want to only look at the RX side. This means we need to
2106 // track the last time a message was popped from any channel from the
2107 // node sending this message, and compare that to the max time we expect
2108 // that a message will take to be delivered across the network. This
2109 // assumes that messages are popped in time order as a proxy for
2110 // measuring the distributed time at this layer.
2111 //
2112 // Leave at least 1 message in here so we can handle reboots and
2113 // messages getting sent twice.
2114 while (messages.size() > 1u &&
2115 messages.begin()->timestamp +
2116 node_data.channels[m->channel_index].time_to_live +
2117 chrono::duration_cast<chrono::nanoseconds>(
2118 chrono::duration<double>(FLAGS_max_network_delay)) <
2119 last_popped_message_time_) {
2120 messages.pop_front();
2121 }
2122 }
Austin Schuhd2f96102020-12-01 20:27:29 -08002123 node_data.channels[m->channel_index].messages.emplace_back(*m);
2124 }
2125 }
2126
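  // Regardless of which peers saved a copy above, the message always lands on
  // our own messages_ queue so it can be matched or forwarded from there.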
2127 messages_.emplace_back(std::move(*m));
2128 return true;
2129}
2130
2131std::string TimestampMapper::DebugString() const {
2132 std::stringstream ss;
Austin Schuh6e014b82021-09-14 17:46:33 -07002133 ss << "node " << node() << " (" << node_name() << ") [\n";
Austin Schuhd2f96102020-12-01 20:27:29 -08002134 for (const Message &message : messages_) {
2135 ss << " " << message << "\n";
2136 }
2137 ss << "] queued_until " << queued_until_;
2138 for (const NodeData &ns : nodes_data_) {
2139 if (ns.peer == nullptr) continue;
2140 ss << "\nnode " << ns.peer->node() << " remote_data [\n";
2141 size_t channel_index = 0;
2142 for (const NodeData::ChannelData &channel_data :
2143 ns.peer->nodes_data_[node()].channels) {
2144 if (channel_data.messages.empty()) {
2145 continue;
2146 }
Austin Schuhb000de62020-12-03 22:00:40 -08002147
Austin Schuhd2f96102020-12-01 20:27:29 -08002148 ss << " channel " << channel_index << " [\n";
2149 for (const Message &m : channel_data.messages) {
2150 ss << " " << m << "\n";
2151 }
2152 ss << " ]\n";
2153 ++channel_index;
2154 }
2155 ss << "] queued_until " << ns.peer->queued_until_;
2156 }
2157 return ss.str();
2158}
2159
Austin Schuhee711052020-08-24 16:06:09 -07002160std::string MaybeNodeName(const Node *node) {
2161 if (node != nullptr) {
2162 return node->name()->str() + " ";
2163 }
2164 return "";
2165}
2166
Brian Silvermanf51499a2020-09-21 12:49:08 -07002167} // namespace aos::logger