#include "aos/events/logging/logger.h"

#include <fcntl.h>
#include <limits.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/uio.h>

#include <vector>

#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "aos/events/event_loop.h"
#include "aos/events/logging/logger_generated.h"
#include "aos/flatbuffer_merge.h"
#include "aos/network/team_number.h"
#include "aos/time/time.h"
#include "flatbuffers/flatbuffers.h"

DEFINE_int32(flush_size, 1000000,
             "Number of outstanding bytes to allow before flushing to disk.");
DEFINE_bool(skip_missing_forwarding_entries, false,
            "If true, drop any forwarding entries with missing data. If "
            "false, CHECK.");

namespace aos {
namespace logger {

namespace chrono = std::chrono;
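
// DetachedBufferWriter buffers queued flatbuffers in memory and batches them
// into large writev calls to keep syscall overhead low.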
DetachedBufferWriter::DetachedBufferWriter(absl::string_view filename)
    : fd_(open(std::string(filename).c_str(),
               O_RDWR | O_CLOEXEC | O_CREAT | O_EXCL, 0774)) {
  PCHECK(fd_ != -1) << ": Failed to open " << filename;
}

DetachedBufferWriter::~DetachedBufferWriter() {
  Flush();
  PLOG_IF(ERROR, close(fd_) == -1) << ": Failed to close logfile";
}

void DetachedBufferWriter::QueueSizedFlatbuffer(
    flatbuffers::FlatBufferBuilder *fbb) {
  QueueSizedFlatbuffer(fbb->Release());
}

void DetachedBufferWriter::QueueSizedFlatbuffer(
    flatbuffers::DetachedBuffer &&buffer) {
  queued_size_ += buffer.size();
  queue_.emplace_back(std::move(buffer));

  // Flush if we are at the max number of iovs per writev, or have queued
  // enough data. Otherwise writev will fail with an invalid argument.
  if (queued_size_ > static_cast<size_t>(FLAGS_flush_size) ||
      queue_.size() == IOV_MAX) {
    Flush();
  }
}
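
// Flush hands every queued buffer to the kernel in a single writev call, so
// one syscall covers however many messages have accumulated.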
void DetachedBufferWriter::Flush() {
  if (queue_.size() == 0u) {
    return;
  }
  iovec_.clear();
  iovec_.reserve(queue_.size());
  size_t counted_size = 0;
  for (size_t i = 0; i < queue_.size(); ++i) {
    struct iovec n;
    n.iov_base = queue_[i].data();
    n.iov_len = queue_[i].size();
    counted_size += n.iov_len;
    iovec_.emplace_back(std::move(n));
  }
  CHECK_EQ(counted_size, queued_size_);
  const ssize_t written = writev(fd_, iovec_.data(), iovec_.size());

  PCHECK(written == static_cast<ssize_t>(queued_size_))
      << ": Wrote " << written << " expected " << queued_size_;

  queued_size_ = 0;
  queue_.clear();
  // TODO(austin): Handle partial writes in some way other than crashing...
}

Logger::Logger(DetachedBufferWriter *writer, EventLoop *event_loop,
               std::chrono::milliseconds polling_period)
    : event_loop_(event_loop),
      writer_(writer),
      timer_handler_(event_loop_->AddTimer([this]() { DoLogData(); })),
      polling_period_(polling_period) {
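  // Build a fetcher for each channel we are supposed to log on this node, and
  // record whether to log the message data, the delivery times, or both.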
  for (const Channel *channel : *event_loop_->configuration()->channels()) {
    FetcherStruct fs;
    const bool is_readable =
        configuration::ChannelIsReadableOnNode(channel, event_loop_->node());
    const bool log_message = configuration::ChannelMessageIsLoggedOnNode(
                                 channel, event_loop_->node()) &&
                             is_readable;

    const bool log_delivery_times =
        (event_loop_->node() == nullptr)
            ? false
            : configuration::ConnectionDeliveryTimeIsLoggedOnNode(
                  channel, event_loop_->node(), event_loop_->node());

    if (log_message || log_delivery_times) {
      fs.fetcher = event_loop->MakeRawFetcher(channel);
      VLOG(1) << "Logging channel "
              << configuration::CleanedChannelToString(channel);

      if (log_delivery_times) {
        if (log_message) {
          VLOG(1) << " Logging message and delivery times";
          fs.log_type = LogType::kLogMessageAndDeliveryTime;
        } else {
          VLOG(1) << " Logging delivery times only";
          fs.log_type = LogType::kLogDeliveryTimeOnly;
        }
      } else {
        // We don't have a good use case right now for logging a forwarded
        // message without its delivery times, or for logging the delivery
        // times on another node. Fail rather than produce bad results.
        CHECK(configuration::ChannelIsSendableOnNode(channel,
                                                     event_loop_->node()))
            << ": Logger only knows how to log remote messages with "
               "forwarding timestamps.";
        VLOG(1) << " Logging message only";
        fs.log_type = LogType::kLogMessage;
      }
    }

    fs.written = false;
    fetchers_.emplace_back(std::move(fs));
  }
  // When things start, we want to log the header, then the most recent
  // messages available on each fetcher to capture the previous state, then
  // start polling.
  event_loop_->OnRun([this, polling_period]() {
    // Grab data from each channel right before we declare the log file
    // started so we capture the latest message on each channel. This lets
    // infrequent messages, like the configuration, still make it into the
    // log.
    for (FetcherStruct &f : fetchers_) {
      if (f.fetcher.get() != nullptr) {
        f.written = !f.fetcher->Fetch();
      }
    }

    // We need to pick a point in time to declare the log file "started".
    // This starts here. It needs to be after everything is fetched so that
    // the fetchers are all pointed at the most recent message before the
    // start time.
    const monotonic_clock::time_point monotonic_now =
        event_loop_->monotonic_now();
    const realtime_clock::time_point realtime_now =
        event_loop_->realtime_now();
    last_synchronized_time_ = monotonic_now;

    {
      // Now write the header with this timestamp in it.
      flatbuffers::FlatBufferBuilder fbb;
      fbb.ForceDefaults(1);

      flatbuffers::Offset<aos::Configuration> configuration_offset =
          CopyFlatBuffer(event_loop_->configuration(), &fbb);

      flatbuffers::Offset<flatbuffers::String> string_offset =
          fbb.CreateString(network::GetHostname());

      flatbuffers::Offset<Node> node_offset =
          CopyFlatBuffer(event_loop_->node(), &fbb);
      LOG(INFO) << "Logging node as " << FlatbufferToJson(event_loop_->node());

      aos::logger::LogFileHeader::Builder log_file_header_builder(fbb);

      log_file_header_builder.add_name(string_offset);

      log_file_header_builder.add_node(node_offset);

      log_file_header_builder.add_configuration(configuration_offset);
      // The theoretical worst case out-of-order duration is twice the polling
      // period: one message could get logged right after a boundary but be
      // from right before the next boundary, and the reverse could happen for
      // another message. Report 3x to be extra safe, since the cost on the
      // read side isn't huge.
      log_file_header_builder.add_max_out_of_order_duration(
          std::chrono::duration_cast<std::chrono::nanoseconds>(3 *
                                                               polling_period)
              .count());

      log_file_header_builder.add_monotonic_start_time(
          std::chrono::duration_cast<std::chrono::nanoseconds>(
              monotonic_now.time_since_epoch())
              .count());
      log_file_header_builder.add_realtime_start_time(
          std::chrono::duration_cast<std::chrono::nanoseconds>(
              realtime_now.time_since_epoch())
              .count());

      fbb.FinishSizePrefixed(log_file_header_builder.Finish());
      writer_->QueueSizedFlatbuffer(&fbb);
    }

    timer_handler_->Setup(event_loop_->monotonic_now() + polling_period,
                          polling_period);
  });
}
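
// Packs the metadata in |context| (and, depending on |log_type|, the message
// data and/or the remote delivery timestamps) into a MessageHeader table in
// *fbb.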
flatbuffers::Offset<MessageHeader> PackMessage(
    flatbuffers::FlatBufferBuilder *fbb, const Context &context,
    int channel_index, LogType log_type) {
  flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data_offset;

  switch (log_type) {
    case LogType::kLogMessage:
    case LogType::kLogMessageAndDeliveryTime:
      data_offset =
          fbb->CreateVector(static_cast<uint8_t *>(context.data), context.size);
      break;

    case LogType::kLogDeliveryTimeOnly:
      break;
  }

  MessageHeader::Builder message_header_builder(*fbb);
  message_header_builder.add_channel_index(channel_index);
  message_header_builder.add_queue_index(context.queue_index);
  message_header_builder.add_monotonic_sent_time(
      context.monotonic_event_time.time_since_epoch().count());
  message_header_builder.add_realtime_sent_time(
      context.realtime_event_time.time_since_epoch().count());

  switch (log_type) {
    case LogType::kLogMessage:
      message_header_builder.add_data(data_offset);
      break;

    case LogType::kLogMessageAndDeliveryTime:
      message_header_builder.add_data(data_offset);
      [[fallthrough]];

    case LogType::kLogDeliveryTimeOnly:
      message_header_builder.add_monotonic_remote_time(
          context.monotonic_remote_time.time_since_epoch().count());
      message_header_builder.add_realtime_remote_time(
          context.realtime_remote_time.time_since_epoch().count());
      message_header_builder.add_remote_queue_index(
          context.remote_queue_index);
      break;
  }

  return message_header_builder.Finish();
}
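
// Runs on every timer tick: drain each fetcher up to a synchronized cutoff
// time so nothing in the file is out of order by more than the promised
// max_out_of_order_duration, then flush the batch to disk.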
void Logger::DoLogData() {
  // We want to guarantee that messages aren't out of order by more than
  // max_out_of_order_duration. To do this, we need sync points. Every write
  // cycle should be a sync point.
  const monotonic_clock::time_point monotonic_now = monotonic_clock::now();

  do {
    // Move the sync point up by at most polling_period. This forces one sync
    // per iteration, even if it is small.
    last_synchronized_time_ =
        std::min(last_synchronized_time_ + polling_period_, monotonic_now);
    size_t channel_index = 0;
    // Write each channel to disk, one at a time.
    for (FetcherStruct &f : fetchers_) {
      // Skip any channels which we aren't supposed to log.
      if (f.fetcher.get() != nullptr) {
        while (true) {
          if (f.written) {
            if (!f.fetcher->FetchNext()) {
              VLOG(2) << "No new data on "
                      << configuration::CleanedChannelToString(
                             f.fetcher->channel());
              break;
            } else {
              f.written = false;
            }
          }

          CHECK(!f.written);

          // TODO(james): Write tests to exercise this logic.
          if (f.fetcher->context().monotonic_event_time <
              last_synchronized_time_) {
            // Write!
            flatbuffers::FlatBufferBuilder fbb(f.fetcher->context().size +
                                               max_header_size_);
            fbb.ForceDefaults(1);

            fbb.FinishSizePrefixed(PackMessage(&fbb, f.fetcher->context(),
                                               channel_index, f.log_type));

            VLOG(2) << "Writing data for channel "
                    << configuration::CleanedChannelToString(
                           f.fetcher->channel());

            max_header_size_ = std::max(
                max_header_size_, fbb.GetSize() - f.fetcher->context().size);
            writer_->QueueSizedFlatbuffer(&fbb);

            f.written = true;
          } else {
            break;
          }
        }
      }

      ++channel_index;
    }

    CHECK_EQ(channel_index, fetchers_.size());

    // If we missed cycles, we could be pretty far behind. Spin until we are
    // caught up.
  } while (last_synchronized_time_ + polling_period_ < monotonic_now);

  writer_->Flush();
}
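
// Reading starts with the size-prefixed LogFileHeader, which carries the
// configuration, the start times, and the max_out_of_order_duration promise
// used below when queueing messages for replay.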
LogReader::LogReader(absl::string_view filename)
    : fd_(open(std::string(filename).c_str(), O_RDONLY | O_CLOEXEC)) {
  PCHECK(fd_ != -1) << ": Failed to open " << filename;

  // Make sure we have enough to read the size.
  absl::Span<const uint8_t> config_data = ReadMessage();

  // Make sure something was read.
  CHECK(config_data != absl::Span<const uint8_t>());

  // And copy the config so we have it forever.
  configuration_ =
      std::vector<uint8_t>(config_data.begin(), config_data.end());

  max_out_of_order_duration_ = std::chrono::nanoseconds(
      flatbuffers::GetSizePrefixedRoot<LogFileHeader>(configuration_.data())
          ->max_out_of_order_duration());

  channels_.resize(configuration()->channels()->size());

  QueueMessages();
}

bool LogReader::ReadBlock() {
  if (end_of_file_) {
    return false;
  }

  // Appends 256k. This is enough that the read call is efficient. We don't
  // want to spend too much time reading small chunks because the syscalls for
  // that will be expensive.
  constexpr size_t kReadSize = 256 * 1024;

  // Strip off any unused data at the front.
  if (consumed_data_ != 0) {
    data_.erase(data_.begin(), data_.begin() + consumed_data_);
    consumed_data_ = 0;
  }

  const size_t starting_size = data_.size();

  // This should automatically grow the backing store. It won't shrink if we
  // get a small chunk later. This reduces allocations when we want to append
  // more data.
  data_.resize(data_.size() + kReadSize);

  const ssize_t count = read(fd_, &data_[starting_size], kReadSize);
  data_.resize(starting_size + std::max(count, static_cast<ssize_t>(0)));
  if (count == 0) {
    end_of_file_ = true;
    return false;
  }
  PCHECK(count > 0);

  return true;
}

bool LogReader::MessageAvailable() {
  // Are we big enough to read the size?
  if (data_.size() - consumed_data_ < sizeof(flatbuffers::uoffset_t)) {
    return false;
  }

  // Then, are we big enough to read the full message?
  const size_t data_size =
      flatbuffers::GetPrefixedSize(data_.data() + consumed_data_) +
      sizeof(flatbuffers::uoffset_t);
  if (data_.size() < consumed_data_ + data_size) {
    return false;
  }

  return true;
}

absl::Span<const uint8_t> LogReader::ReadMessage() {
  // Make sure we have enough for the size.
  if (data_.size() - consumed_data_ < sizeof(flatbuffers::uoffset_t)) {
    if (!ReadBlock()) {
      return absl::Span<const uint8_t>();
    }
  }

  // Now make sure we have enough for the message.
  const size_t data_size =
      flatbuffers::GetPrefixedSize(data_.data() + consumed_data_) +
      sizeof(flatbuffers::uoffset_t);
  while (data_.size() < consumed_data_ + data_size) {
    if (!ReadBlock()) {
      return absl::Span<const uint8_t>();
    }
  }

  // And return it, consuming the data.
  const uint8_t *data_ptr = data_.data() + consumed_data_;

  consumed_data_ += data_size;

  return absl::Span<const uint8_t>(data_ptr, data_size);
}
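
// Reads messages into the per-channel queues until we are
// max_out_of_order_duration past the oldest queued message. Since writers
// promise messages are sorted to within that window, this guarantees the
// oldest queued message is safe to send.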
void LogReader::QueueMessages() {
  while (true) {
    // Don't queue if we have enough data already.
    // When a log file starts, there should be a message from each channel.
    // Those messages might be very old. Make sure to read a chunk past the
    // starting time.
    if (channel_heap_.size() > 0 &&
        newest_timestamp_ >
            std::max(oldest_message().first, monotonic_start_time()) +
                max_out_of_order_duration_) {
      break;
    }

    absl::Span<const uint8_t> msg_data = ReadMessage();
    if (msg_data == absl::Span<const uint8_t>()) {
      break;
    }

    FlatbufferVector<MessageHeader> msg(std::vector<uint8_t>(
        msg_data.begin() + sizeof(flatbuffers::uoffset_t), msg_data.end()));

    EmplaceDataBack(std::move(msg));
  }

  queue_data_time_ = newest_timestamp_ - max_out_of_order_duration_;
}

const Configuration *LogReader::configuration() const {
  return flatbuffers::GetSizePrefixedRoot<LogFileHeader>(configuration_.data())
      ->configuration();
}

const Node *LogReader::node() const {
  return configuration::GetNode(
      configuration(),
      flatbuffers::GetSizePrefixedRoot<LogFileHeader>(configuration_.data())
          ->node()
          ->name()
          ->string_view());
}

monotonic_clock::time_point LogReader::monotonic_start_time() {
  return monotonic_clock::time_point(std::chrono::nanoseconds(
      flatbuffers::GetSizePrefixedRoot<LogFileHeader>(configuration_.data())
          ->monotonic_start_time()));
}

realtime_clock::time_point LogReader::realtime_start_time() {
  return realtime_clock::time_point(std::chrono::nanoseconds(
      flatbuffers::GetSizePrefixedRoot<LogFileHeader>(configuration_.data())
          ->realtime_start_time()));
}

void LogReader::Register(SimulatedEventLoopFactory *event_loop_factory) {
  event_loop_unique_ptr_ = event_loop_factory->MakeEventLoop("log_reader");
  event_loop_factory_ = event_loop_factory;
  // We don't run timing reports when trying to print out logged data, because
  // otherwise we would end up printing out the timing reports themselves...
  // This is only really relevant when we are replaying into a simulation.
  event_loop_unique_ptr_->SkipTimingReport();

  Register(event_loop_unique_ptr_.get());
  event_loop_factory_->RunFor(monotonic_start_time() -
                              event_loop_factory_->monotonic_now());
}

void LogReader::Register(EventLoop *event_loop) {
  event_loop_ = event_loop;

  // Otherwise we replay the timing report and try to resend it...
  event_loop_->SkipTimingReport();

  for (size_t i = 0; i < channels_.size(); ++i) {
    // Compare the string contents, not the flatbuffers::String pointers,
    // since the two configurations live in different buffers.
    CHECK_EQ(configuration()->channels()->Get(i)->name()->string_view(),
             event_loop_->configuration()->channels()->Get(i)->name()
                 ->string_view());
    CHECK_EQ(configuration()->channels()->Get(i)->type()->string_view(),
             event_loop_->configuration()->channels()->Get(i)->type()
                 ->string_view());

    channels_[i].raw_sender = event_loop_->MakeRawSender(
        event_loop_->configuration()->channels()->Get(i));
  }
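
  // Replay is driven by a timer walking the channel heap: pop the oldest
  // message, re-send it, then schedule the timer for the next oldest one.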
  timer_handler_ = event_loop_->AddTimer([this]() {
    std::pair<monotonic_clock::time_point, int> oldest_channel_index =
        PopOldestChannel();
    const monotonic_clock::time_point monotonic_now =
        event_loop_->context().monotonic_event_time;
    CHECK(monotonic_now == oldest_channel_index.first)
        << ": Now " << monotonic_now.time_since_epoch().count()
        << " trying to send "
        << oldest_channel_index.first.time_since_epoch().count();

    struct LogReader::ChannelData &channel =
        channels_[oldest_channel_index.second];

    FlatbufferVector<MessageHeader> front = std::move(channel.front());

    if (oldest_channel_index.first > monotonic_start_time() ||
        event_loop_factory_ != nullptr) {
      if (!FLAGS_skip_missing_forwarding_entries ||
          front.message().data() != nullptr) {
        CHECK(front.message().data() != nullptr)
            << ": Got a message without data. Forwarding entry which was not "
               "matched? Use --skip_missing_forwarding_entries to ignore "
               "this.";

        // If we have access to the factory, use it to fix the realtime time.
        if (event_loop_factory_ != nullptr) {
          event_loop_factory_->SetRealtimeOffset(
              monotonic_clock::time_point(
                  chrono::nanoseconds(front.message().monotonic_sent_time())),
              realtime_clock::time_point(
                  chrono::nanoseconds(front.message().realtime_sent_time())));
        }

        channel.raw_sender->Send(
            front.message().data()->Data(), front.message().data()->size(),
            monotonic_clock::time_point(
                chrono::nanoseconds(front.message().monotonic_remote_time())),
            realtime_clock::time_point(
                chrono::nanoseconds(front.message().realtime_remote_time())),
            front.message().remote_queue_index());
      }
    } else {
      LOG(WARNING) << "Not sending data from before the start of the log file. "
                   << oldest_channel_index.first.time_since_epoch().count()
                   << " start "
                   << monotonic_start_time().time_since_epoch().count() << " "
                   << FlatbufferToJson(front);
    }
    channel.data.pop_front();

    // Re-push it and update the oldest timestamp.
    if (channel.data.size() != 0) {
      const monotonic_clock::time_point timestamp =
          monotonic_clock::time_point(chrono::nanoseconds(
              channel.front().message().monotonic_sent_time()));
      PushChannelHeap(timestamp, oldest_channel_index.second);
      channel.oldest_timestamp = timestamp;
    } else {
      channel.oldest_timestamp = monotonic_clock::min_time;
    }

    if (monotonic_now > queue_data_time_) {
      QueueMessages();
    }

    if (channel_heap_.size() != 0) {
      timer_handler_->Setup(oldest_message().first);
    }
  });

  if (channel_heap_.size() > 0u) {
    event_loop_->OnRun(
        [this]() { timer_handler_->Setup(oldest_message().first); });
  }
}

void LogReader::Deregister() {
  for (size_t i = 0; i < channels_.size(); ++i) {
    channels_[i].raw_sender.reset();
  }

  event_loop_factory_ = nullptr;
  event_loop_unique_ptr_.reset();
  event_loop_ = nullptr;
}
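
// Appends a newly read message to its channel's queue, tracking the newest
// timestamp seen and pushing the channel onto the heap if this is its first
// queued message.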
void LogReader::EmplaceDataBack(FlatbufferVector<MessageHeader> &&new_data) {
  const monotonic_clock::time_point timestamp = monotonic_clock::time_point(
      chrono::nanoseconds(new_data.message().monotonic_sent_time()));
  const size_t channel_index = new_data.message().channel_index();
  CHECK_LT(channel_index, channels_.size());
  newest_timestamp_ = std::max(newest_timestamp_, timestamp);
  if (channels_[channel_index].data.size() == 0) {
    channels_[channel_index].oldest_timestamp = timestamp;
    PushChannelHeap(timestamp, channel_index);
  }
  channels_[channel_index].data.emplace_back(std::move(new_data));
}

namespace {
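
// Comparator for the channel heap. std::push_heap/std::pop_heap build a
// max-heap, so returning true for "newer" entries leaves the oldest message
// (ties broken by channel index) at the front.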
bool ChannelHeapCompare(
    const std::pair<monotonic_clock::time_point, int> first,
    const std::pair<monotonic_clock::time_point, int> second) {
  if (first.first > second.first) {
    return true;
  } else if (first.first == second.first) {
    return first.second > second.second;
  } else {
    return false;
  }
}

}  // namespace

void LogReader::PushChannelHeap(monotonic_clock::time_point timestamp,
                                int channel_index) {
  channel_heap_.push_back(std::make_pair(timestamp, channel_index));

  // The default sort puts the newest message first. Use a custom comparator
  // to put the oldest message first.
  std::push_heap(channel_heap_.begin(), channel_heap_.end(),
                 ChannelHeapCompare);
}

std::pair<monotonic_clock::time_point, int> LogReader::PopOldestChannel() {
  std::pair<monotonic_clock::time_point, int> result = channel_heap_.front();
  std::pop_heap(channel_heap_.begin(), channel_heap_.end(),
                &ChannelHeapCompare);
  channel_heap_.pop_back();
  return result;
}

}  // namespace logger
}  // namespace aos