blob: b6ee4a59a54300d2d6cad5a0b6b5f6310f54531a [file] [log] [blame]
Austin Schuhb06f03b2021-02-17 22:00:37 -08001#include "aos/events/logging/log_writer.h"
2
Austin Schuh6bb8a822021-03-31 23:04:39 -07003#include <dirent.h>
4
Austin Schuhb06f03b2021-02-17 22:00:37 -08005#include <functional>
6#include <map>
7#include <vector>
8
9#include "aos/configuration.h"
10#include "aos/events/event_loop.h"
11#include "aos/network/message_bridge_server_generated.h"
12#include "aos/network/team_number.h"
Austin Schuh61e973f2021-02-21 21:43:56 -080013#include "aos/network/timestamp_channel.h"
Austin Schuhb06f03b2021-02-17 22:00:37 -080014
15namespace aos {
16namespace logger {
17namespace {
18using message_bridge::RemoteMessage;
Austin Schuhbd06ae42021-03-31 22:48:21 -070019namespace chrono = std::chrono;
Austin Schuhb06f03b2021-02-17 22:00:37 -080020} // namespace
21
22Logger::Logger(EventLoop *event_loop, const Configuration *configuration,
23 std::function<bool(const Channel *)> should_log)
24 : event_loop_(event_loop),
25 configuration_(configuration),
Austin Schuh5b728b72021-06-16 14:57:15 -070026 node_(configuration::GetNode(configuration_, event_loop->node())),
27 node_index_(configuration::GetNodeIndex(configuration_, node_)),
Austin Schuhb06f03b2021-02-17 22:00:37 -080028 name_(network::GetHostname()),
29 timer_handler_(event_loop_->AddTimer(
Austin Schuh30586902021-03-30 22:54:08 -070030 [this]() { DoLogData(event_loop_->monotonic_now(), true); })),
Austin Schuhb06f03b2021-02-17 22:00:37 -080031 server_statistics_fetcher_(
32 configuration::MultiNode(event_loop_->configuration())
33 ? event_loop_->MakeFetcher<message_bridge::ServerStatistics>(
34 "/aos")
35 : aos::Fetcher<message_bridge::ServerStatistics>()) {
Austin Schuh58646e22021-08-23 23:51:46 -070036 timer_handler_->set_name("channel_poll");
Austin Schuh5b728b72021-06-16 14:57:15 -070037 VLOG(1) << "Creating logger for " << FlatbufferToJson(node_);
Austin Schuhb06f03b2021-02-17 22:00:37 -080038
Naman Gupta41d70c22022-11-21 15:29:52 -080039 // When we are logging remote timestamps, we need to be able to translate
40 // from the channel index that the event loop uses to the channel index in
41 // the config in the log file.
Austin Schuh01f3b392022-01-25 20:03:09 -080042 event_loop_to_logged_channel_index_.resize(
43 event_loop->configuration()->channels()->size(), -1);
44 for (size_t event_loop_channel_index = 0;
45 event_loop_channel_index <
46 event_loop->configuration()->channels()->size();
47 ++event_loop_channel_index) {
48 const Channel *event_loop_channel =
49 event_loop->configuration()->channels()->Get(event_loop_channel_index);
50
51 const Channel *logged_channel = aos::configuration::GetChannel(
52 configuration_, event_loop_channel->name()->string_view(),
53 event_loop_channel->type()->string_view(), "", node_);
54
55 if (logged_channel != nullptr) {
56 event_loop_to_logged_channel_index_[event_loop_channel_index] =
57 configuration::ChannelIndex(configuration_, logged_channel);
58 }
59 }
60
61 // Map to match source channels with the timestamp logger, if the contents
62 // should be reliable, and a list of all channels logged on it to be treated
63 // as reliable.
64 std::map<const Channel *, std::tuple<const Node *, bool, std::vector<bool>>>
65 timestamp_logger_channels;
Austin Schuhb06f03b2021-02-17 22:00:37 -080066
Austin Schuh61e973f2021-02-21 21:43:56 -080067 message_bridge::ChannelTimestampFinder finder(event_loop_);
68 for (const Channel *channel : *event_loop_->configuration()->channels()) {
69 if (!configuration::ChannelIsSendableOnNode(channel, event_loop_->node())) {
Austin Schuhb06f03b2021-02-17 22:00:37 -080070 continue;
71 }
Austin Schuh61e973f2021-02-21 21:43:56 -080072 if (!channel->has_destination_nodes()) {
73 continue;
74 }
Austin Schuh01f3b392022-01-25 20:03:09 -080075 const size_t channel_index =
76 configuration::ChannelIndex(event_loop_->configuration(), channel);
77
Austin Schuh61e973f2021-02-21 21:43:56 -080078 for (const Connection *connection : *channel->destination_nodes()) {
79 if (configuration::ConnectionDeliveryTimeIsLoggedOnNode(
80 connection, event_loop_->node())) {
81 const Node *other_node = configuration::GetNode(
Austin Schuh5b728b72021-06-16 14:57:15 -070082 configuration_, connection->name()->string_view());
Austin Schuh61e973f2021-02-21 21:43:56 -080083
84 VLOG(1) << "Timestamps are logged from "
85 << FlatbufferToJson(other_node);
Austin Schuh01f3b392022-01-25 20:03:09 -080086 // True if each channel's remote timestamps are split into a separate
87 // RemoteMessage channel.
88 const bool is_split =
89 finder.SplitChannelForChannel(channel, connection) != nullptr;
90
91 const Channel *const timestamp_logger_channel =
92 finder.ForChannel(channel, connection);
93
94 auto it = timestamp_logger_channels.find(timestamp_logger_channel);
95 if (it != timestamp_logger_channels.end()) {
96 CHECK(!is_split);
97 CHECK_LT(channel_index, std::get<2>(it->second).size());
Brian Smartt796cca02022-04-12 15:07:21 -070098 std::get<2>(it->second)[channel_index] =
99 (connection->time_to_live() == 0);
Austin Schuh01f3b392022-01-25 20:03:09 -0800100 } else {
101 if (is_split) {
102 timestamp_logger_channels.insert(std::make_pair(
103 timestamp_logger_channel,
104 std::make_tuple(other_node, (connection->time_to_live() == 0),
105 std::vector<bool>())));
106 } else {
107 std::vector<bool> channel_reliable_contents(
108 event_loop->configuration()->channels()->size(), false);
109 channel_reliable_contents[channel_index] =
110 (connection->time_to_live() == 0);
111
112 timestamp_logger_channels.insert(std::make_pair(
113 timestamp_logger_channel,
114 std::make_tuple(other_node, false,
115 std::move(channel_reliable_contents))));
116 }
117 }
Austin Schuh61e973f2021-02-21 21:43:56 -0800118 }
119 }
Austin Schuhb06f03b2021-02-17 22:00:37 -0800120 }
121
Austin Schuhb06f03b2021-02-17 22:00:37 -0800122 for (size_t channel_index = 0;
123 channel_index < configuration_->channels()->size(); ++channel_index) {
124 const Channel *const config_channel =
125 configuration_->channels()->Get(channel_index);
126 // The MakeRawFetcher method needs a channel which is in the event loop
127 // configuration() object, not the configuration_ object. Go look that up
128 // from the config.
129 const Channel *channel = aos::configuration::GetChannel(
130 event_loop_->configuration(), config_channel->name()->string_view(),
131 config_channel->type()->string_view(), "", event_loop_->node());
132 CHECK(channel != nullptr)
133 << ": Failed to look up channel "
134 << aos::configuration::CleanedChannelToString(config_channel);
Austin Schuh5b728b72021-06-16 14:57:15 -0700135 if (!should_log(config_channel)) {
Austin Schuhb06f03b2021-02-17 22:00:37 -0800136 continue;
137 }
138
139 FetcherStruct fs;
140 fs.channel_index = channel_index;
141 fs.channel = channel;
142
143 const bool is_local =
Austin Schuh5b728b72021-06-16 14:57:15 -0700144 configuration::ChannelIsSendableOnNode(config_channel, node_);
Austin Schuhb06f03b2021-02-17 22:00:37 -0800145
146 const bool is_readable =
Austin Schuh5b728b72021-06-16 14:57:15 -0700147 configuration::ChannelIsReadableOnNode(config_channel, node_);
Austin Schuh01f3b392022-01-25 20:03:09 -0800148 const bool is_logged =
149 configuration::ChannelMessageIsLoggedOnNode(config_channel, node_);
Austin Schuhb06f03b2021-02-17 22:00:37 -0800150 const bool log_message = is_logged && is_readable;
151
152 bool log_delivery_times = false;
Austin Schuh5b728b72021-06-16 14:57:15 -0700153 if (configuration::MultiNode(configuration_)) {
Austin Schuh72211ae2021-08-05 14:02:30 -0700154 const aos::Connection *connection =
Austin Schuh5b728b72021-06-16 14:57:15 -0700155 configuration::ConnectionToNode(config_channel, node_);
Austin Schuh72211ae2021-08-05 14:02:30 -0700156
Austin Schuhb06f03b2021-02-17 22:00:37 -0800157 log_delivery_times = configuration::ConnectionDeliveryTimeIsLoggedOnNode(
Austin Schuh72211ae2021-08-05 14:02:30 -0700158 connection, event_loop_->node());
159
160 CHECK_EQ(log_delivery_times,
161 configuration::ConnectionDeliveryTimeIsLoggedOnNode(
Austin Schuh5b728b72021-06-16 14:57:15 -0700162 config_channel, node_, node_));
Austin Schuh72211ae2021-08-05 14:02:30 -0700163
164 if (connection) {
165 fs.reliable_forwarding = (connection->time_to_live() == 0);
166 }
Austin Schuhb06f03b2021-02-17 22:00:37 -0800167 }
168
Austin Schuh01f3b392022-01-25 20:03:09 -0800169 // Now, detect a RemoteMessage timestamp logger where we should just log
170 // the contents to a file directly.
Austin Schuhb06f03b2021-02-17 22:00:37 -0800171 const bool log_contents = timestamp_logger_channels.find(channel) !=
172 timestamp_logger_channels.end();
173
174 if (log_message || log_delivery_times || log_contents) {
175 fs.fetcher = event_loop->MakeRawFetcher(channel);
176 VLOG(1) << "Logging channel "
177 << configuration::CleanedChannelToString(channel);
178
179 if (log_delivery_times) {
180 VLOG(1) << " Delivery times";
181 fs.wants_timestamp_writer = true;
Austin Schuh5b728b72021-06-16 14:57:15 -0700182 fs.timestamp_node_index = static_cast<int>(node_index_);
Austin Schuhb06f03b2021-02-17 22:00:37 -0800183 }
Austin Schuhe46492f2021-07-31 19:49:41 -0700184 // Both the timestamp and data writers want data_node_index so it knows
185 // what the source node is.
186 if (log_message || log_delivery_times) {
Austin Schuhb06f03b2021-02-17 22:00:37 -0800187 if (!is_local) {
188 const Node *source_node = configuration::GetNode(
189 configuration_, channel->source_node()->string_view());
190 fs.data_node_index =
191 configuration::GetNodeIndex(configuration_, source_node);
Austin Schuhe46492f2021-07-31 19:49:41 -0700192 }
193 }
194 if (log_message) {
195 VLOG(1) << " Data";
196 fs.wants_writer = true;
197 if (!is_local) {
Austin Schuhb06f03b2021-02-17 22:00:37 -0800198 fs.log_type = LogType::kLogRemoteMessage;
199 } else {
Austin Schuh5b728b72021-06-16 14:57:15 -0700200 fs.data_node_index = static_cast<int>(node_index_);
Austin Schuhb06f03b2021-02-17 22:00:37 -0800201 }
202 }
203 if (log_contents) {
204 VLOG(1) << "Timestamp logger channel "
205 << configuration::CleanedChannelToString(channel);
Austin Schuh01f3b392022-01-25 20:03:09 -0800206 auto timestamp_logger_channel_info =
207 timestamp_logger_channels.find(channel);
208 CHECK(timestamp_logger_channel_info != timestamp_logger_channels.end());
209 fs.timestamp_node = std::get<0>(timestamp_logger_channel_info->second);
210 fs.reliable_contents =
211 std::get<1>(timestamp_logger_channel_info->second);
212 fs.channel_reliable_contents =
213 std::get<2>(timestamp_logger_channel_info->second);
Austin Schuhb06f03b2021-02-17 22:00:37 -0800214 fs.wants_contents_writer = true;
215 fs.contents_node_index =
216 configuration::GetNodeIndex(configuration_, fs.timestamp_node);
217 }
218 fetchers_.emplace_back(std::move(fs));
219 }
220 }
Austin Schuhb06f03b2021-02-17 22:00:37 -0800221}
222
223Logger::~Logger() {
224 if (log_namer_) {
225 // If we are replaying a log file, or in simulation, we want to force the
226 // last bit of data to be logged. The easiest way to deal with this is to
Austin Schuh01f3b392022-01-25 20:03:09 -0800227 // poll everything as we go to destroy the class, ie, shut down the
228 // logger, and write it to disk.
Austin Schuhb06f03b2021-02-17 22:00:37 -0800229 StopLogging(event_loop_->monotonic_now());
230 }
231}
232
Alexei Strotscaf17d32023-04-03 22:31:11 -0700233
Austin Schuh6bb8a822021-03-31 23:04:39 -0700234
Brian Smartt796cca02022-04-12 15:07:21 -0700235std::string Logger::WriteConfiguration(LogNamer *log_namer) {
Austin Schuhb06f03b2021-02-17 22:00:37 -0800236 std::string config_sha256;
Brian Smartt03c00da2022-02-24 10:25:00 -0800237
Austin Schuhb06f03b2021-02-17 22:00:37 -0800238 if (separate_config_) {
239 flatbuffers::FlatBufferBuilder fbb;
240 flatbuffers::Offset<aos::Configuration> configuration_offset =
241 CopyFlatBuffer(configuration_, &fbb);
242 LogFileHeader::Builder log_file_header_builder(fbb);
243 log_file_header_builder.add_configuration(configuration_offset);
244 fbb.FinishSizePrefixed(log_file_header_builder.Finish());
245 aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> config_header(
246 fbb.Release());
247 config_sha256 = Sha256(config_header.span());
248 LOG(INFO) << "Config sha256 of " << config_sha256;
Brian Smartt03c00da2022-02-24 10:25:00 -0800249 log_namer->WriteConfiguration(&config_header, config_sha256);
Austin Schuhb06f03b2021-02-17 22:00:37 -0800250 }
251
Brian Smartt03c00da2022-02-24 10:25:00 -0800252 return config_sha256;
253}
254
255void Logger::StartLogging(std::unique_ptr<LogNamer> log_namer,
256 std::optional<UUID> log_start_uuid) {
257 CHECK(!log_namer_) << ": Already logging";
258
259 VLOG(1) << "Starting logger for " << FlatbufferToJson(node_);
260
261 auto config_sha256 = WriteConfiguration(log_namer.get());
262
263 log_namer_ = std::move(log_namer);
264
Austin Schuhb06f03b2021-02-17 22:00:37 -0800265 log_event_uuid_ = UUID::Random();
266 log_start_uuid_ = log_start_uuid;
Austin Schuhb06f03b2021-02-17 22:00:37 -0800267
268 // We want to do as much work as possible before the initial Fetch. Time
269 // between that and actually starting to log opens up the possibility of
270 // falling off the end of the queue during that time.
271
272 for (FetcherStruct &f : fetchers_) {
273 if (f.wants_writer) {
274 f.writer = log_namer_->MakeWriter(f.channel);
275 }
276 if (f.wants_timestamp_writer) {
277 f.timestamp_writer = log_namer_->MakeTimestampWriter(f.channel);
278 }
279 if (f.wants_contents_writer) {
280 f.contents_writer = log_namer_->MakeForwardedTimestampWriter(
281 f.channel, CHECK_NOTNULL(f.timestamp_node));
282 }
283 }
284
Austin Schuh73340842021-07-30 22:32:06 -0700285 log_namer_->SetHeaderTemplate(MakeHeader(config_sha256));
Austin Schuhb06f03b2021-02-17 22:00:37 -0800286
Austin Schuha42ee962021-03-31 22:49:30 -0700287 const aos::monotonic_clock::time_point beginning_time =
288 event_loop_->monotonic_now();
289
Austin Schuhb06f03b2021-02-17 22:00:37 -0800290 // Grab data from each channel right before we declare the log file started
291 // so we can capture the latest message on each channel. This lets us have
292 // non periodic messages with configuration that now get logged.
293 for (FetcherStruct &f : fetchers_) {
294 const auto start = event_loop_->monotonic_now();
295 const bool got_new = f.fetcher->Fetch();
296 const auto end = event_loop_->monotonic_now();
297 RecordFetchResult(start, end, got_new, &f);
298
299 // If there is a message, we want to write it.
300 f.written = f.fetcher->context().data == nullptr;
301 }
302
303 // Clear out any old timestamps in case we are re-starting logging.
Austin Schuh572924a2021-07-30 22:32:12 -0700304 for (size_t i = 0; i < configuration::NodesCount(configuration_); ++i) {
Austin Schuh58646e22021-08-23 23:51:46 -0700305 log_namer_->ClearStartTimes();
Austin Schuhb06f03b2021-02-17 22:00:37 -0800306 }
307
Austin Schuha42ee962021-03-31 22:49:30 -0700308 const aos::monotonic_clock::time_point fetch_time =
309 event_loop_->monotonic_now();
Austin Schuhb06f03b2021-02-17 22:00:37 -0800310 WriteHeader();
Austin Schuha42ee962021-03-31 22:49:30 -0700311 const aos::monotonic_clock::time_point header_time =
312 event_loop_->monotonic_now();
Austin Schuhb06f03b2021-02-17 22:00:37 -0800313
Brian Smartt796cca02022-04-12 15:07:21 -0700314 VLOG(1) << "Logging node as " << FlatbufferToJson(node_) << " start_time "
315 << last_synchronized_time_ << ", took "
Brian Smartt03c00da2022-02-24 10:25:00 -0800316 << chrono::duration<double>(fetch_time - beginning_time).count()
317 << " to fetch, "
318 << chrono::duration<double>(header_time - fetch_time).count()
319 << " to write headers, boot uuid " << event_loop_->boot_uuid();
Austin Schuhb06f03b2021-02-17 22:00:37 -0800320
321 // Force logging up until the start of the log file now, so the messages at
322 // the start are always ordered before the rest of the messages.
323 // Note: this ship may have already sailed, but we don't have to make it
324 // worse.
325 // TODO(austin): Test...
Austin Schuh855f8932021-03-19 22:01:32 -0700326 //
Naman Gupta41d70c22022-11-21 15:29:52 -0800327 // This is safe to call here since we have set last_synchronized_time_ as
328 // the same time as in the header, and all the data before it should be
329 // logged without ordering concerns.
Austin Schuhb06f03b2021-02-17 22:00:37 -0800330 LogUntil(last_synchronized_time_);
331
332 timer_handler_->Setup(event_loop_->monotonic_now() + polling_period_,
333 polling_period_);
334}
335
Brian Smartt796cca02022-04-12 15:07:21 -0700336std::unique_ptr<LogNamer> Logger::RestartLogging(
337 std::unique_ptr<LogNamer> log_namer, std::optional<UUID> log_start_uuid) {
Brian Smartt03c00da2022-02-24 10:25:00 -0800338 CHECK(log_namer_) << ": Unexpected restart while not logging";
339
340 VLOG(1) << "Restarting logger for " << FlatbufferToJson(node_);
341
Austin Schuh08dba8f2023-05-01 08:29:30 -0700342 // Make sure not to write past now so we don't risk out of order problems. We
343 // don't want to get into a situation where we write out up to now + 0.1 sec,
344 // and that operation takes ~0.1 seconds, so we end up writing a different
345 // amount of the early and late channels. That would then result in the next
346 // go around finding more than 0.1 sec of data on the early channels.
347 //
348 // Make sure we read up until "now" and log it. This sets us up so that we
349 // are unlikely to fetch a message far in the future and have a ton of data
350 // before the offical start time.
351 monotonic_clock::time_point newest_record =
352 monotonic_clock::min_time;
353 while (true) {
354 aos::monotonic_clock::time_point next_time =
355 last_synchronized_time_ + polling_period_;
356 const aos::monotonic_clock::time_point monotonic_now =
357 event_loop_->monotonic_now();
358 if (next_time > monotonic_now) {
359 next_time = monotonic_now;
360 }
361
362 bool wrote_messages = false;
363 std::tie(wrote_messages, newest_record) = LogUntil(next_time);
364
365 if (next_time == monotonic_now &&
366 (!wrote_messages || newest_record < monotonic_now + polling_period_)) {
367 // If we stopped writing messages, then we probably have stopped making
368 // progress. If the newest record (unwritten or written) on a channel is
369 // very close to the current time, then there won't be much data
370 // officially after the end of the last log but before the start of the
371 // current one. We need to pick the start of the current log to be after
372 // the last message on record so we don't have holes in the log.
373 break;
374 }
375 }
376
377 // We are now synchronized up to last_synchronized_time_. Our start time can
378 // safely be "newest_record". But, we need to guarentee that the start time
379 // is after the newest message we have a record of, and that we don't skip any
380 // messages as we rotate. This means we can't call Fetch anywhere.
Brian Smartt03c00da2022-02-24 10:25:00 -0800381
382 std::unique_ptr<LogNamer> old_log_namer = std::move(log_namer_);
383 log_namer_ = std::move(log_namer);
384
Naman Gupta41d70c22022-11-21 15:29:52 -0800385 // Now grab a representative time on both the RT and monotonic clock.
386 // Average a monotonic clock before and after to reduce the error.
Brian Smartt03c00da2022-02-24 10:25:00 -0800387 const aos::monotonic_clock::time_point beginning_time =
388 event_loop_->monotonic_now();
Austin Schuh41f8df92022-04-15 11:45:52 -0700389 const aos::realtime_clock::time_point beginning_time_rt =
390 event_loop_->realtime_now();
391 const aos::monotonic_clock::time_point beginning_time2 =
392 event_loop_->monotonic_now();
393
394 if (beginning_time > last_synchronized_time_) {
395 LOG(WARNING) << "Took over " << polling_period_.count()
396 << "ns to swap log_namer";
397 }
398
Austin Schuh08dba8f2023-05-01 08:29:30 -0700399 // Our start time is now the newest message we have a record of. We will
400 // declare the old log "done", and start in on the new one, double-logging
401 // anything we have a record of so we have all the messages from before the
402 // start.
403 const aos::monotonic_clock::time_point monotonic_start_time = newest_record;
Austin Schuh41f8df92022-04-15 11:45:52 -0700404 const aos::realtime_clock::time_point realtime_start_time =
405 (beginning_time_rt + (monotonic_start_time.time_since_epoch() -
406 ((beginning_time.time_since_epoch() +
407 beginning_time2.time_since_epoch()) /
408 2)));
Brian Smartt03c00da2022-02-24 10:25:00 -0800409
410 auto config_sha256 = WriteConfiguration(log_namer_.get());
411
412 log_event_uuid_ = UUID::Random();
413 log_start_uuid_ = log_start_uuid;
414
415 log_namer_->SetHeaderTemplate(MakeHeader(config_sha256));
416
417 // Note that WriteHeader updates last_synchronized_time_ to be the
418 // current time when it is called, which is then the "start time"
419 // of the new (restarted) log. This timestamp will be after
Naman Gupta41d70c22022-11-21 15:29:52 -0800420 // the timestamp of the last message fetched on each channel, but is
421 // carefully picked per the comment above to not violate
422 // max_out_of_order_duration.
Austin Schuh41f8df92022-04-15 11:45:52 -0700423 WriteHeader(monotonic_start_time, realtime_start_time);
Brian Smartt03c00da2022-02-24 10:25:00 -0800424
425 const aos::monotonic_clock::time_point header_time =
426 event_loop_->monotonic_now();
427
Austin Schuh08dba8f2023-05-01 08:29:30 -0700428 // Close out the old writers to free up memory to be used by the new writers.
429 old_log_namer->Close();
430
Brian Smartt03c00da2022-02-24 10:25:00 -0800431 for (FetcherStruct &f : fetchers_) {
Brian Smartt03c00da2022-02-24 10:25:00 -0800432 // Create writers from the new namer
Brian Smartt03c00da2022-02-24 10:25:00 -0800433
434 if (f.wants_writer) {
Austin Schuh08dba8f2023-05-01 08:29:30 -0700435 f.writer = log_namer_->MakeWriter(f.channel);
Brian Smartt03c00da2022-02-24 10:25:00 -0800436 }
437 if (f.wants_timestamp_writer) {
Austin Schuh08dba8f2023-05-01 08:29:30 -0700438 f.timestamp_writer = log_namer_->MakeTimestampWriter(f.channel);
Brian Smartt03c00da2022-02-24 10:25:00 -0800439 }
440 if (f.wants_contents_writer) {
Austin Schuh08dba8f2023-05-01 08:29:30 -0700441 f.contents_writer = log_namer_->MakeForwardedTimestampWriter(
Brian Smartt03c00da2022-02-24 10:25:00 -0800442 f.channel, CHECK_NOTNULL(f.timestamp_node));
443 }
444
Austin Schuh08dba8f2023-05-01 08:29:30 -0700445 // Mark each channel with data as not written. That triggers each channel
446 // to be re-logged.
447 f.written = f.fetcher->context().data == nullptr;
Brian Smartt03c00da2022-02-24 10:25:00 -0800448 }
449
Austin Schuh08dba8f2023-05-01 08:29:30 -0700450 // And now make sure to log everything up to the start time in 1 big go so we
451 // make sure we have it before we let the world start logging normally again.
452 LogUntil(monotonic_start_time);
453
Brian Smartt03c00da2022-02-24 10:25:00 -0800454 const aos::monotonic_clock::time_point channel_time =
455 event_loop_->monotonic_now();
456
Brian Smartt796cca02022-04-12 15:07:21 -0700457 VLOG(1) << "Logging node as " << FlatbufferToJson(node_) << " restart_time "
458 << last_synchronized_time_ << ", took "
Brian Smartt03c00da2022-02-24 10:25:00 -0800459 << chrono::duration<double>(header_time - beginning_time).count()
460 << " to prepare and write header, "
461 << chrono::duration<double>(channel_time - header_time).count()
Brian Smartt796cca02022-04-12 15:07:21 -0700462 << " to write initial channel messages, boot uuid "
463 << event_loop_->boot_uuid();
Brian Smartt03c00da2022-02-24 10:25:00 -0800464
465 return old_log_namer;
466}
467
Austin Schuhb06f03b2021-02-17 22:00:37 -0800468std::unique_ptr<LogNamer> Logger::StopLogging(
469 aos::monotonic_clock::time_point end_time) {
470 CHECK(log_namer_) << ": Not logging right now";
471
472 if (end_time != aos::monotonic_clock::min_time) {
Austin Schuh30586902021-03-30 22:54:08 -0700473 // Folks like to use the on_logged_period_ callback to trigger stop and
474 // start events. We can't have those then recurse and try to stop again.
475 // Rather than making everything reentrant, let's just instead block the
476 // callback here.
477 DoLogData(end_time, false);
Austin Schuhb06f03b2021-02-17 22:00:37 -0800478 }
479 timer_handler_->Disable();
480
481 for (FetcherStruct &f : fetchers_) {
482 f.writer = nullptr;
483 f.timestamp_writer = nullptr;
484 f.contents_writer = nullptr;
485 }
Austin Schuhb06f03b2021-02-17 22:00:37 -0800486
487 log_event_uuid_ = UUID::Zero();
Austin Schuh34f9e482021-03-31 22:54:18 -0700488 log_start_uuid_ = std::nullopt;
Austin Schuhb06f03b2021-02-17 22:00:37 -0800489
Alexei Strotsbc776d52023-04-15 23:46:45 -0700490 log_namer_->Close();
491
Austin Schuhb06f03b2021-02-17 22:00:37 -0800492 return std::move(log_namer_);
493}
494
Austin Schuh41f8df92022-04-15 11:45:52 -0700495void Logger::WriteHeader(aos::monotonic_clock::time_point monotonic_start_time,
496 aos::realtime_clock::time_point realtime_start_time) {
Austin Schuhb06f03b2021-02-17 22:00:37 -0800497 if (configuration::MultiNode(configuration_)) {
498 server_statistics_fetcher_.Fetch();
499 }
500
Austin Schuh41f8df92022-04-15 11:45:52 -0700501 if (monotonic_start_time == aos::monotonic_clock::min_time) {
502 monotonic_start_time = event_loop_->monotonic_now();
503 realtime_start_time = event_loop_->realtime_now();
504 }
Austin Schuhb06f03b2021-02-17 22:00:37 -0800505
506 // We need to pick a point in time to declare the log file "started". This
507 // starts here. It needs to be after everything is fetched so that the
508 // fetchers are all pointed at the most recent message before the start
509 // time.
510 last_synchronized_time_ = monotonic_start_time;
511
512 for (const Node *node : log_namer_->nodes()) {
513 const int node_index = configuration::GetNodeIndex(configuration_, node);
514 MaybeUpdateTimestamp(node, node_index, monotonic_start_time,
515 realtime_start_time);
Austin Schuhb06f03b2021-02-17 22:00:37 -0800516 }
517}
518
Austin Schuhb06f03b2021-02-17 22:00:37 -0800519void Logger::WriteMissingTimestamps() {
520 if (configuration::MultiNode(configuration_)) {
521 server_statistics_fetcher_.Fetch();
522 } else {
523 return;
524 }
525
526 if (server_statistics_fetcher_.get() == nullptr) {
527 return;
528 }
529
530 for (const Node *node : log_namer_->nodes()) {
531 const int node_index = configuration::GetNodeIndex(configuration_, node);
532 if (MaybeUpdateTimestamp(
533 node, node_index,
534 server_statistics_fetcher_.context().monotonic_event_time,
535 server_statistics_fetcher_.context().realtime_event_time)) {
Austin Schuh58646e22021-08-23 23:51:46 -0700536 VLOG(1) << "Timestamps changed on " << aos::FlatbufferToJson(node);
Austin Schuhb06f03b2021-02-17 22:00:37 -0800537 }
538 }
539}
540
Austin Schuhb06f03b2021-02-17 22:00:37 -0800541bool Logger::MaybeUpdateTimestamp(
542 const Node *node, int node_index,
543 aos::monotonic_clock::time_point monotonic_start_time,
544 aos::realtime_clock::time_point realtime_start_time) {
545 // Bail early if the start times are already set.
Austin Schuh58646e22021-08-23 23:51:46 -0700546 if (node_ == node || !configuration::MultiNode(configuration_)) {
547 if (log_namer_->monotonic_start_time(node_index,
548 event_loop_->boot_uuid()) !=
549 monotonic_clock::min_time) {
550 return false;
551 }
Brian Smartt03c00da2022-02-24 10:25:00 -0800552
Austin Schuhb06f03b2021-02-17 22:00:37 -0800553 // There are no offsets to compute for ourself, so always succeed.
Austin Schuh58646e22021-08-23 23:51:46 -0700554 log_namer_->SetStartTimes(node_index, event_loop_->boot_uuid(),
555 monotonic_start_time, realtime_start_time,
556 monotonic_start_time, realtime_start_time);
Austin Schuhb06f03b2021-02-17 22:00:37 -0800557 return true;
558 } else if (server_statistics_fetcher_.get() != nullptr) {
559 // We must be a remote node now. Look for the connection and see if it is
560 // connected.
James Kuszmaul17607fb2021-10-15 20:00:32 -0700561 CHECK(server_statistics_fetcher_->has_connections());
Austin Schuhb06f03b2021-02-17 22:00:37 -0800562
563 for (const message_bridge::ServerConnection *connection :
564 *server_statistics_fetcher_->connections()) {
565 if (connection->node()->name()->string_view() !=
566 node->name()->string_view()) {
567 continue;
568 }
569
570 if (connection->state() != message_bridge::State::CONNECTED) {
571 VLOG(1) << node->name()->string_view()
572 << " is not connected, can't start it yet.";
573 break;
574 }
575
Austin Schuhb06f03b2021-02-17 22:00:37 -0800576 if (!connection->has_monotonic_offset()) {
577 VLOG(1) << "Missing monotonic offset for setting start time for node "
578 << aos::FlatbufferToJson(node);
579 break;
580 }
581
James Kuszmaul17607fb2021-10-15 20:00:32 -0700582 CHECK(connection->has_boot_uuid());
Austin Schuh58646e22021-08-23 23:51:46 -0700583 const UUID boot_uuid =
584 UUID::FromString(connection->boot_uuid()->string_view());
585
586 if (log_namer_->monotonic_start_time(node_index, boot_uuid) !=
587 monotonic_clock::min_time) {
588 break;
589 }
590
591 VLOG(1) << "Updating start time for "
592 << aos::FlatbufferToJson(connection);
593
Austin Schuhb06f03b2021-02-17 22:00:37 -0800594 // Found it and it is connected. Compensate and go.
Austin Schuh73340842021-07-30 22:32:06 -0700595 log_namer_->SetStartTimes(
Austin Schuh58646e22021-08-23 23:51:46 -0700596 node_index, boot_uuid,
Austin Schuh73340842021-07-30 22:32:06 -0700597 monotonic_start_time +
598 std::chrono::nanoseconds(connection->monotonic_offset()),
599 realtime_start_time, monotonic_start_time, realtime_start_time);
Austin Schuhb06f03b2021-02-17 22:00:37 -0800600 return true;
601 }
602 }
603 return false;
604}
605
606aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> Logger::MakeHeader(
Austin Schuh73340842021-07-30 22:32:06 -0700607 std::string_view config_sha256) {
Austin Schuhb06f03b2021-02-17 22:00:37 -0800608 flatbuffers::FlatBufferBuilder fbb;
609 fbb.ForceDefaults(true);
610
611 flatbuffers::Offset<aos::Configuration> configuration_offset;
612 if (!separate_config_) {
613 configuration_offset = CopyFlatBuffer(configuration_, &fbb);
614 } else {
615 CHECK(!config_sha256.empty());
616 }
617
618 const flatbuffers::Offset<flatbuffers::String> name_offset =
619 fbb.CreateString(name_);
620
Austin Schuhfa712682022-05-11 16:43:42 -0700621 const flatbuffers::Offset<flatbuffers::String> logger_sha1_offset =
622 logger_sha1_.empty() ? 0 : fbb.CreateString(logger_sha1_);
623 const flatbuffers::Offset<flatbuffers::String> logger_version_offset =
624 logger_version_.empty() ? 0 : fbb.CreateString(logger_version_);
625
Austin Schuhb06f03b2021-02-17 22:00:37 -0800626 CHECK(log_event_uuid_ != UUID::Zero());
627 const flatbuffers::Offset<flatbuffers::String> log_event_uuid_offset =
Austin Schuh5e2bfb82021-03-13 22:46:55 -0800628 log_event_uuid_.PackString(&fbb);
Austin Schuhb06f03b2021-02-17 22:00:37 -0800629
630 const flatbuffers::Offset<flatbuffers::String> logger_instance_uuid_offset =
Austin Schuh5e2bfb82021-03-13 22:46:55 -0800631 logger_instance_uuid_.PackString(&fbb);
Austin Schuhb06f03b2021-02-17 22:00:37 -0800632
633 flatbuffers::Offset<flatbuffers::String> log_start_uuid_offset;
Austin Schuh34f9e482021-03-31 22:54:18 -0700634 if (log_start_uuid_) {
635 log_start_uuid_offset = fbb.CreateString(log_start_uuid_->ToString());
Austin Schuhb06f03b2021-02-17 22:00:37 -0800636 }
637
638 flatbuffers::Offset<flatbuffers::String> config_sha256_offset;
639 if (!config_sha256.empty()) {
640 config_sha256_offset = fbb.CreateString(config_sha256);
641 }
642
643 const flatbuffers::Offset<flatbuffers::String> logger_node_boot_uuid_offset =
Austin Schuh5e2bfb82021-03-13 22:46:55 -0800644 event_loop_->boot_uuid().PackString(&fbb);
Austin Schuhb06f03b2021-02-17 22:00:37 -0800645
Austin Schuhb06f03b2021-02-17 22:00:37 -0800646 flatbuffers::Offset<Node> logger_node_offset;
647
648 if (configuration::MultiNode(configuration_)) {
Austin Schuh5b728b72021-06-16 14:57:15 -0700649 logger_node_offset = RecursiveCopyFlatBuffer(node_, &fbb);
Austin Schuhb06f03b2021-02-17 22:00:37 -0800650 }
651
652 aos::logger::LogFileHeader::Builder log_file_header_builder(fbb);
653
654 log_file_header_builder.add_name(name_offset);
Austin Schuhfa712682022-05-11 16:43:42 -0700655 if (!logger_sha1_offset.IsNull()) {
656 log_file_header_builder.add_logger_sha1(logger_sha1_offset);
657 }
658 if (!logger_version_offset.IsNull()) {
659 log_file_header_builder.add_logger_version(logger_version_offset);
660 }
Austin Schuhb06f03b2021-02-17 22:00:37 -0800661
662 // Only add the node if we are running in a multinode configuration.
Austin Schuh73340842021-07-30 22:32:06 -0700663 if (configuration::MultiNode(configuration_)) {
Austin Schuhb06f03b2021-02-17 22:00:37 -0800664 log_file_header_builder.add_logger_node(logger_node_offset);
665 }
666
667 if (!configuration_offset.IsNull()) {
668 log_file_header_builder.add_configuration(configuration_offset);
669 }
670 // The worst case theoretical out of order is the polling period times 2.
671 // One message could get logged right after the boundary, but be for right
672 // before the next boundary. And the reverse could happen for another
673 // message. Report back 3x to be extra safe, and because the cost isn't
674 // huge on the read side.
675 log_file_header_builder.add_max_out_of_order_duration(
676 std::chrono::nanoseconds(3 * polling_period_).count());
677
Austin Schuhb06f03b2021-02-17 22:00:37 -0800678 log_file_header_builder.add_log_event_uuid(log_event_uuid_offset);
679 log_file_header_builder.add_logger_instance_uuid(logger_instance_uuid_offset);
680 if (!log_start_uuid_offset.IsNull()) {
681 log_file_header_builder.add_log_start_uuid(log_start_uuid_offset);
682 }
683 log_file_header_builder.add_logger_node_boot_uuid(
684 logger_node_boot_uuid_offset);
Austin Schuhb06f03b2021-02-17 22:00:37 -0800685
686 if (!config_sha256_offset.IsNull()) {
687 log_file_header_builder.add_configuration_sha256(config_sha256_offset);
688 }
689
690 fbb.FinishSizePrefixed(log_file_header_builder.Finish());
691 aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> result(
692 fbb.Release());
693
694 CHECK(result.Verify()) << ": Built a corrupted header.";
695
696 return result;
697}
698
699void Logger::ResetStatisics() {
700 max_message_fetch_time_ = std::chrono::nanoseconds::zero();
701 max_message_fetch_time_channel_ = -1;
702 max_message_fetch_time_size_ = -1;
703 total_message_fetch_time_ = std::chrono::nanoseconds::zero();
704 total_message_fetch_count_ = 0;
705 total_message_fetch_bytes_ = 0;
706 total_nop_fetch_time_ = std::chrono::nanoseconds::zero();
707 total_nop_fetch_count_ = 0;
708 max_copy_time_ = std::chrono::nanoseconds::zero();
709 max_copy_time_channel_ = -1;
710 max_copy_time_size_ = -1;
711 total_copy_time_ = std::chrono::nanoseconds::zero();
712 total_copy_count_ = 0;
713 total_copy_bytes_ = 0;
714}
715
716void Logger::Rotate() {
717 for (const Node *node : log_namer_->nodes()) {
Austin Schuh73340842021-07-30 22:32:06 -0700718 log_namer_->Rotate(node);
Austin Schuhb06f03b2021-02-17 22:00:37 -0800719 }
720}
721
Brian Smartt03c00da2022-02-24 10:25:00 -0800722void Logger::WriteData(NewDataWriter *writer, const FetcherStruct &f) {
723 if (writer != nullptr) {
724 const UUID source_node_boot_uuid =
725 static_cast<int>(node_index_) != f.data_node_index
726 ? f.fetcher->context().source_boot_uuid
727 : event_loop_->boot_uuid();
728 // Write!
729 const auto start = event_loop_->monotonic_now();
Brian Smartt03c00da2022-02-24 10:25:00 -0800730
Naman Gupta41d70c22022-11-21 15:29:52 -0800731 ContextDataCopier coppier(f.fetcher->context(), f.channel_index, f.log_type,
732 event_loop_);
Brian Smartt03c00da2022-02-24 10:25:00 -0800733
Austin Schuh48d10d62022-10-16 22:19:23 -0700734 writer->CopyMessage(&coppier, source_node_boot_uuid, start);
735 RecordCreateMessageTime(start, coppier.end_time(), f);
Brian Smartt03c00da2022-02-24 10:25:00 -0800736
Brian Smartt796cca02022-04-12 15:07:21 -0700737 VLOG(2) << "Wrote data as node " << FlatbufferToJson(node_)
738 << " for channel "
Brian Smartt03c00da2022-02-24 10:25:00 -0800739 << configuration::CleanedChannelToString(f.fetcher->channel())
Austin Schuh48d10d62022-10-16 22:19:23 -0700740 << " to " << writer->filename();
Brian Smartt03c00da2022-02-24 10:25:00 -0800741 }
742}
743
Brian Smartt796cca02022-04-12 15:07:21 -0700744void Logger::WriteTimestamps(NewDataWriter *timestamp_writer,
745 const FetcherStruct &f) {
Brian Smartt03c00da2022-02-24 10:25:00 -0800746 if (timestamp_writer != nullptr) {
747 // And now handle timestamps.
Brian Smartt03c00da2022-02-24 10:25:00 -0800748
749 // Tell our writer that we know something about the remote boot.
750 timestamp_writer->UpdateRemote(
751 f.data_node_index, f.fetcher->context().source_boot_uuid,
752 f.fetcher->context().monotonic_remote_time,
753 f.fetcher->context().monotonic_event_time, f.reliable_forwarding);
Austin Schuh48d10d62022-10-16 22:19:23 -0700754
755 const auto start = event_loop_->monotonic_now();
756 ContextDataCopier coppier(f.fetcher->context(), f.channel_index,
Naman Gupta41d70c22022-11-21 15:29:52 -0800757 LogType::kLogDeliveryTimeOnly, event_loop_);
Austin Schuh48d10d62022-10-16 22:19:23 -0700758
759 timestamp_writer->CopyMessage(&coppier, event_loop_->boot_uuid(), start);
760 RecordCreateMessageTime(start, coppier.end_time(), f);
Brian Smartt03c00da2022-02-24 10:25:00 -0800761
Brian Smartt796cca02022-04-12 15:07:21 -0700762 VLOG(2) << "Wrote timestamps as node " << FlatbufferToJson(node_)
763 << " for channel "
Brian Smartt03c00da2022-02-24 10:25:00 -0800764 << configuration::CleanedChannelToString(f.fetcher->channel())
Austin Schuh48d10d62022-10-16 22:19:23 -0700765 << " to " << timestamp_writer->filename() << " timestamp";
Brian Smartt03c00da2022-02-24 10:25:00 -0800766 }
767}
768
Brian Smartt796cca02022-04-12 15:07:21 -0700769void Logger::WriteContent(NewDataWriter *contents_writer,
770 const FetcherStruct &f) {
Brian Smartt03c00da2022-02-24 10:25:00 -0800771 if (contents_writer != nullptr) {
772 const auto start = event_loop_->monotonic_now();
773 // And now handle the special message contents channel. Copy the
774 // message into a FlatBufferBuilder and save it to disk.
Brian Smartt03c00da2022-02-24 10:25:00 -0800775 const RemoteMessage *msg =
776 flatbuffers::GetRoot<RemoteMessage>(f.fetcher->context().data);
777
778 CHECK(msg->has_boot_uuid()) << ": " << aos::FlatbufferToJson(msg);
Brian Smartt03c00da2022-02-24 10:25:00 -0800779 // Translate from the channel index that the event loop uses to the
780 // channel index in the log file.
Austin Schuhf2d0e682022-10-16 14:20:58 -0700781 const int channel_index =
782 event_loop_to_logged_channel_index_[msg->channel_index()];
Brian Smartt03c00da2022-02-24 10:25:00 -0800783
784 const aos::monotonic_clock::time_point monotonic_timestamp_time =
785 f.fetcher->context().monotonic_event_time;
Brian Smartt03c00da2022-02-24 10:25:00 -0800786
Brian Smartt03c00da2022-02-24 10:25:00 -0800787 // Timestamps tell us information about what happened too!
788 // Capture any reboots so UpdateRemote is properly recorded.
789 contents_writer->UpdateBoot(UUID::FromVector(msg->boot_uuid()));
790
791 // Start with recording info about the data flowing from our node to the
792 // remote.
793 const bool reliable =
794 f.channel_reliable_contents.size() != 0u
795 ? f.channel_reliable_contents[msg->channel_index()]
796 : f.reliable_contents;
797
Brian Smartt796cca02022-04-12 15:07:21 -0700798 contents_writer->UpdateRemote(
799 node_index_, event_loop_->boot_uuid(),
Brian Smartt03c00da2022-02-24 10:25:00 -0800800 monotonic_clock::time_point(
801 chrono::nanoseconds(msg->monotonic_remote_time())),
802 monotonic_clock::time_point(
803 chrono::nanoseconds(msg->monotonic_sent_time())),
804 reliable, monotonic_timestamp_time);
805
Austin Schuh48d10d62022-10-16 22:19:23 -0700806 RemoteMessageCopier coppier(msg, channel_index, monotonic_timestamp_time,
Naman Gupta41d70c22022-11-21 15:29:52 -0800807 event_loop_);
Austin Schuh48d10d62022-10-16 22:19:23 -0700808
809 contents_writer->CopyMessage(&coppier, UUID::FromVector(msg->boot_uuid()),
810 start);
811
812 RecordCreateMessageTime(start, coppier.end_time(), f);
Brian Smartt03c00da2022-02-24 10:25:00 -0800813 }
814}
815
816void Logger::WriteFetchedRecord(FetcherStruct &f) {
817 WriteData(f.writer, f);
818 WriteTimestamps(f.timestamp_writer, f);
819 WriteContent(f.contents_writer, f);
820}
821
Austin Schuh08dba8f2023-05-01 08:29:30 -0700822std::pair<bool, monotonic_clock::time_point> Logger::LogUntil(
823 monotonic_clock::time_point t) {
824 bool wrote_messages = false;
825 monotonic_clock::time_point newest_record = monotonic_clock::min_time;
Brian Smartt03c00da2022-02-24 10:25:00 -0800826
Austin Schuhb06f03b2021-02-17 22:00:37 -0800827 // Grab the latest ServerStatistics message. This will always have the
828 // oppertunity to be >= to the current time, so it will always represent any
829 // reboots which may have happened.
830 WriteMissingTimestamps();
831
832 // Write each channel to disk, one at a time.
833 for (FetcherStruct &f : fetchers_) {
Austin Schuh08dba8f2023-05-01 08:29:30 -0700834 if (f.fetcher->context().data != nullptr) {
835 newest_record =
836 std::max(newest_record, f.fetcher->context().monotonic_event_time);
837 }
838
Austin Schuhb06f03b2021-02-17 22:00:37 -0800839 while (true) {
840 if (f.written) {
841 const auto start = event_loop_->monotonic_now();
842 const bool got_new = f.fetcher->FetchNext();
843 const auto end = event_loop_->monotonic_now();
844 RecordFetchResult(start, end, got_new, &f);
845 if (!got_new) {
846 VLOG(2) << "No new data on "
847 << configuration::CleanedChannelToString(
848 f.fetcher->channel());
849 break;
850 }
Austin Schuh08dba8f2023-05-01 08:29:30 -0700851 newest_record =
852 std::max(newest_record, f.fetcher->context().monotonic_event_time);
Austin Schuhb06f03b2021-02-17 22:00:37 -0800853 f.written = false;
854 }
855
856 // TODO(james): Write tests to exercise this logic.
857 if (f.fetcher->context().monotonic_event_time >= t) {
858 break;
859 }
Austin Schuhb06f03b2021-02-17 22:00:37 -0800860
Brian Smartt03c00da2022-02-24 10:25:00 -0800861 WriteFetchedRecord(f);
Austin Schuh08dba8f2023-05-01 08:29:30 -0700862 wrote_messages = true;
Austin Schuhb06f03b2021-02-17 22:00:37 -0800863
864 f.written = true;
865 }
866 }
867 last_synchronized_time_ = t;
Brian Smartt03c00da2022-02-24 10:25:00 -0800868
Austin Schuh08dba8f2023-05-01 08:29:30 -0700869 return std::make_pair(wrote_messages, newest_record);
Austin Schuhb06f03b2021-02-17 22:00:37 -0800870}
871
Austin Schuh30586902021-03-30 22:54:08 -0700872void Logger::DoLogData(const monotonic_clock::time_point end_time,
873 bool run_on_logged) {
Austin Schuhb06f03b2021-02-17 22:00:37 -0800874 // We want to guarantee that messages aren't out of order by more than
875 // max_out_of_order_duration. To do this, we need sync points. Every write
876 // cycle should be a sync point.
877
878 do {
879 // Move the sync point up by at most polling_period. This forces one sync
880 // per iteration, even if it is small.
881 LogUntil(std::min(last_synchronized_time_ + polling_period_, end_time));
882
Austin Schuh30586902021-03-30 22:54:08 -0700883 if (run_on_logged) {
884 on_logged_period_();
885 }
Austin Schuhb06f03b2021-02-17 22:00:37 -0800886
887 // If we missed cycles, we could be pretty far behind. Spin until we are
888 // caught up.
889 } while (last_synchronized_time_ + polling_period_ < end_time);
890}
891
892void Logger::RecordFetchResult(aos::monotonic_clock::time_point start,
893 aos::monotonic_clock::time_point end,
894 bool got_new, FetcherStruct *fetcher) {
895 const auto duration = end - start;
896 if (!got_new) {
897 ++total_nop_fetch_count_;
898 total_nop_fetch_time_ += duration;
899 return;
900 }
901 ++total_message_fetch_count_;
902 total_message_fetch_bytes_ += fetcher->fetcher->context().size;
903 total_message_fetch_time_ += duration;
904 if (duration > max_message_fetch_time_) {
905 max_message_fetch_time_ = duration;
906 max_message_fetch_time_channel_ = fetcher->channel_index;
907 max_message_fetch_time_size_ = fetcher->fetcher->context().size;
908 }
909}
910
911void Logger::RecordCreateMessageTime(aos::monotonic_clock::time_point start,
912 aos::monotonic_clock::time_point end,
Brian Smartt03c00da2022-02-24 10:25:00 -0800913 const FetcherStruct &fetcher) {
Austin Schuhb06f03b2021-02-17 22:00:37 -0800914 const auto duration = end - start;
915 total_copy_time_ += duration;
916 ++total_copy_count_;
Brian Smartt03c00da2022-02-24 10:25:00 -0800917 total_copy_bytes_ += fetcher.fetcher->context().size;
Austin Schuhb06f03b2021-02-17 22:00:37 -0800918 if (duration > max_copy_time_) {
919 max_copy_time_ = duration;
Brian Smartt03c00da2022-02-24 10:25:00 -0800920 max_copy_time_channel_ = fetcher.channel_index;
921 max_copy_time_size_ = fetcher.fetcher->context().size;
Austin Schuhb06f03b2021-02-17 22:00:37 -0800922 }
923}
924
925} // namespace logger
926} // namespace aos