#include "aos/events/logging/log_writer.h"

#include <dirent.h>

#include <functional>
#include <map>
#include <vector>

#include "aos/configuration.h"
#include "aos/events/event_loop.h"
#include "aos/network/message_bridge_server_generated.h"
#include "aos/network/team_number.h"
#include "aos/network/timestamp_channel.h"

namespace aos {
namespace logger {
namespace {
using message_bridge::RemoteMessage;
namespace chrono = std::chrono;
}  // namespace

Logger::Logger(EventLoop *event_loop, const Configuration *configuration,
               std::function<bool(const Channel *)> should_log)
    : event_loop_(event_loop),
      configuration_(configuration),
      node_(configuration::GetNode(configuration_, event_loop->node())),
      node_index_(configuration::GetNodeIndex(configuration_, node_)),
      name_(network::GetHostname()),
      timer_handler_(event_loop_->AddTimer(
          [this]() { DoLogData(event_loop_->monotonic_now(), true); })),
      server_statistics_fetcher_(
          configuration::MultiNode(event_loop_->configuration())
              ? event_loop_->MakeFetcher<message_bridge::ServerStatistics>(
                    "/aos")
              : aos::Fetcher<message_bridge::ServerStatistics>()) {
  timer_handler_->set_name("channel_poll");
  VLOG(1) << "Creating logger for " << FlatbufferToJson(node_);

  // When we are logging remote timestamps, we need to be able to translate
  // from the channel index that the event loop uses to the channel index in
  // the config in the log file.
  event_loop_to_logged_channel_index_.resize(
      event_loop->configuration()->channels()->size(), -1);
  for (size_t event_loop_channel_index = 0;
       event_loop_channel_index <
       event_loop->configuration()->channels()->size();
       ++event_loop_channel_index) {
    const Channel *event_loop_channel =
        event_loop->configuration()->channels()->Get(event_loop_channel_index);

    const Channel *logged_channel = aos::configuration::GetChannel(
        configuration_, event_loop_channel->name()->string_view(),
        event_loop_channel->type()->string_view(), "", node_);

    if (logged_channel != nullptr) {
      event_loop_to_logged_channel_index_[event_loop_channel_index] =
          configuration::ChannelIndex(configuration_, logged_channel);
    }
  }
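
  // For example (hypothetical indices): if "/aos aos.timing.Report" is channel
  // 7 in the event loop's configuration but channel 3 in the logged
  // configuration, event_loop_to_logged_channel_index_[7] == 3; channels that
  // do not exist in the logged configuration stay -1.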

  // Map from each timestamp logger channel to the node whose timestamps it
  // carries, whether its contents should be treated as reliable, and (for
  // combined channels) which of the channels logged on it are reliable.
  std::map<const Channel *, std::tuple<const Node *, bool, std::vector<bool>>>
      timestamp_logger_channels;

  message_bridge::ChannelTimestampFinder finder(event_loop_);
  for (const Channel *channel : *event_loop_->configuration()->channels()) {
    if (!configuration::ChannelIsSendableOnNode(channel, event_loop_->node())) {
      continue;
    }
    if (!channel->has_destination_nodes()) {
      continue;
    }
    const size_t channel_index =
        configuration::ChannelIndex(event_loop_->configuration(), channel);

    for (const Connection *connection : *channel->destination_nodes()) {
      if (configuration::ConnectionDeliveryTimeIsLoggedOnNode(
              connection, event_loop_->node())) {
        const Node *other_node = configuration::GetNode(
            configuration_, connection->name()->string_view());

        VLOG(1) << "Timestamps are logged from "
                << FlatbufferToJson(other_node);
        // True if each channel's remote timestamps are split into a separate
        // RemoteMessage channel.
        const bool is_split =
            finder.SplitChannelForChannel(channel, connection) != nullptr;

        const Channel *const timestamp_logger_channel =
            finder.ForChannel(channel, connection);

        auto it = timestamp_logger_channels.find(timestamp_logger_channel);
        if (it != timestamp_logger_channels.end()) {
          CHECK(!is_split);
          CHECK_LT(channel_index, std::get<2>(it->second).size());
          std::get<2>(it->second)[channel_index] =
              (connection->time_to_live() == 0);
        } else {
          if (is_split) {
            timestamp_logger_channels.insert(std::make_pair(
                timestamp_logger_channel,
                std::make_tuple(other_node, (connection->time_to_live() == 0),
                                std::vector<bool>())));
          } else {
            std::vector<bool> channel_reliable_contents(
                event_loop->configuration()->channels()->size(), false);
            channel_reliable_contents[channel_index] =
                (connection->time_to_live() == 0);

            timestamp_logger_channels.insert(std::make_pair(
                timestamp_logger_channel,
                std::make_tuple(other_node, false,
                                std::move(channel_reliable_contents))));
          }
        }
      }
    }
  }
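
  // Two shapes end up in the map above: for a split RemoteMessage channel the
  // entry is (remote node, reliable?, empty vector); for a combined channel
  // shared by several logged channels it is (remote node, false, per-channel
  // reliability vector indexed by event loop channel index).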

  for (size_t channel_index = 0;
       channel_index < configuration_->channels()->size(); ++channel_index) {
    const Channel *const config_channel =
        configuration_->channels()->Get(channel_index);
    // The MakeRawFetcher method needs a channel which is in the event loop
    // configuration() object, not the configuration_ object.  Go look that up
    // from the config.
    const Channel *channel = aos::configuration::GetChannel(
        event_loop_->configuration(), config_channel->name()->string_view(),
        config_channel->type()->string_view(), "", event_loop_->node());
    CHECK(channel != nullptr)
        << ": Failed to look up channel "
        << aos::configuration::CleanedChannelToString(config_channel);
    if (!should_log(config_channel)) {
      continue;
    }

    FetcherStruct fs;
    fs.channel_index = channel_index;
    fs.channel = channel;

    const bool is_local =
        configuration::ChannelIsSendableOnNode(config_channel, node_);

    const bool is_readable =
        configuration::ChannelIsReadableOnNode(config_channel, node_);
    const bool is_logged =
        configuration::ChannelMessageIsLoggedOnNode(config_channel, node_);
    const bool log_message = is_logged && is_readable;

    bool log_delivery_times = false;
    if (configuration::MultiNode(configuration_)) {
      const aos::Connection *connection =
          configuration::ConnectionToNode(config_channel, node_);

      log_delivery_times = configuration::ConnectionDeliveryTimeIsLoggedOnNode(
          connection, event_loop_->node());

      CHECK_EQ(log_delivery_times,
               configuration::ConnectionDeliveryTimeIsLoggedOnNode(
                   config_channel, node_, node_));

      if (connection) {
        fs.reliable_forwarding = (connection->time_to_live() == 0);
      }
    }

    // Now, detect a RemoteMessage timestamp logger where we should just log
    // the contents to a file directly.
    const bool log_contents = timestamp_logger_channels.find(channel) !=
                              timestamp_logger_channels.end();

    if (log_message || log_delivery_times || log_contents) {
      fs.fetcher = event_loop->MakeRawFetcher(channel);
      VLOG(1) << "Logging channel "
              << configuration::CleanedChannelToString(channel);

      if (log_delivery_times) {
        VLOG(1) << " Delivery times";
        fs.wants_timestamp_writer = true;
        fs.timestamp_node_index = static_cast<int>(node_index_);
      }
      // Both the timestamp and data writers want data_node_index so it knows
      // what the source node is.
      if (log_message || log_delivery_times) {
        if (!is_local) {
          const Node *source_node = configuration::GetNode(
              configuration_, channel->source_node()->string_view());
          fs.data_node_index =
              configuration::GetNodeIndex(configuration_, source_node);
        }
      }
      if (log_message) {
        VLOG(1) << " Data";
        fs.wants_writer = true;
        if (!is_local) {
          fs.log_type = LogType::kLogRemoteMessage;
        } else {
          fs.data_node_index = static_cast<int>(node_index_);
        }
      }
      if (log_contents) {
        VLOG(1) << "Timestamp logger channel "
                << configuration::CleanedChannelToString(channel);
        auto timestamp_logger_channel_info =
            timestamp_logger_channels.find(channel);
        CHECK(timestamp_logger_channel_info != timestamp_logger_channels.end());
        fs.timestamp_node = std::get<0>(timestamp_logger_channel_info->second);
        fs.reliable_contents =
            std::get<1>(timestamp_logger_channel_info->second);
        fs.channel_reliable_contents =
            std::get<2>(timestamp_logger_channel_info->second);
        fs.wants_contents_writer = true;
        fs.contents_node_index =
            configuration::GetNodeIndex(configuration_, fs.timestamp_node);
      }
      fetchers_.emplace_back(std::move(fs));
    }
  }
}
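
// A minimal usage sketch (not from this file; the single-argument Logger and
// MultiNodeLogNamer constructors are assumed from their headers):
//
//   aos::logger::Logger logger(event_loop);
//   logger.StartLogging(std::make_unique<aos::logger::MultiNodeLogNamer>(
//       "/tmp/log/", event_loop));
//
// After StartLogging, the timer registered above fires every polling_period_
// and calls DoLogData() to drain each fetcher to disk.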

Logger::~Logger() {
  if (log_namer_) {
    // If we are replaying a log file, or in simulation, we want to force the
    // last bit of data to be logged.  The easiest way to deal with this is to
    // poll everything as we go to destroy the class, i.e. shut down the
    // logger, and write it to disk.
    StopLogging(event_loop_->monotonic_now());
  }
}

bool Logger::RenameLogBase(std::string new_base_name) {
  // TODO(Naman): Got a crash in RenameLogBase.  Putting in a CHECK_NOTNULL to
  // catch the bug if it happens again.
  if (new_base_name == CHECK_NOTNULL(log_namer_)->base_name()) {
    return true;
  }
  log_namer_->set_base_name(new_base_name);
  Rotate();
  return true;
}
243
Brian Smartt796cca02022-04-12 15:07:21 -0700244std::string Logger::WriteConfiguration(LogNamer *log_namer) {
Austin Schuhb06f03b2021-02-17 22:00:37 -0800245 std::string config_sha256;
Brian Smartt03c00da2022-02-24 10:25:00 -0800246
Austin Schuhb06f03b2021-02-17 22:00:37 -0800247 if (separate_config_) {
248 flatbuffers::FlatBufferBuilder fbb;
249 flatbuffers::Offset<aos::Configuration> configuration_offset =
250 CopyFlatBuffer(configuration_, &fbb);
251 LogFileHeader::Builder log_file_header_builder(fbb);
252 log_file_header_builder.add_configuration(configuration_offset);
253 fbb.FinishSizePrefixed(log_file_header_builder.Finish());
254 aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> config_header(
255 fbb.Release());
256 config_sha256 = Sha256(config_header.span());
257 LOG(INFO) << "Config sha256 of " << config_sha256;
Brian Smartt03c00da2022-02-24 10:25:00 -0800258 log_namer->WriteConfiguration(&config_header, config_sha256);
Austin Schuhb06f03b2021-02-17 22:00:37 -0800259 }
260
Brian Smartt03c00da2022-02-24 10:25:00 -0800261 return config_sha256;
262}

void Logger::StartLogging(std::unique_ptr<LogNamer> log_namer,
                          std::optional<UUID> log_start_uuid) {
  CHECK(!log_namer_) << ": Already logging";

  VLOG(1) << "Starting logger for " << FlatbufferToJson(node_);

  auto config_sha256 = WriteConfiguration(log_namer.get());

  log_namer_ = std::move(log_namer);

  log_event_uuid_ = UUID::Random();
  log_start_uuid_ = log_start_uuid;

  // We want to do as much work as possible before the initial Fetch.  Time
  // between that and actually starting to log opens up the possibility of
  // falling off the end of the queue during that time.

  for (FetcherStruct &f : fetchers_) {
    if (f.wants_writer) {
      f.writer = log_namer_->MakeWriter(f.channel);
    }
    if (f.wants_timestamp_writer) {
      f.timestamp_writer = log_namer_->MakeTimestampWriter(f.channel);
    }
    if (f.wants_contents_writer) {
      f.contents_writer = log_namer_->MakeForwardedTimestampWriter(
          f.channel, CHECK_NOTNULL(f.timestamp_node));
    }
  }

  log_namer_->SetHeaderTemplate(MakeHeader(config_sha256));

  const aos::monotonic_clock::time_point beginning_time =
      event_loop_->monotonic_now();

  // Grab data from each channel right before we declare the log file started
  // so we can capture the latest message on each channel.  This lets us have
  // non-periodic messages with configuration that now get logged.
  for (FetcherStruct &f : fetchers_) {
    const auto start = event_loop_->monotonic_now();
    const bool got_new = f.fetcher->Fetch();
    const auto end = event_loop_->monotonic_now();
    RecordFetchResult(start, end, got_new, &f);

    // If the fetcher found a message, leave it marked unwritten so it gets
    // logged below; channels with no data yet have nothing to write.
    f.written = f.fetcher->context().data == nullptr;
  }

  // Clear out any old timestamps in case we are re-starting logging.
  for (size_t i = 0; i < configuration::NodesCount(configuration_); ++i) {
    log_namer_->ClearStartTimes();
  }

  const aos::monotonic_clock::time_point fetch_time =
      event_loop_->monotonic_now();
  WriteHeader();
  const aos::monotonic_clock::time_point header_time =
      event_loop_->monotonic_now();

  VLOG(1) << "Logging node as " << FlatbufferToJson(node_) << " start_time "
          << last_synchronized_time_ << ", took "
          << chrono::duration<double>(fetch_time - beginning_time).count()
          << " to fetch, "
          << chrono::duration<double>(header_time - fetch_time).count()
          << " to write headers, boot uuid " << event_loop_->boot_uuid();

  // Force logging up until the start of the log file now, so the messages at
  // the start are always ordered before the rest of the messages.
  // Note: this ship may have already sailed, but we don't have to make it
  // worse.
  // TODO(austin): Test...
  //
  // This is safe to call here since we have set last_synchronized_time_ as
  // the same time as in the header, and all the data before it should be
  // logged without ordering concerns.
  LogUntil(last_synchronized_time_);

  timer_handler_->Setup(event_loop_->monotonic_now() + polling_period_,
                        polling_period_);
}

std::unique_ptr<LogNamer> Logger::RestartLogging(
    std::unique_ptr<LogNamer> log_namer, std::optional<UUID> log_start_uuid) {
  CHECK(log_namer_) << ": Unexpected restart while not logging";

  VLOG(1) << "Restarting logger for " << FlatbufferToJson(node_);

  // Force out every currently pending message, pointing all fetchers at the
  // last (currently available) records.  Note that LogUntil() updates
  // last_synchronized_time_ to the time value that it receives.
  while (LogUntil(last_synchronized_time_ + polling_period_))
    ;

  std::unique_ptr<LogNamer> old_log_namer = std::move(log_namer_);
  log_namer_ = std::move(log_namer);

  // Now grab a representative time on both the RT and monotonic clock.
  // Average a monotonic clock sample from before and after to reduce the
  // error.
  const aos::monotonic_clock::time_point beginning_time =
      event_loop_->monotonic_now();
  const aos::realtime_clock::time_point beginning_time_rt =
      event_loop_->realtime_now();
  const aos::monotonic_clock::time_point beginning_time2 =
      event_loop_->monotonic_now();

  if (beginning_time > last_synchronized_time_) {
    LOG(WARNING) << "Took over " << polling_period_.count()
                 << "ns to swap log_namer";
  }

  // Since we are going to log everything in one big go, we need our log start
  // time to be after the previous LogUntil call finished, but before one
  // period after it.  The best way to guarantee that is to pick a start time
  // that is the earliest of the two.  That covers the case where the OS puts
  // us to sleep between when we finish LogUntil and capture beginning_time.
  const aos::monotonic_clock::time_point monotonic_start_time =
      std::min(last_synchronized_time_, beginning_time);
  const aos::realtime_clock::time_point realtime_start_time =
      (beginning_time_rt + (monotonic_start_time.time_since_epoch() -
                            ((beginning_time.time_since_epoch() +
                              beginning_time2.time_since_epoch()) /
                             2)));
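  // Same computation, rearranged for readability:
  //   realtime_start_time = beginning_time_rt
  //       + (monotonic_start_time - avg(beginning_time, beginning_time2))
  // i.e. the realtime sample shifted back by how far the chosen monotonic
  // start time precedes the midpoint of the two bracketing monotonic samples.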

  auto config_sha256 = WriteConfiguration(log_namer_.get());

  log_event_uuid_ = UUID::Random();
  log_start_uuid_ = log_start_uuid;

  log_namer_->SetHeaderTemplate(MakeHeader(config_sha256));

  // Note that WriteHeader updates last_synchronized_time_ to the start time
  // we pass in, which is then the "start time" of the new (restarted) log.
  // This timestamp will be after the timestamp of the last message fetched on
  // each channel, but is carefully picked per the comment above to not
  // violate max_out_of_order_duration.
  WriteHeader(monotonic_start_time, realtime_start_time);

  const aos::monotonic_clock::time_point header_time =
      event_loop_->monotonic_now();

  // Write the transition record(s) for each channel ...
  for (FetcherStruct &f : fetchers_) {
    // Create writers from the new namer.
    NewDataWriter *next_writer = nullptr;
    NewDataWriter *next_timestamp_writer = nullptr;
    NewDataWriter *next_contents_writer = nullptr;

    if (f.wants_writer) {
      next_writer = log_namer_->MakeWriter(f.channel);
    }
    if (f.wants_timestamp_writer) {
      next_timestamp_writer = log_namer_->MakeTimestampWriter(f.channel);
    }
    if (f.wants_contents_writer) {
      next_contents_writer = log_namer_->MakeForwardedTimestampWriter(
          f.channel, CHECK_NOTNULL(f.timestamp_node));
    }

    if (f.fetcher->context().data != nullptr) {
      // Write the last message fetched as the first of the new log of this
      // type.  The timestamps on these will all be before the new start time.
      WriteData(next_writer, f);
      WriteTimestamps(next_timestamp_writer, f);
      WriteContent(next_contents_writer, f);

      // It is possible that a few more snuck in.  Write them all out also,
      // including any that should also be in the old log.
      while (true) {
        // Get the next message ...
        const auto start = event_loop_->monotonic_now();
        const bool got_new = f.fetcher->FetchNext();
        const auto end = event_loop_->monotonic_now();
        RecordFetchResult(start, end, got_new, &f);

        if (got_new) {
          if (f.fetcher->context().monotonic_event_time <=
              last_synchronized_time_) {
            WriteFetchedRecord(f);
            WriteData(next_writer, f);
            WriteTimestamps(next_timestamp_writer, f);
            WriteContent(next_contents_writer, f);

          } else {
            f.written = false;
            break;
          }

        } else {
          f.written = true;
          break;
        }
      }
    }

    // Switch fully over to the new writers.
    f.writer = next_writer;
    f.timestamp_writer = next_timestamp_writer;
    f.contents_writer = next_contents_writer;
  }

  const aos::monotonic_clock::time_point channel_time =
      event_loop_->monotonic_now();

  VLOG(1) << "Logging node as " << FlatbufferToJson(node_) << " restart_time "
          << last_synchronized_time_ << ", took "
          << chrono::duration<double>(header_time - beginning_time).count()
          << " to prepare and write header, "
          << chrono::duration<double>(channel_time - header_time).count()
          << " to write initial channel messages, boot uuid "
          << event_loop_->boot_uuid();

  return old_log_namer;
}

std::unique_ptr<LogNamer> Logger::StopLogging(
    aos::monotonic_clock::time_point end_time) {
  CHECK(log_namer_) << ": Not logging right now";

  if (end_time != aos::monotonic_clock::min_time) {
    // Folks like to use the on_logged_period_ callback to trigger stop and
    // start events.  We can't have those then recurse and try to stop again.
    // Rather than making everything reentrant, let's just instead block the
    // callback here.
    DoLogData(end_time, false);
  }
  timer_handler_->Disable();

  for (FetcherStruct &f : fetchers_) {
    f.writer = nullptr;
    f.timestamp_writer = nullptr;
    f.contents_writer = nullptr;
  }

  log_event_uuid_ = UUID::Zero();
  log_start_uuid_ = std::nullopt;

  return std::move(log_namer_);
}
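
// Typical shutdown sketch (not from this file): flush everything up to "now"
// and take back the namer so its files can be closed.
//
//   std::unique_ptr<aos::logger::LogNamer> namer =
//       logger.StopLogging(event_loop->monotonic_now());
//   namer.reset();  // Assumption: destroying the namer flushes and closes
//                   // its writers.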

void Logger::WriteHeader(aos::monotonic_clock::time_point monotonic_start_time,
                         aos::realtime_clock::time_point realtime_start_time) {
  if (configuration::MultiNode(configuration_)) {
    server_statistics_fetcher_.Fetch();
  }

  if (monotonic_start_time == aos::monotonic_clock::min_time) {
    monotonic_start_time = event_loop_->monotonic_now();
    realtime_start_time = event_loop_->realtime_now();
  }

  // We need to pick a point in time to declare the log file "started".  This
  // starts here.  It needs to be after everything is fetched so that the
  // fetchers are all pointed at the most recent message before the start
  // time.
  last_synchronized_time_ = monotonic_start_time;

  for (const Node *node : log_namer_->nodes()) {
    const int node_index = configuration::GetNodeIndex(configuration_, node);
    MaybeUpdateTimestamp(node, node_index, monotonic_start_time,
                         realtime_start_time);
  }
}

void Logger::WriteMissingTimestamps() {
  if (configuration::MultiNode(configuration_)) {
    server_statistics_fetcher_.Fetch();
  } else {
    return;
  }

  if (server_statistics_fetcher_.get() == nullptr) {
    return;
  }

  for (const Node *node : log_namer_->nodes()) {
    const int node_index = configuration::GetNodeIndex(configuration_, node);
    if (MaybeUpdateTimestamp(
            node, node_index,
            server_statistics_fetcher_.context().monotonic_event_time,
            server_statistics_fetcher_.context().realtime_event_time)) {
      VLOG(1) << "Timestamps changed on " << aos::FlatbufferToJson(node);
    }
  }
}

bool Logger::MaybeUpdateTimestamp(
    const Node *node, int node_index,
    aos::monotonic_clock::time_point monotonic_start_time,
    aos::realtime_clock::time_point realtime_start_time) {
  // Bail early if the start times are already set.
  if (node_ == node || !configuration::MultiNode(configuration_)) {
    if (log_namer_->monotonic_start_time(node_index,
                                         event_loop_->boot_uuid()) !=
        monotonic_clock::min_time) {
      return false;
    }

    // There are no offsets to compute for ourself, so always succeed.
    log_namer_->SetStartTimes(node_index, event_loop_->boot_uuid(),
                              monotonic_start_time, realtime_start_time,
                              monotonic_start_time, realtime_start_time);
    return true;
  } else if (server_statistics_fetcher_.get() != nullptr) {
    // We must be a remote node now.  Look for the connection and see if it is
    // connected.
    CHECK(server_statistics_fetcher_->has_connections());

    for (const message_bridge::ServerConnection *connection :
         *server_statistics_fetcher_->connections()) {
      if (connection->node()->name()->string_view() !=
          node->name()->string_view()) {
        continue;
      }

      if (connection->state() != message_bridge::State::CONNECTED) {
        VLOG(1) << node->name()->string_view()
                << " is not connected, can't start it yet.";
        break;
      }

      if (!connection->has_monotonic_offset()) {
        VLOG(1) << "Missing monotonic offset for setting start time for node "
                << aos::FlatbufferToJson(node);
        break;
      }

      CHECK(connection->has_boot_uuid());
      const UUID boot_uuid =
          UUID::FromString(connection->boot_uuid()->string_view());

      if (log_namer_->monotonic_start_time(node_index, boot_uuid) !=
          monotonic_clock::min_time) {
        break;
      }

      VLOG(1) << "Updating start time for "
              << aos::FlatbufferToJson(connection);

      // Found it and it is connected.  Compensate and go.
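      // Restated: assuming monotonic_offset() is the remote clock minus ours,
      // the remote's start time on its own clock is our monotonic_start_time
      // plus that offset; the realtime start is passed through unshifted.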
      log_namer_->SetStartTimes(
          node_index, boot_uuid,
          monotonic_start_time +
              std::chrono::nanoseconds(connection->monotonic_offset()),
          realtime_start_time, monotonic_start_time, realtime_start_time);
      return true;
    }
  }
  return false;
}

aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> Logger::MakeHeader(
    std::string_view config_sha256) {
  flatbuffers::FlatBufferBuilder fbb;
  fbb.ForceDefaults(true);

  flatbuffers::Offset<aos::Configuration> configuration_offset;
  if (!separate_config_) {
    configuration_offset = CopyFlatBuffer(configuration_, &fbb);
  } else {
    CHECK(!config_sha256.empty());
  }

  const flatbuffers::Offset<flatbuffers::String> name_offset =
      fbb.CreateString(name_);

  const flatbuffers::Offset<flatbuffers::String> logger_sha1_offset =
      logger_sha1_.empty() ? 0 : fbb.CreateString(logger_sha1_);
  const flatbuffers::Offset<flatbuffers::String> logger_version_offset =
      logger_version_.empty() ? 0 : fbb.CreateString(logger_version_);

  CHECK(log_event_uuid_ != UUID::Zero());
  const flatbuffers::Offset<flatbuffers::String> log_event_uuid_offset =
      log_event_uuid_.PackString(&fbb);

  const flatbuffers::Offset<flatbuffers::String> logger_instance_uuid_offset =
      logger_instance_uuid_.PackString(&fbb);

  flatbuffers::Offset<flatbuffers::String> log_start_uuid_offset;
  if (log_start_uuid_) {
    log_start_uuid_offset = fbb.CreateString(log_start_uuid_->ToString());
  }

  flatbuffers::Offset<flatbuffers::String> config_sha256_offset;
  if (!config_sha256.empty()) {
    config_sha256_offset = fbb.CreateString(config_sha256);
  }

  const flatbuffers::Offset<flatbuffers::String> logger_node_boot_uuid_offset =
      event_loop_->boot_uuid().PackString(&fbb);

  flatbuffers::Offset<Node> logger_node_offset;

  if (configuration::MultiNode(configuration_)) {
    logger_node_offset = RecursiveCopyFlatBuffer(node_, &fbb);
  }

  aos::logger::LogFileHeader::Builder log_file_header_builder(fbb);

  log_file_header_builder.add_name(name_offset);
  if (!logger_sha1_offset.IsNull()) {
    log_file_header_builder.add_logger_sha1(logger_sha1_offset);
  }
  if (!logger_version_offset.IsNull()) {
    log_file_header_builder.add_logger_version(logger_version_offset);
  }

  // Only add the node if we are running in a multinode configuration.
  if (configuration::MultiNode(configuration_)) {
    log_file_header_builder.add_logger_node(logger_node_offset);
  }

  if (!configuration_offset.IsNull()) {
    log_file_header_builder.add_configuration(configuration_offset);
  }
  // The worst case theoretical out of order is the polling period times 2.
  // One message could get logged right after the boundary, but be for right
  // before the next boundary.  And the reverse could happen for another
  // message.  Report back 3x to be extra safe, and because the cost isn't
  // huge on the read side.
  log_file_header_builder.add_max_out_of_order_duration(
      std::chrono::nanoseconds(3 * polling_period_).count());
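
  // Worked example: assuming a polling_period_ of 100ms (the usual default),
  // two messages can straddle a poll boundary by up to ~200ms, and the header
  // then advertises 3 * 100ms = 300ms of allowed reordering.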
686
Austin Schuhb06f03b2021-02-17 22:00:37 -0800687 log_file_header_builder.add_log_event_uuid(log_event_uuid_offset);
688 log_file_header_builder.add_logger_instance_uuid(logger_instance_uuid_offset);
689 if (!log_start_uuid_offset.IsNull()) {
690 log_file_header_builder.add_log_start_uuid(log_start_uuid_offset);
691 }
692 log_file_header_builder.add_logger_node_boot_uuid(
693 logger_node_boot_uuid_offset);
Austin Schuhb06f03b2021-02-17 22:00:37 -0800694
695 if (!config_sha256_offset.IsNull()) {
696 log_file_header_builder.add_configuration_sha256(config_sha256_offset);
697 }
698
699 fbb.FinishSizePrefixed(log_file_header_builder.Finish());
700 aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> result(
701 fbb.Release());
702
703 CHECK(result.Verify()) << ": Built a corrupted header.";
704
705 return result;
706}

void Logger::ResetStatisics() {
  max_message_fetch_time_ = std::chrono::nanoseconds::zero();
  max_message_fetch_time_channel_ = -1;
  max_message_fetch_time_size_ = -1;
  total_message_fetch_time_ = std::chrono::nanoseconds::zero();
  total_message_fetch_count_ = 0;
  total_message_fetch_bytes_ = 0;
  total_nop_fetch_time_ = std::chrono::nanoseconds::zero();
  total_nop_fetch_count_ = 0;
  max_copy_time_ = std::chrono::nanoseconds::zero();
  max_copy_time_channel_ = -1;
  max_copy_time_size_ = -1;
  total_copy_time_ = std::chrono::nanoseconds::zero();
  total_copy_count_ = 0;
  total_copy_bytes_ = 0;
}

void Logger::Rotate() {
  for (const Node *node : log_namer_->nodes()) {
    log_namer_->Rotate(node);
  }
}

void Logger::WriteData(NewDataWriter *writer, const FetcherStruct &f) {
  if (writer != nullptr) {
    const UUID source_node_boot_uuid =
        static_cast<int>(node_index_) != f.data_node_index
            ? f.fetcher->context().source_boot_uuid
            : event_loop_->boot_uuid();
    // Write!
    const auto start = event_loop_->monotonic_now();

    ContextDataCopier coppier(f.fetcher->context(), f.channel_index, f.log_type,
                              event_loop_);

    writer->CopyMessage(&coppier, source_node_boot_uuid, start);
    RecordCreateMessageTime(start, coppier.end_time(), f);

    VLOG(2) << "Wrote data as node " << FlatbufferToJson(node_)
            << " for channel "
            << configuration::CleanedChannelToString(f.fetcher->channel())
            << " to " << writer->filename();
  }
}

void Logger::WriteTimestamps(NewDataWriter *timestamp_writer,
                             const FetcherStruct &f) {
  if (timestamp_writer != nullptr) {
    // And now handle timestamps.

    // Tell our writer that we know something about the remote boot.
    timestamp_writer->UpdateRemote(
        f.data_node_index, f.fetcher->context().source_boot_uuid,
        f.fetcher->context().monotonic_remote_time,
        f.fetcher->context().monotonic_event_time, f.reliable_forwarding);

    const auto start = event_loop_->monotonic_now();
    ContextDataCopier coppier(f.fetcher->context(), f.channel_index,
                              LogType::kLogDeliveryTimeOnly, event_loop_);

    timestamp_writer->CopyMessage(&coppier, event_loop_->boot_uuid(), start);
    RecordCreateMessageTime(start, coppier.end_time(), f);

    VLOG(2) << "Wrote timestamps as node " << FlatbufferToJson(node_)
            << " for channel "
            << configuration::CleanedChannelToString(f.fetcher->channel())
            << " to " << timestamp_writer->filename() << " timestamp";
  }
}

void Logger::WriteContent(NewDataWriter *contents_writer,
                          const FetcherStruct &f) {
  if (contents_writer != nullptr) {
    const auto start = event_loop_->monotonic_now();
    // And now handle the special message contents channel.  Copy the
    // message into a FlatBufferBuilder and save it to disk.
    const RemoteMessage *msg =
        flatbuffers::GetRoot<RemoteMessage>(f.fetcher->context().data);

    CHECK(msg->has_boot_uuid()) << ": " << aos::FlatbufferToJson(msg);
    // Translate from the channel index that the event loop uses to the
    // channel index in the log file.
    const int channel_index =
        event_loop_to_logged_channel_index_[msg->channel_index()];

    const aos::monotonic_clock::time_point monotonic_timestamp_time =
        f.fetcher->context().monotonic_event_time;

    // Timestamps tell us information about what happened too!
    // Capture any reboots so UpdateRemote is properly recorded.
    contents_writer->UpdateBoot(UUID::FromVector(msg->boot_uuid()));

    // Start with recording info about the data flowing from our node to the
    // remote.
    const bool reliable =
        f.channel_reliable_contents.size() != 0u
            ? f.channel_reliable_contents[msg->channel_index()]
            : f.reliable_contents;
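    // f.channel_reliable_contents is only non-empty for a combined
    // RemoteMessage channel (see the timestamp_logger_channels setup in the
    // constructor); a split channel carries a single f.reliable_contents flag.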

    contents_writer->UpdateRemote(
        node_index_, event_loop_->boot_uuid(),
        monotonic_clock::time_point(
            chrono::nanoseconds(msg->monotonic_remote_time())),
        monotonic_clock::time_point(
            chrono::nanoseconds(msg->monotonic_sent_time())),
        reliable, monotonic_timestamp_time);

    RemoteMessageCopier coppier(msg, channel_index, monotonic_timestamp_time,
                                event_loop_);

    contents_writer->CopyMessage(&coppier, UUID::FromVector(msg->boot_uuid()),
                                 start);

    RecordCreateMessageTime(start, coppier.end_time(), f);
  }
}

void Logger::WriteFetchedRecord(FetcherStruct &f) {
  WriteData(f.writer, f);
  WriteTimestamps(f.timestamp_writer, f);
  WriteContent(f.contents_writer, f);
}

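// Polls every channel and writes out everything with an event time before t,
// then records t as last_synchronized_time_.  Returns true if some fetcher
// still had a message at or after t, which is how RestartLogging's drain loop
// knows to keep calling this.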
bool Logger::LogUntil(monotonic_clock::time_point t) {
  bool has_pending_messages = false;

  // Grab the latest ServerStatistics message.  This will always have the
  // opportunity to be >= to the current time, so it will always represent any
  // reboots which may have happened.
  WriteMissingTimestamps();

  // Write each channel to disk, one at a time.
  for (FetcherStruct &f : fetchers_) {
    while (true) {
      if (f.written) {
        const auto start = event_loop_->monotonic_now();
        const bool got_new = f.fetcher->FetchNext();
        const auto end = event_loop_->monotonic_now();
        RecordFetchResult(start, end, got_new, &f);
        if (!got_new) {
          VLOG(2) << "No new data on "
                  << configuration::CleanedChannelToString(
                         f.fetcher->channel());
          break;
        }
        f.written = false;
      }

      // TODO(james): Write tests to exercise this logic.
      if (f.fetcher->context().monotonic_event_time >= t) {
        has_pending_messages = true;
        break;
      }

      WriteFetchedRecord(f);

      f.written = true;
    }
  }
  last_synchronized_time_ = t;

  return has_pending_messages;
}

void Logger::DoLogData(const monotonic_clock::time_point end_time,
                       bool run_on_logged) {
  // We want to guarantee that messages aren't out of order by more than
  // max_out_of_order_duration.  To do this, we need sync points.  Every write
  // cycle should be a sync point.

  do {
    // Move the sync point up by at most polling_period.  This forces one sync
    // per iteration, even if it is small.
    LogUntil(std::min(last_synchronized_time_ + polling_period_, end_time));

    if (run_on_logged) {
      on_logged_period_();
    }

    // If we missed cycles, we could be pretty far behind.  Spin until we are
    // caught up.
  } while (last_synchronized_time_ + polling_period_ < end_time);
}

void Logger::RecordFetchResult(aos::monotonic_clock::time_point start,
                               aos::monotonic_clock::time_point end,
                               bool got_new, FetcherStruct *fetcher) {
  const auto duration = end - start;
  if (!got_new) {
    ++total_nop_fetch_count_;
    total_nop_fetch_time_ += duration;
    return;
  }
  ++total_message_fetch_count_;
  total_message_fetch_bytes_ += fetcher->fetcher->context().size;
  total_message_fetch_time_ += duration;
  if (duration > max_message_fetch_time_) {
    max_message_fetch_time_ = duration;
    max_message_fetch_time_channel_ = fetcher->channel_index;
    max_message_fetch_time_size_ = fetcher->fetcher->context().size;
  }
}

void Logger::RecordCreateMessageTime(aos::monotonic_clock::time_point start,
                                     aos::monotonic_clock::time_point end,
                                     const FetcherStruct &fetcher) {
  const auto duration = end - start;
  total_copy_time_ += duration;
  ++total_copy_count_;
  total_copy_bytes_ += fetcher.fetcher->context().size;
  if (duration > max_copy_time_) {
    max_copy_time_ = duration;
    max_copy_time_channel_ = fetcher.channel_index;
    max_copy_time_size_ = fetcher.fetcher->context().size;
  }
}

}  // namespace logger
}  // namespace aos