Fix loggers crashing after running out of space

If the logger ran out of disk space before opening its first file, the
destination writer was left as a nullptr, and the writer constructed in
the already-out-of-space state has no encoder. Later calls to Flush(),
FlushAtThreshold(), and total_bytes() then dereferenced null pointers.
Install a DetachedBufferWriter that is already out of space instead of
leaving a nullptr, and make the out-of-space paths tolerate a null
encoder.

Change-Id: Ic2b5bcc0c2ae475404658aa35904f19edb7c5ce7
diff --git a/aos/events/logging/log_namer.cc b/aos/events/logging/log_namer.cc
index a0625d0..2067bdc 100644
--- a/aos/events/logging/log_namer.cc
+++ b/aos/events/logging/log_namer.cc
@@ -334,6 +334,12 @@
// Refuse to open any new files, which might skip data. Any existing files
// are in the same folder, which means they're on the same filesystem, which
// means they're probably going to run out of space and get stuck too.
+ if (!destination->get()) {
+ // But avoid leaving a nullptr writer if we're out of space when
+ // attempting to open the first file.
+ *destination = std::make_unique<DetachedBufferWriter>(
+ DetachedBufferWriter::already_out_of_space_t());
+ }
return;
}
const std::string_view separator = base_name_.back() == '/' ? "" : "_";
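
The hunk above installs a writer constructed in the already-out-of-space state so callers never hold a nullptr. A minimal standalone sketch of this tag-constructor pattern (class and member names here are hypothetical stand-ins, not the actual AOS API):

#include <cstddef>
#include <iostream>
#include <memory>
#include <string_view>

// A writer that can be created directly in the "already out of space"
// state, so callers always hold a valid object instead of a nullptr.
class Writer {
 public:
  // Tag type selecting the out-of-space constructor.
  struct already_out_of_space_t {};

  Writer() = default;
  explicit Writer(already_out_of_space_t) : ran_out_of_space_(true) {}

  void Write(std::string_view data) {
    if (ran_out_of_space_) {
      // Drop data instead of crashing; mirrors the diff's policy of
      // refusing to write anything more once data has been dropped.
      return;
    }
    bytes_written_ += data.size();
  }

  size_t bytes_written() const { return bytes_written_; }

 private:
  bool ran_out_of_space_ = false;
  size_t bytes_written_ = 0;
};

int main() {
  // Simulates the failure path: opening the first file failed, so install
  // a writer that is already out of space rather than leaving a nullptr.
  std::unique_ptr<Writer> destination =
      std::make_unique<Writer>(Writer::already_out_of_space_t());
  destination->Write("dropped");
  std::cout << destination->bytes_written() << "\n";  // 0: data was dropped.
}

The tag type makes the degraded state explicit at the construction site, so every subsequent call goes through the same out-of-space guard rather than a scattered set of null checks.
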
diff --git a/aos/events/logging/logfile_utils.cc b/aos/events/logging/logfile_utils.cc
index 686e0a8..84ea84a 100644
--- a/aos/events/logging/logfile_utils.cc
+++ b/aos/events/logging/logfile_utils.cc
@@ -141,16 +141,21 @@
}
void DetachedBufferWriter::Flush() {
- const auto queue = encoder_->queue();
- if (queue.empty()) {
- return;
- }
if (ran_out_of_space_) {
// We don't want any later data to be written after space becomes available,
// so refuse to write anything more once we've dropped data because we ran
// out of space.
- VLOG(1) << "Ignoring queue: " << queue.size();
- encoder_->Clear(queue.size());
+ if (encoder_) {
+ VLOG(1) << "Ignoring queue: " << encoder_->queue().size();
+ encoder_->Clear(encoder_->queue().size());
+ } else {
+ VLOG(1) << "No queue to ignore";
+ }
+ return;
+ }
+
+ const auto queue = encoder_->queue();
+ if (queue.empty()) {
return;
}
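
The reordering in Flush() matters: the old code called encoder_->queue() before checking ran_out_of_space_, which dereferences a null encoder_ when the writer was constructed already out of space. A compilable sketch of why the check order fixes the crash (Encoder and Writer here are hypothetical stand-ins for the real classes):

#include <cstddef>
#include <deque>
#include <iostream>
#include <memory>

// Stand-in encoder: owns a queue of pending buffers.
struct Encoder {
  std::deque<int> queue;
  void Clear(size_t n) {
    queue.erase(queue.begin(), queue.begin() + static_cast<std::ptrdiff_t>(n));
  }
};

struct Writer {
  std::unique_ptr<Encoder> encoder;  // Stays null when built out of space.
  bool ran_out_of_space = false;

  void Flush() {
    // The out-of-space check must come first: if the writer was built in
    // the already-out-of-space state, encoder is null, so querying the
    // queue before this check (the old order) dereferences a null pointer.
    if (ran_out_of_space) {
      if (encoder) {
        encoder->Clear(encoder->queue.size());  // Drop anything queued.
      }
      return;
    }
    if (!encoder || encoder->queue.empty()) {
      return;
    }
    // ... write encoder->queue to disk here ...
    encoder->Clear(encoder->queue.size());
  }
};

int main() {
  Writer w;
  w.ran_out_of_space = true;  // Simulate out-of-space construction.
  w.Flush();                  // Safe: returns before touching the encoder.
  std::cout << "flushed without crashing\n";
}
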
@@ -205,6 +210,19 @@
}
void DetachedBufferWriter::FlushAtThreshold() {
+ if (ran_out_of_space_) {
+ // We don't want any later data to be written after space becomes available,
+ // so refuse to write anything more once we've dropped data because we ran
+ // out of space.
+ if (encoder_) {
+ VLOG(1) << "Ignoring queue: " << encoder_->queue().size();
+ encoder_->Clear(encoder_->queue().size());
+ } else {
+ VLOG(1) << "No queue to ignore";
+ }
+ return;
+ }
+
// Flush if we are at the max number of iovs per writev, because there's no
// point queueing up any more data in memory. Also flush once we have enough
// data queued up.
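
FlushAtThreshold() now begins with the same guard as Flush(). The duplicated block could plausibly be factored into a shared helper; a sketch of that shape, under the assumption that both entry points keep identical drop semantics (DropQueueIfOutOfSpace is a hypothetical name, not part of the commit):

#include <cstddef>
#include <deque>
#include <memory>

class WriterSketch {
 public:
  void Flush() {
    if (DropQueueIfOutOfSpace()) {
      return;
    }
    // ... normal flush path ...
  }

  void FlushAtThreshold() {
    if (DropQueueIfOutOfSpace()) {
      return;
    }
    // ... flush once enough data is queued ...
  }

 private:
  struct Encoder {
    std::deque<int> queue;
  };

  // Returns true when data has already been dropped for lack of space;
  // discards anything still queued so it is never written later, and
  // tolerates a null encoder from out-of-space construction.
  bool DropQueueIfOutOfSpace() {
    if (!ran_out_of_space_) {
      return false;
    }
    if (encoder_) {
      encoder_->queue.clear();
    }
    return true;
  }

  std::unique_ptr<Encoder> encoder_;
  bool ran_out_of_space_ = false;
};

int main() {
  WriterSketch w;
  w.Flush();             // No-op: nothing queued, not out of space.
  w.FlushAtThreshold();  // The same guard protects both entry points.
}
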
diff --git a/aos/events/logging/logfile_utils.h b/aos/events/logging/logfile_utils.h
index d251772..5fe45a5 100644
--- a/aos/events/logging/logfile_utils.h
+++ b/aos/events/logging/logfile_utils.h
@@ -107,7 +107,12 @@
void Close();
// Returns the total number of bytes written and currently queued.
- size_t total_bytes() const { return encoder_->total_bytes(); }
+ size_t total_bytes() const {
+ if (!encoder_) {
+ return 0;
+ }
+ return encoder_->total_bytes();
+ }
// The maximum time for a single write call, or 0 if none have been performed.
std::chrono::nanoseconds max_write_time() const { return max_write_time_; }
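
With total_bytes() returning 0 for a null encoder, every public accessor is safe on a writer built in the out-of-space state. A regression test along these lines could pin that down; this is a hypothetical sketch (the test, its namespace, and the gtest setup are assumptions, not part of this commit):

#include "aos/events/logging/logfile_utils.h"
#include "gtest/gtest.h"

// Hypothetical regression test: a writer constructed already out of space
// should report zero bytes instead of dereferencing its null encoder.
namespace aos::logger::testing {

TEST(DetachedBufferWriterTest, AlreadyOutOfSpaceAccessorsAreSafe) {
  DetachedBufferWriter writer{DetachedBufferWriter::already_out_of_space_t()};
  EXPECT_EQ(writer.total_bytes(), 0u);
}

}  // namespace aos::logger::testing
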