#ifndef AOS_EVENTS_LOGGING_LOGFILE_UTILS_H_
#define AOS_EVENTS_LOGGING_LOGFILE_UTILS_H_

#include <sys/uio.h>

#include <chrono>
#include <deque>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <tuple>
#include <utility>
#include <vector>

#include "absl/types/span.h"
#include "aos/containers/resizeable_buffer.h"
#include "aos/events/event_loop.h"
#include "aos/events/logging/buffer_encoder.h"
#include "aos/events/logging/logfile_sorting.h"
#include "aos/events/logging/logger_generated.h"
#include "aos/flatbuffers.h"
#include "flatbuffers/flatbuffers.h"

namespace aos::logger {

enum class LogType : uint8_t {
  // The message originated on this node and should be logged here.
  kLogMessage,
  // The message originated on another node, but only the delivery times are
  // logged here.
  kLogDeliveryTimeOnly,
  // The message originated on another node. Log it and the delivery times
  // together. The message_gateway is responsible for logging any messages
  // which didn't get delivered.
  kLogMessageAndDeliveryTime,
  // The message originated on the other node and should be logged on this node.
  kLogRemoteMessage
};

// This class manages efficiently writing a sequence of detached buffers to a
// file. It encodes them, queues them up, and batches the write operation.
class DetachedBufferWriter {
 public:
  // Marker struct for one of our constructor overloads.
  struct already_out_of_space_t {};

  DetachedBufferWriter(std::string_view filename,
                       std::unique_ptr<DetachedBufferEncoder> encoder);
  // Creates a dummy instance which won't even open a file. It will act as if
  // opening the file ran out of space immediately.
  DetachedBufferWriter(already_out_of_space_t) : ran_out_of_space_(true) {}
  DetachedBufferWriter(DetachedBufferWriter &&other);
  DetachedBufferWriter(const DetachedBufferWriter &) = delete;

  ~DetachedBufferWriter();

  DetachedBufferWriter &operator=(DetachedBufferWriter &&other);
  DetachedBufferWriter &operator=(const DetachedBufferWriter &) = delete;

  std::string_view filename() const { return filename_; }

  // This will be true until Close() is called, unless the file couldn't be
  // created due to running out of space.
  bool is_open() const { return fd_ != -1; }

  // Queues up a finished FlatBufferBuilder to be encoded and written.
  //
  // Triggers a flush if there's enough data queued up.
  //
  // Steals the detached buffer from it.
  void QueueSizedFlatbuffer(flatbuffers::FlatBufferBuilder *fbb) {
    QueueSizedFlatbuffer(fbb->Release());
  }
  // May steal the backing storage of buffer, or may leave it alone.
  void QueueSizedFlatbuffer(flatbuffers::DetachedBuffer &&buffer) {
    if (ran_out_of_space_) {
      return;
    }
    encoder_->Encode(std::move(buffer));
    FlushAtThreshold();
  }

  // Queues up data in span. May copy or may write it to disk immediately.
  void QueueSpan(absl::Span<const uint8_t> span);

  // Indicates we got ENOSPC when trying to write. After this returns true, no
  // further data is written.
  bool ran_out_of_space() const { return ran_out_of_space_; }

  // To avoid silently failing to write logfiles, you must call this before
  // destruction if ran_out_of_space() is true and the situation has been
  // handled.
  void acknowledge_out_of_space() {
    CHECK(ran_out_of_space_);
    acknowledge_ran_out_of_space_ = true;
  }

  // Fully flushes and closes the underlying file now. No additional data may be
  // enqueued after calling this.
  //
  // This will be performed in the destructor automatically.
  //
  // Note that this may set ran_out_of_space().
  void Close();

  // Returns the total number of bytes written and currently queued.
  size_t total_bytes() const { return encoder_->total_bytes(); }

  // The maximum time for a single write call, or 0 if none have been performed.
  std::chrono::nanoseconds max_write_time() const { return max_write_time_; }
  // The number of bytes in the longest write call, or -1 if none have been
  // performed.
  int max_write_time_bytes() const { return max_write_time_bytes_; }
  // The number of buffers in the longest write call, or -1 if none have been
  // performed.
  int max_write_time_messages() const { return max_write_time_messages_; }
  // The total time spent in write calls.
  std::chrono::nanoseconds total_write_time() const {
    return total_write_time_;
  }
  // The total number of writes which have been performed.
  int total_write_count() const { return total_write_count_; }
  // The total number of messages which have been written.
  int total_write_messages() const { return total_write_messages_; }
  // The total number of bytes which have been written.
  int total_write_bytes() const { return total_write_bytes_; }
  void ResetStatistics() {
    max_write_time_ = std::chrono::nanoseconds::zero();
    max_write_time_bytes_ = -1;
    max_write_time_messages_ = -1;
    total_write_time_ = std::chrono::nanoseconds::zero();
    total_write_count_ = 0;
    total_write_messages_ = 0;
    total_write_bytes_ = 0;
  }

 private:
  // Performs a single writev call with as much of the data we have queued up as
  // possible.
  //
  // This will normally take all of the data we have queued up, unless an
  // encoder has spit out a big enough chunk all at once that we can't manage
  // all of it.
  void Flush();

  // write_return is what write(2) or writev(2) returned. write_size is the
  // number of bytes we expected it to write.
  void HandleWriteReturn(ssize_t write_return, size_t write_size);

  void UpdateStatsForWrite(aos::monotonic_clock::duration duration,
                           ssize_t written, int iovec_size);

  // Flushes data if we've reached the threshold to do that as part of normal
  // operation.
  void FlushAtThreshold();

  std::string filename_;
  std::unique_ptr<DetachedBufferEncoder> encoder_;

  int fd_ = -1;
  bool ran_out_of_space_ = false;
  bool acknowledge_ran_out_of_space_ = false;

  // List of iovecs to use with writev. This is a member variable to avoid
  // churn.
  std::vector<struct iovec> iovec_;

  std::chrono::nanoseconds max_write_time_ = std::chrono::nanoseconds::zero();
  int max_write_time_bytes_ = -1;
  int max_write_time_messages_ = -1;
  std::chrono::nanoseconds total_write_time_ = std::chrono::nanoseconds::zero();
  int total_write_count_ = 0;
  int total_write_messages_ = 0;
  int total_write_bytes_ = 0;
};
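
// Example sketch of writing out a log part with DetachedBufferWriter.
// DummyEncoder (from buffer_encoder.h) is assumed here as a pass-through
// encoder, and the path is illustrative:
//
//   DetachedBufferWriter writer("/tmp/example_log.bfbs",
//                               std::make_unique<DummyEncoder>());
//   flatbuffers::FlatBufferBuilder fbb;
//   // ... build and FinishSizePrefixed() a LogFileHeader or MessageHeader ...
//   writer.QueueSizedFlatbuffer(&fbb);
//   writer.Close();
//   CHECK(!writer.ran_out_of_space());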

// Packs a message pointed to by the context into a MessageHeader.
flatbuffers::Offset<MessageHeader> PackMessage(
    flatbuffers::FlatBufferBuilder *fbb, const Context &context,
    int channel_index, LogType log_type);
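
// Example sketch of calling PackMessage to log the message a RawFetcher is
// currently holding (the fetcher, writer, and channel_index are assumed to
// exist in the surrounding code):
//
//   flatbuffers::FlatBufferBuilder fbb;
//   fbb.FinishSizePrefixed(PackMessage(&fbb, fetcher->context(),
//                                      channel_index, LogType::kLogMessage));
//   writer->QueueSizedFlatbuffer(&fbb);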

std::optional<SizePrefixedFlatbufferVector<LogFileHeader>> ReadHeader(
    std::string_view filename);
std::optional<SizePrefixedFlatbufferVector<MessageHeader>> ReadNthMessage(
    std::string_view filename, size_t n);
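
// Example sketch of peeking at a log file's header (the path is illustrative):
//
//   std::optional<SizePrefixedFlatbufferVector<LogFileHeader>> header =
//       ReadHeader("/tmp/example_log.bfbs");
//   if (header) {
//     LOG(INFO) << "Started at " << header->message().monotonic_start_time();
//   }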

// Class to read chunks out of a log file.
class SpanReader {
 public:
  SpanReader(std::string_view filename);

  std::string_view filename() const { return filename_; }

  // Returns a span with the data for a message from the log file, excluding
  // the size.
  absl::Span<const uint8_t> ReadMessage();

 private:
  // TODO(austin): Optimization:
  //   Allocate the 256k blocks like we do today. But, refcount them with
  //   shared_ptr pointed to by the messageheader that is returned. This avoids
  //   the copy. Need to do more benchmarking.
  //   And (Brian): Consider just mmapping the file and handing out refcounted
  //   pointers into that too.

  // Reads a chunk of data into data_. Returns false if no data was read.
  bool ReadBlock();

  std::string filename_;

  // File reader and data decoder.
  std::unique_ptr<DataDecoder> decoder_;

  // Vector to read into.
  ResizeableBuffer data_;

  // Amount of data consumed already in data_.
  size_t consumed_data_ = 0;
};
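
// Example sketch of walking the raw messages in a file with SpanReader. An
// empty span is assumed here to indicate that the file has been exhausted:
//
//   SpanReader reader("/tmp/example_log.bfbs");
//   while (true) {
//     const absl::Span<const uint8_t> span = reader.ReadMessage();
//     if (span.empty()) {
//       break;
//     }
//     // span covers one size-prefixed flatbuffer, minus the length prefix.
//   }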

// Class which handles reading the header and messages from the log file. This
// handles any per-file state left before merging below.
class MessageReader {
 public:
  MessageReader(std::string_view filename);

  std::string_view filename() const { return span_reader_.filename(); }

  // Returns the header from the log file.
  const LogFileHeader *log_file_header() const {
    return &raw_log_file_header_.message();
  }

  // Returns the raw data of the header from the log file.
  const SizePrefixedFlatbufferVector<LogFileHeader> &raw_log_file_header()
      const {
    return raw_log_file_header_;
  }

  // Returns the minimum amount of data needed to queue up for sorting before
  // we are guaranteed to not see data out of order.
  std::chrono::nanoseconds max_out_of_order_duration() const {
    return max_out_of_order_duration_;
  }

  // Returns the newest timestamp read out of the log file.
  monotonic_clock::time_point newest_timestamp() const {
    return newest_timestamp_;
  }

  // Returns the next message if there is one.
  std::optional<SizePrefixedFlatbufferVector<MessageHeader>> ReadMessage();

  // The time at which we need to read another chunk from the logfile.
  monotonic_clock::time_point queue_data_time() const {
    return newest_timestamp() - max_out_of_order_duration();
  }

 private:
  // Log chunk reader.
  SpanReader span_reader_;

  // Vector holding the raw data for the log file header.
  SizePrefixedFlatbufferVector<LogFileHeader> raw_log_file_header_;

  // Minimum amount of data to queue up for sorting before we are guaranteed
  // to not see data out of order.
  std::chrono::nanoseconds max_out_of_order_duration_;

  // Timestamp of the newest message in a channel queue.
  monotonic_clock::time_point newest_timestamp_ = monotonic_clock::min_time;
};
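
// Example sketch of draining a single log file in on-disk order:
//
//   MessageReader reader("/tmp/example_log.bfbs");
//   while (std::optional<SizePrefixedFlatbufferVector<MessageHeader>> msg =
//              reader.ReadMessage()) {
//     // msg->message() is the next MessageHeader from the file.
//   }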

// A class to seamlessly read messages from a list of part files.
class PartsMessageReader {
 public:
  PartsMessageReader(LogParts log_parts);

  std::string_view filename() const { return message_reader_.filename(); }

  // Returns the minimum amount of data needed to queue up for sorting before
  // we are guaranteed to not see data out of order.
  std::chrono::nanoseconds max_out_of_order_duration() const {
    return message_reader_.max_out_of_order_duration();
  }

  // Returns the newest timestamp read out of the log file.
  monotonic_clock::time_point newest_timestamp() const {
    return newest_timestamp_;
  }

  // Returns the next message if there is one, or nullopt if we have reached the
  // end of all the files.
  // Note: reading the next message may change the max_out_of_order_duration().
  std::optional<SizePrefixedFlatbufferVector<MessageHeader>> ReadMessage();

 private:
  // Opens the next log and updates message_reader_. Sets done_ if there is
  // nothing more to do.
  void NextLog();

  const LogParts parts_;
  size_t next_part_index_ = 1u;
  bool done_ = false;
  MessageReader message_reader_;

  monotonic_clock::time_point newest_timestamp_ = monotonic_clock::min_time;
};
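
// Example sketch of reading a whole set of parts back to back (obtaining a
// sorted LogParts from logfile_sorting.h is assumed to have happened already):
//
//   PartsMessageReader reader(std::move(parts));
//   while (std::optional<SizePrefixedFlatbufferVector<MessageHeader>> msg =
//              reader.ReadMessage()) {
//     // Messages continue seamlessly across part file boundaries.
//   }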

// Struct to hold a message as it gets sorted on a single node.
struct Message {
  // The channel.
  uint32_t channel_index = 0xffffffff;
  // The local queue index.
  uint32_t queue_index = 0xffffffff;
  // The local timestamp on the monotonic clock.
  monotonic_clock::time_point timestamp = monotonic_clock::min_time;
  // The data (either a timestamp header, or a data header).
  SizePrefixedFlatbufferVector<MessageHeader> data;

  bool operator<(const Message &m2) const;
  bool operator>=(const Message &m2) const;
};

std::ostream &operator<<(std::ostream &os, const Message &m);

class TimestampMerger;

// A design requirement is that the relevant data for a channel is not more than
// max_out_of_order_duration out of order. We approach sorting in layers.
//
// 1) Split each (maybe chunked) log file into one queue per channel. Read this
//    log file looking for data pertaining to a specific node.
//    (SplitMessageReader)
// 2) Merge all the data per channel from the different log files into a sorted
//    list of timestamps and messages. (TimestampMerger)
// 3) Combine the timestamps and messages. (TimestampMerger)
// 4) Merge all the channels to produce the next message on a node.
//    (ChannelMerger)
// 5) Duplicate this entire stack per node.

// This class splits messages and timestamps up into a queue per channel, and
// handles reading data from multiple chunks.
class SplitMessageReader {
 public:
  SplitMessageReader(const std::vector<std::string> &filenames);

  // Sets the TimestampMerger that gets notified for each channel. The node
  // that the TimestampMerger is merging as needs to be passed in.
  void SetTimestampMerger(TimestampMerger *timestamp_merger, int channel,
                          const Node *target_node);

  // Returns the (timestamp, queue_index, message_header) for the oldest message
  // in a channel, or max_time if there is nothing in the channel.
  std::tuple<monotonic_clock::time_point, uint32_t, const MessageHeader *>
  oldest_message(int channel) {
    return channels_[channel].data.front_timestamp();
  }

  // Returns the (timestamp, queue_index, message_header) for the oldest
  // delivery time in a channel, or max_time if there is nothing in the channel.
  std::tuple<monotonic_clock::time_point, uint32_t, const MessageHeader *>
  oldest_message(int channel, int destination_node) {
    return channels_[channel].timestamps[destination_node].front_timestamp();
  }

  // Returns the timestamp, queue_index, and message for the oldest data on a
  // channel. Requeues data as needed.
  std::tuple<monotonic_clock::time_point, uint32_t,
             SizePrefixedFlatbufferVector<MessageHeader>>
  PopOldest(int channel_index);

  // Returns the timestamp, queue_index, and message for the oldest timestamp on
  // a channel delivered to a node. Requeues data as needed.
  std::tuple<monotonic_clock::time_point, uint32_t,
             SizePrefixedFlatbufferVector<MessageHeader>>
  PopOldestTimestamp(int channel, int node_index);

  // Returns the header for the log files.
  const LogFileHeader *log_file_header() const {
    return &log_file_header_.message();
  }

  const SizePrefixedFlatbufferVector<LogFileHeader> &raw_log_file_header()
      const {
    return log_file_header_;
  }

  // Returns the starting time for this set of log files.
  monotonic_clock::time_point monotonic_start_time() {
    return monotonic_clock::time_point(
        std::chrono::nanoseconds(log_file_header()->monotonic_start_time()));
  }
  realtime_clock::time_point realtime_start_time() {
    return realtime_clock::time_point(
        std::chrono::nanoseconds(log_file_header()->realtime_start_time()));
  }

  // Returns the configuration from the log file header.
  const Configuration *configuration() const {
    return log_file_header()->configuration();
  }

  // Returns the node whose point of view this log file is from. Make sure this
  // is a pointer in the configuration() nodes list so it can be consumed
  // elsewhere.
  const Node *node() const {
    if (configuration()->has_nodes()) {
      return configuration::GetNodeOrDie(configuration(),
                                         log_file_header()->node());
    } else {
      CHECK(!log_file_header()->has_node());
      return nullptr;
    }
  }

  // Returns the timestamp of the newest message read from the log file, and the
  // timestamp that we need to re-queue data.
  monotonic_clock::time_point newest_timestamp() const {
    return newest_timestamp_;
  }

  // Returns the next time to trigger a requeue.
  monotonic_clock::time_point time_to_queue() const { return time_to_queue_; }

  // Returns the minimum amount of data needed to queue up for sorting before
  // we are guaranteed to not see data out of order.
  std::chrono::nanoseconds max_out_of_order_duration() const {
    return message_reader_->max_out_of_order_duration();
  }

  std::string_view filename() const { return message_reader_->filename(); }

  // Adds more messages to the sorted list. This reads enough data such that
  // oldest_message_time can be replayed safely. Returns false if the log file
  // has all been read.
  bool QueueMessages(monotonic_clock::time_point oldest_message_time);

  // Returns debug strings for a channel, and timestamps for a node.
  std::string DebugString(int channel) const;
  std::string DebugString(int channel, int node_index) const;

  // Returns true if all the messages have been queued from the last log file in
  // the list of log file chunks.
  bool at_end() const { return at_end_; }

 private:
  // TODO(austin): Need to copy or refcount the message instead of running
  // multiple copies of the reader. Or maybe have a "as_node" index and hide it
  // inside.

  // Moves to the next log file in the list.
  bool NextLogFile();

  // Filenames of the log files.
  std::vector<std::string> filenames_;
  // And the index of the next file to open.
  size_t next_filename_index_ = 0;

  // Node we are reading as.
  const Node *target_node_ = nullptr;

  // Log file header to report. This is a copy.
  SizePrefixedFlatbufferVector<LogFileHeader> log_file_header_;
  // Current log file being read.
  std::unique_ptr<MessageReader> message_reader_;

  // Data structure to hold the list of messages, cached timestamp for the
  // oldest message, and sender to send with.
  struct MessageHeaderQueue {
    // If true, this is a timestamp queue.
    bool timestamps = false;

    // Returns a reference to the oldest message.
    SizePrefixedFlatbufferVector<MessageHeader> &front() {
      CHECK_GT(data_.size(), 0u);
      return data_.front();
    }

    // Adds a message to the back of the queue. Returns true if it was actually
    // emplaced.
    bool emplace_back(SizePrefixedFlatbufferVector<MessageHeader> &&msg);

    // Drops the front message. Invalidates the front() reference.
    void PopFront();

    // The size of the queue.
    size_t size() { return data_.size(); }

    // Returns a debug string with info about each message in the queue.
    std::string DebugString() const;

    // Returns the (timestamp, queue_index, message_header) for the oldest
    // message.
    const std::tuple<monotonic_clock::time_point, uint32_t,
                     const MessageHeader *>
    front_timestamp() {
      const MessageHeader &message = front().message();
      return std::make_tuple(
          monotonic_clock::time_point(
              std::chrono::nanoseconds(message.monotonic_sent_time())),
          message.queue_index(), &message);
    }

    // Pointer to the timestamp merger for this queue if available.
    TimestampMerger *timestamp_merger = nullptr;
    // Pointer to the reader which feeds this queue.
    SplitMessageReader *split_reader = nullptr;

   private:
    // The data.
    std::deque<SizePrefixedFlatbufferVector<MessageHeader>> data_;
  };

  // All the queues needed for a channel. There isn't going to be data in all
  // of these.
  struct ChannelData {
    // The data queue for the channel.
    MessageHeaderQueue data;
    // Queues for timestamps for each node.
    std::vector<MessageHeaderQueue> timestamps;
  };

  // Data for all the channels.
  std::vector<ChannelData> channels_;

  // Once we know the node that this SplitMessageReader will be writing as,
  // there will be only one MessageHeaderQueue that a specific channel matches.
  // Precompute this here for efficiency.
  std::vector<MessageHeaderQueue *> channels_to_write_;

  monotonic_clock::time_point time_to_queue_ = monotonic_clock::min_time;

  // Latches true when we hit the end of the last log file and there is no sense
  // poking it further.
  bool at_end_ = false;

  // Timestamp of the newest message that was read and actually queued. We want
  // to track this independently from the log file because we need the
  // timestamps here to be timestamps of messages that are queued.
  monotonic_clock::time_point newest_timestamp_ = monotonic_clock::min_time;
};

class ChannelMerger;

// Sorts channels (and timestamps) from multiple log files for a single channel.
class TimestampMerger {
 public:
  TimestampMerger(const Configuration *configuration,
                  std::vector<SplitMessageReader *> split_message_readers,
                  int channel_index, const Node *target_node,
                  ChannelMerger *channel_merger);

  // Metadata used to schedule the message.
  struct DeliveryTimestamp {
    monotonic_clock::time_point monotonic_event_time =
        monotonic_clock::min_time;
    realtime_clock::time_point realtime_event_time = realtime_clock::min_time;
    uint32_t queue_index = 0xffffffff;

    monotonic_clock::time_point monotonic_remote_time =
        monotonic_clock::min_time;
    realtime_clock::time_point realtime_remote_time = realtime_clock::min_time;
    uint32_t remote_queue_index = 0xffffffff;
  };

  // Pushes SplitMessageReader onto the timestamp heap. This should only be
  // called when timestamps are placed in the channel this class is merging for
  // the reader.
  void UpdateTimestamp(
      SplitMessageReader *split_message_reader,
      std::tuple<monotonic_clock::time_point, uint32_t, const MessageHeader *>
          oldest_message_time) {
    PushTimestampHeap(oldest_message_time, split_message_reader);
  }
  // Pushes SplitMessageReader onto the message heap. This should only be
  // called when data is placed in the channel this class is merging for the
  // reader.
  void Update(
      SplitMessageReader *split_message_reader,
      std::tuple<monotonic_clock::time_point, uint32_t, const MessageHeader *>
          oldest_message_time) {
    PushMessageHeap(oldest_message_time, split_message_reader);
  }

  // Returns the oldest combined timestamp and data for this channel. If there
  // isn't a matching piece of data, returns only the timestamp with no data.
  // The caller can determine what the appropriate action is to recover.
  std::tuple<DeliveryTimestamp, SizePrefixedFlatbufferVector<MessageHeader>>
  PopOldest();

  // Tracks if the channel merger has pushed this onto its heap or not.
  bool pushed() { return pushed_; }
  // Sets if this has been pushed to the channel merger heap. Should only be
  // called by the channel merger.
  void set_pushed(bool pushed) { pushed_ = pushed; }

  // Returns a debug string with the heaps printed out.
  std::string DebugString() const;

  // Returns true if we have timestamps.
  bool has_timestamps() const { return has_timestamps_; }

  // Records that one of the log files ran out of data. This should only be
  // called by a SplitMessageReader.
  void NoticeAtEnd();

  aos::monotonic_clock::time_point channel_merger_time() {
    if (has_timestamps_) {
      return std::get<0>(timestamp_heap_[0]);
    } else {
      return std::get<0>(message_heap_[0]);
    }
  }

 private:
  // Pushes messages and timestamps to the corresponding heaps.
  void PushMessageHeap(
      std::tuple<monotonic_clock::time_point, uint32_t, const MessageHeader *>
          timestamp,
      SplitMessageReader *split_message_reader);
  void PushTimestampHeap(
      std::tuple<monotonic_clock::time_point, uint32_t, const MessageHeader *>
          timestamp,
      SplitMessageReader *split_message_reader);

  // Pops a message from the message heap. This automatically triggers the
  // split message reader to re-fetch any new data.
  std::tuple<monotonic_clock::time_point, uint32_t,
             SizePrefixedFlatbufferVector<MessageHeader>>
  PopMessageHeap();

  std::tuple<monotonic_clock::time_point, uint32_t, const MessageHeader *>
  oldest_message() const;
  std::tuple<monotonic_clock::time_point, uint32_t, const MessageHeader *>
  oldest_timestamp() const;
  // Pops a message from the timestamp heap. This automatically triggers the
  // split message reader to re-fetch any new data.
  std::tuple<monotonic_clock::time_point, uint32_t,
             SizePrefixedFlatbufferVector<MessageHeader>>
  PopTimestampHeap();

  const Configuration *configuration_;

  // If true, this is a forwarded channel and timestamps should be matched.
  bool has_timestamps_ = false;

  // Tracks if the ChannelMerger has pushed this onto its queue.
  bool pushed_ = false;

  // The split message readers used for source data.
  std::vector<SplitMessageReader *> split_message_readers_;

  // The channel to merge.
  int channel_index_;

  // Our node.
  int node_index_;

  // Heaps for messages and timestamps.
  std::vector<
      std::tuple<monotonic_clock::time_point, uint32_t, SplitMessageReader *>>
      message_heap_;
  std::vector<
      std::tuple<monotonic_clock::time_point, uint32_t, SplitMessageReader *>>
      timestamp_heap_;

  // Parent channel merger.
  ChannelMerger *channel_merger_;
};

// This class handles constructing all the split message readers, channel
// mergers, and combining the results.
class ChannelMerger {
 public:
  // Builds a ChannelMerger around a set of log files. These are of the format:
  //   {
  //     {log1_part0, log1_part1, ...},
  //     {log2}
  //   }
  // The inner vector is a list of log file chunks which together form a log file.
  // The outer vector is a list of log files with subsets of the messages, or
  // messages from different nodes.
  ChannelMerger(const std::vector<std::vector<std::string>> &filenames);

  // Returns the nodes that we know how to merge.
  const std::vector<const Node *> nodes() const;
  // Sets the node that we will return messages as. Returns true if the node
  // has log files and will produce data. This can only be called once, and
  // will likely corrupt state if called a second time.
  bool SetNode(const Node *target_node);

  // Everything else needs the node set before it works.

  // Returns a timestamp for the oldest message in this group of logfiles.
  monotonic_clock::time_point OldestMessageTime() const;
  // Pops the oldest message.
  std::tuple<TimestampMerger::DeliveryTimestamp, int,
             SizePrefixedFlatbufferVector<MessageHeader>>
  PopOldest();

  // Returns the config for this set of log files.
  const Configuration *configuration() const {
    return log_file_header()->configuration();
  }

  const LogFileHeader *log_file_header() const {
    return &log_file_header_.message();
  }

  // Returns the start times for the configured node's log files.
  monotonic_clock::time_point monotonic_start_time() const {
    return monotonic_clock::time_point(
        std::chrono::nanoseconds(log_file_header()->monotonic_start_time()));
  }
  realtime_clock::time_point realtime_start_time() const {
    return realtime_clock::time_point(
        std::chrono::nanoseconds(log_file_header()->realtime_start_time()));
  }

  // Returns the node set by SetNode above.
  const Node *node() const { return node_; }

  // Called by the TimestampMerger when new data is available with the provided
  // timestamp and channel_index.
  void Update(monotonic_clock::time_point timestamp, int channel_index) {
    PushChannelHeap(timestamp, channel_index);
  }

  // Returns a debug string with all the heaps in it. Generally only useful for
  // debugging what went wrong.
  std::string DebugString() const;

  // Returns true if one of the log files has finished reading everything. When
  // log file chunks are involved, this means that the last chunk in a log file
  // has been read. It is acceptable to be missing data at this point in time.
  bool at_end() const { return at_end_; }

  // Marks that one of the log files is at the end. This should only be called
  // by timestamp mergers.
  void NoticeAtEnd() { at_end_ = true; }

 private:
  // Pushes the timestamp for new data on the provided channel.
  void PushChannelHeap(monotonic_clock::time_point timestamp,
                       int channel_index);

  // CHECKs that channel_heap_ and timestamp_heap_ are valid heaps.
  void VerifyHeaps();

  // All the message readers.
  std::vector<std::unique_ptr<SplitMessageReader>> split_message_readers_;

  // The log header we are claiming to be.
  SizePrefixedFlatbufferVector<LogFileHeader> log_file_header_;

  // The timestamp mergers which combine data from the split message readers.
  std::vector<TimestampMerger> timestamp_mergers_;

  // A heap of the channel readers and timestamps for the oldest data in each.
  std::vector<std::pair<monotonic_clock::time_point, int>> channel_heap_;

  // Configured node.
  const Node *node_;

  bool at_end_ = false;

  // Cached copy of the list of nodes.
  std::vector<const Node *> nodes_;

  // Last time popped. Used to detect events being returned out of order.
  monotonic_clock::time_point last_popped_time_ = monotonic_clock::min_time;
};
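
// Example sketch of replaying every message for one node, oldest first. The
// file paths and node name are illustrative, config is assumed to be the
// Configuration being replayed against, single-node setups pass nullptr to
// SetNode(), and OldestMessageTime() is assumed to return
// monotonic_clock::max_time once everything has been popped:
//
//   ChannelMerger merger({{"/tmp/log_part0.bfbs", "/tmp/log_part1.bfbs"}});
//   CHECK(merger.SetNode(configuration::GetNode(config, "roborio")));
//   while (merger.OldestMessageTime() != monotonic_clock::max_time) {
//     auto [delivery_timestamp, channel_index, message] = merger.PopOldest();
//     // message.message() is the MessageHeader for channel_index.
//   }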

// Returns the node name with a trailing space, or an empty string if we are on
// a single node.
std::string MaybeNodeName(const Node *);

}  // namespace aos::logger

#endif  // AOS_EVENTS_LOGGING_LOGFILE_UTILS_H_