#ifndef AOS_EVENTS_LOGGER_H_
#define AOS_EVENTS_LOGGER_H_

#include <sys/uio.h>

#include <chrono>
#include <cstddef>
#include <cstdint>
#include <deque>
#include <memory>
#include <utility>
#include <vector>

#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "flatbuffers/flatbuffers.h"

#include "aos/events/event_loop.h"
#include "aos/events/logging/logger_generated.h"
#include "aos/events/simulated_event_loop.h"
#include "aos/time/time.h"
| 14 | |
| 15 | namespace aos { |
| 16 | namespace logger { |
| 17 | |
| 18 | // This class manages efficiently writing a sequence of detached buffers to a |
| 19 | // file. It queues them up and batches the write operation. |
| 20 | class DetachedBufferWriter { |
| 21 | public: |
| 22 | DetachedBufferWriter(absl::string_view filename); |
| 23 | ~DetachedBufferWriter(); |
| 24 | |
| 25 | // TODO(austin): Snappy compress the log file if it ends with .snappy! |
| 26 | |
| 27 | // Queues up a finished FlatBufferBuilder to be written. Steals the detached |
| 28 | // buffer from it. |
| 29 | void QueueSizedFlatbuffer(flatbuffers::FlatBufferBuilder *fbb); |
| 30 | // Queues up a detached buffer directly. |
| 31 | void QueueSizedFlatbuffer(flatbuffers::DetachedBuffer &&buffer); |
| 32 | |
| 33 | // Triggers data to be provided to the kernel and written. |
| 34 | void Flush(); |
| 35 | |
| 36 | private: |
| 37 | int fd_ = -1; |
| 38 | |
| 39 | // Size of all the data in the queue. |
| 40 | size_t queued_size_ = 0; |
| 41 | |
| 42 | // List of buffers to flush. |
| 43 | std::vector<flatbuffers::DetachedBuffer> queue_; |
| 44 | // List of iovecs to use with writev. This is a member variable to avoid |
| 45 | // churn. |
| 46 | std::vector<struct iovec> iovec_; |
| 47 | }; |
| 48 | |
// Packs a message pointed to by the context into a MessageHeader table built
// in fbb and returns its offset.  channel_index identifies the channel the
// message was fetched from so replay can route it back correctly.
flatbuffers::Offset<MessageHeader> PackMessage(
    flatbuffers::FlatBufferBuilder *fbb, const Context &context,
    int channel_index);
| 53 | |
// Logs all channels available in the event loop to disk every 100 ms.
// Start by logging one message per channel to capture any state and
// configuration that is sent rarely on a channel and would affect execution.
class Logger {
 public:
  // writer: destination for the packed log data.  Not owned; must outlive
  //   this Logger.
  // event_loop: event loop whose channels are logged.  Not owned.
  // polling_period: how often the fetchers are polled and data is written
  //   out (defaults to 100 ms, matching the class comment above).
  Logger(DetachedBufferWriter *writer, EventLoop *event_loop,
         std::chrono::milliseconds polling_period =
             std::chrono::milliseconds(100));

 private:
  // Polls the fetchers and writes newly fetched data out to writer_.
  void DoLogData();

  EventLoop *event_loop_;
  DetachedBufferWriter *writer_;

  // Structure to track both a fetcher, and if the data fetched has been
  // written.  We may want to delay writing data to disk so that we don't let
  // data get too far out of order when written to disk so we can avoid making
  // it too hard to sort when reading.
  struct FetcherStruct {
    std::unique_ptr<RawFetcher> fetcher;
    bool written = false;
  };

  // Fetchers for the channels being logged.
  std::vector<FetcherStruct> fetchers_;
  // Timer used to poll at polling_period_.
  TimerHandler *timer_handler_;

  // Period to poll the channels.
  const std::chrono::milliseconds polling_period_;

  // Last time that data was written for all channels to disk.
  monotonic_clock::time_point last_synchronized_time_;

  // Max size that the header has consumed.  This much extra data will be
  // reserved in the builder to avoid reallocating.
  size_t max_header_size_ = 0;
};
| 91 | |
// Replays all the channels in the logfile to the event loop.
class LogReader {
 public:
  // Opens the log file at filename.
  LogReader(absl::string_view filename);

  // Registers the timer and senders used to resend the messages from the log
  // file.
  void Register(EventLoop *event_loop);
  // Registers everything, but also updates the realtime time in sync.  Runs
  // until the log file starts.
  void Register(SimulatedEventLoopFactory *factory);
  // Unregisters the senders.
  void Deregister();

  // TODO(austin): Remap channels?

  // Returns the configuration from the log file.
  const Configuration *configuration();

  // Returns the starting timestamp for the log file.
  monotonic_clock::time_point monotonic_start_time();
  realtime_clock::time_point realtime_start_time();

  // TODO(austin): Add the ability to re-publish the fetched messages.  Add 2
  // options, one which publishes them *now*, and another which publishes them
  // to the simulated event loop factory back in time where they actually
  // happened.

 private:
  // Reads a chunk of data into data_.  Returns false if no data was read.
  bool ReadBlock();

  // Returns true if there is a full message available in the buffer, or if we
  // will have to read more data from disk.
  bool MessageAvailable();

  // Returns a span with the data for a message from the log file, excluding
  // the size.
  absl::Span<const uint8_t> ReadMessage();

  // Queues at least max_out_of_order_duration_ messages into channels_.
  void QueueMessages();

  // We need to read a large chunk at a time, then cut it up into parts and
  // sort.
  //
  // We want to read 256 KB chunks at a time.  This is the fastest read size.
  // This leaves us with a fragmentation problem though.
  //
  // The easy answer is to read 256 KB chunks.  Then, malloc and memcpy those
  // chunks into single flatbuffer messages and manage them in a sorted queue.
  // Everything is copied three times (into 256 kb buffer, then into separate
  // buffer, then into sender), but none of it is all that expensive.  We can
  // optimize if it is slow later.
  //
  // As we place the elements in the sorted list of times, keep doing this
  // until we read a message that is newer than the threshold.
  //
  // Then repeat.  Keep filling up the sorted list with 256 KB chunks (need a
  // small state machine so we can resume), and keep pulling messages back out
  // and sending.
  //
  // For sorting, we want to use the fact that each channel is sorted, and
  // then merge sort the channels.  Have a vector of deques, and then hold a
  // sorted list of pointers to those.
  //
  // TODO(austin): Multithreaded read at some point.  Gotta go faster!
  // Especially if we start compressing.

  // Allocator which doesn't zero initialize memory.
  template <typename T>
  struct DefaultInitAllocator {
    typedef T value_type;

    // Default-initialization (`U;` with no parens) deliberately leaves
    // trivial types uninitialized, unlike the value-initialization a default
    // allocator would perform.  That is the whole point of this allocator.
    template <typename U>
    void construct(U *p) {
      ::new (static_cast<void *>(p)) U;
    }

    template <typename U, typename... Args>
    void construct(U *p, Args &&... args) {
      ::new (static_cast<void *>(p)) U(std::forward<Args>(args)...);
    }

    T *allocate(std::size_t n) {
      return reinterpret_cast<T *>(::operator new(sizeof(T) * n));
    }

    template <typename U>
    void deallocate(U *p, std::size_t /*n*/) {
      ::operator delete(static_cast<void *>(p));
    }
  };

  // Minimum amount of data to queue up for sorting before we are guaranteed
  // to not see data out of order.
  std::chrono::nanoseconds max_out_of_order_duration_;

  // File descriptor for the log file.
  int fd_ = -1;

  // NOTE(review): when registered against a SimulatedEventLoopFactory,
  // event_loop_unique_ptr_ appears to own the loop and event_loop_ is the
  // non-owning working pointer; when Register(EventLoop*) is used,
  // event_loop_ presumably points at the caller's loop — confirm against the
  // .cc implementation.
  SimulatedEventLoopFactory *event_loop_factory_ = nullptr;
  std::unique_ptr<EventLoop> event_loop_unique_ptr_;
  EventLoop *event_loop_ = nullptr;
  // Timer used to resend the logged messages (see Register()).
  TimerHandler *timer_handler_;

  // Vector to read into.  This uses an allocator which doesn't zero
  // initialize the memory.
  std::vector<uint8_t, DefaultInitAllocator<uint8_t>> data_;

  // Amount of data consumed already in data_.
  size_t consumed_data_ = 0;

  // Vector holding the data for the configuration.
  std::vector<uint8_t> configuration_;

  // Moves the message to the correct channel queue.
  void EmplaceDataBack(FlatbufferVector<MessageHeader> &&new_data);

  // Pushes a pointer to the channel for the given timestamp to the sorted
  // channel list.
  void PushChannelHeap(monotonic_clock::time_point timestamp,
                       int channel_index);

  // Returns a pointer to the channel with the oldest message in it, and the
  // timestamp.
  const std::pair<monotonic_clock::time_point, int> &oldest_message() const {
    return channel_heap_.front();
  }

  // Pops a pointer to the channel with the oldest message in it, and the
  // timestamp.
  std::pair<monotonic_clock::time_point, int> PopOldestChannel();

  // Data structure to hold the list of messages, cached timestamp for the
  // oldest message, and sender to send with.
  struct ChannelData {
    monotonic_clock::time_point oldest_timestamp = monotonic_clock::min_time;
    std::deque<FlatbufferVector<MessageHeader>> data;
    std::unique_ptr<RawSender> raw_sender;

    // Returns the oldest message.
    const FlatbufferVector<MessageHeader> &front() { return data.front(); }

    // Returns the timestamp for the oldest message.
    const monotonic_clock::time_point front_timestamp() {
      return monotonic_clock::time_point(
          std::chrono::nanoseconds(front().message().monotonic_sent_time()));
    }
  };

  // List of channels and messages for them.
  std::vector<ChannelData> channels_;

  // Heap of channels so we can track which channel to send next.
  std::vector<std::pair<monotonic_clock::time_point, int>> channel_heap_;

  // Timestamp of the newest message in a channel queue.
  monotonic_clock::time_point newest_timestamp_ = monotonic_clock::min_time;

  // The time at which we need to read another chunk from the logfile.
  monotonic_clock::time_point queue_data_time_ = monotonic_clock::min_time;

  // Cached bit for if we have reached the end of the file.  Otherwise we will
  // hammer on the kernel asking for more data each time we send.
  bool end_of_file_ = false;
};
| 259 | |
| 260 | } // namespace logger |
| 261 | } // namespace aos |
| 262 | |
| 263 | #endif // AOS_EVENTS_LOGGER_H_ |