#include "aos/events/logging/logger.h"

#include <fcntl.h>
#include <limits.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <vector>

#include "Eigen/Dense"
#include "absl/types/span.h"
#include "aos/events/event_loop.h"
#include "aos/events/logging/logger_generated.h"
#include "aos/flatbuffer_merge.h"
#include "aos/network/team_number.h"
#include "aos/time/time.h"
#include "flatbuffers/flatbuffers.h"

DEFINE_bool(skip_missing_forwarding_entries, false,
            "If true, drop any forwarding entries with missing data. If "
            "false, CHECK.");

DEFINE_bool(timestamps_to_csv, false,
            "If true, write all the time synchronization information to a set "
            "of CSV files in /tmp/. This should only be needed when debugging "
            "time synchronization.");

namespace aos {
namespace logger {

namespace chrono = std::chrono;

Logger::Logger(DetachedBufferWriter *writer, EventLoop *event_loop,
               std::chrono::milliseconds polling_period)
    : Logger(std::make_unique<LocalLogNamer>(writer, event_loop->node()),
             event_loop, polling_period) {}

Logger::Logger(std::unique_ptr<LogNamer> log_namer, EventLoop *event_loop,
               std::chrono::milliseconds polling_period)
    : event_loop_(event_loop),
      log_namer_(std::move(log_namer)),
      timer_handler_(event_loop_->AddTimer([this]() { DoLogData(); })),
      polling_period_(polling_period) {
  VLOG(1) << "Starting logger for " << FlatbufferToJson(event_loop_->node());
  int channel_index = 0;
  for (const Channel *channel : *event_loop_->configuration()->channels()) {
    FetcherStruct fs;
    const bool is_local =
        configuration::ChannelIsSendableOnNode(channel, event_loop_->node());

    const bool is_readable =
        configuration::ChannelIsReadableOnNode(channel, event_loop_->node());
    const bool log_message = configuration::ChannelMessageIsLoggedOnNode(
                                 channel, event_loop_->node()) &&
                             is_readable;

    const bool log_delivery_times =
        (event_loop_->node() == nullptr)
            ? false
            : configuration::ConnectionDeliveryTimeIsLoggedOnNode(
                  channel, event_loop_->node(), event_loop_->node());

    if (log_message || log_delivery_times) {
      fs.fetcher = event_loop->MakeRawFetcher(channel);
      VLOG(1) << "Logging channel "
              << configuration::CleanedChannelToString(channel);

      if (log_delivery_times) {
        VLOG(1) << "  Delivery times";
        fs.timestamp_writer = log_namer_->MakeTimestampWriter(channel);
      }
      if (log_message) {
        VLOG(1) << "  Data";
        fs.writer = log_namer_->MakeWriter(channel);
        if (!is_local) {
          fs.log_type = LogType::kLogRemoteMessage;
        }
      }
      fs.channel_index = channel_index;
      fs.written = false;
      fetchers_.emplace_back(std::move(fs));
    }
    ++channel_index;
  }

  // When things start, we want to log the header, then the most recent
  // messages available on each fetcher to capture the previous state, then
  // start polling.
  event_loop_->OnRun([this, polling_period]() {
    // Grab data from each channel right before we declare the log file
    // started so we can capture the latest message on each channel. This lets
    // infrequent, non-periodic messages (like configuration) still get
    // logged.
    for (FetcherStruct &f : fetchers_) {
      f.written = !f.fetcher->Fetch();
    }

    // We need to pick a point in time to declare the log file "started". This
    // starts here. It needs to be after everything is fetched so that the
    // fetchers are all pointed at the most recent message before the start
    // time.
    monotonic_start_time_ = event_loop_->monotonic_now();
    realtime_start_time_ = event_loop_->realtime_now();
    last_synchronized_time_ = monotonic_start_time_;

    LOG(INFO) << "Logging node as " << FlatbufferToJson(event_loop_->node())
              << " start_time " << monotonic_start_time_;

    WriteHeader();

    timer_handler_->Setup(event_loop_->monotonic_now() + polling_period,
                          polling_period);
  });
}
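
// A minimal usage sketch (names outside this file are assumptions;
// DetachedBufferWriter's filename constructor comes from logfile_utils.h):
//
//   aos::logger::DetachedBufferWriter writer("/tmp/log.bfbs");
//   aos::ShmEventLoop event_loop(&config.message());
//   aos::logger::Logger logger(&writer, &event_loop,
//                              std::chrono::milliseconds(100));
//   event_loop.Run();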

// TODO(austin): Set the remote start time to the first time we see a remote
// message when we are logging those messages separately? Need to signal what
// to do, or how to get a good timestamp.
void Logger::WriteHeader() {
  for (const Node *node : log_namer_->nodes()) {
    WriteHeader(node);
  }
}

void Logger::WriteHeader(const Node *node) {
  // Now write the header with this timestamp in it.
  flatbuffers::FlatBufferBuilder fbb;
  fbb.ForceDefaults(true);

  flatbuffers::Offset<aos::Configuration> configuration_offset =
      CopyFlatBuffer(event_loop_->configuration(), &fbb);

  flatbuffers::Offset<flatbuffers::String> string_offset =
      fbb.CreateString(network::GetHostname());

  flatbuffers::Offset<Node> node_offset;
  if (node != nullptr) {
    node_offset = CopyFlatBuffer(node, &fbb);
  }

  aos::logger::LogFileHeader::Builder log_file_header_builder(fbb);

  log_file_header_builder.add_name(string_offset);

  // Only add the node if we are running in a multinode configuration.
  if (node != nullptr) {
    log_file_header_builder.add_node(node_offset);
  }

  log_file_header_builder.add_configuration(configuration_offset);
  // The theoretical worst-case out-of-order duration is twice the polling
  // period: one message could get logged right after the boundary, but be for
  // right before the next boundary, and the reverse could happen for another
  // message. Report back 3x the polling period to be extra safe, and because
  // the cost isn't huge on the read side. For example, a 100 ms polling
  // period reports a 300 ms max_out_of_order_duration.
  log_file_header_builder.add_max_out_of_order_duration(
      std::chrono::duration_cast<std::chrono::nanoseconds>(3 * polling_period_)
          .count());

  log_file_header_builder.add_monotonic_start_time(
      std::chrono::duration_cast<std::chrono::nanoseconds>(
          monotonic_start_time_.time_since_epoch())
          .count());
  log_file_header_builder.add_realtime_start_time(
      std::chrono::duration_cast<std::chrono::nanoseconds>(
          realtime_start_time_.time_since_epoch())
          .count());

  fbb.FinishSizePrefixed(log_file_header_builder.Finish());
  log_namer_->WriteHeader(&fbb, node);
}

void Logger::Rotate(DetachedBufferWriter *writer) {
  Rotate(std::make_unique<LocalLogNamer>(writer, event_loop_->node()));
}

void Logger::Rotate(std::unique_ptr<LogNamer> log_namer) {
  // Force data up until now to be written.
  DoLogData();

  // Swap the writer out, and re-write the header.
  log_namer_ = std::move(log_namer);

  // And then update the writers.
  for (FetcherStruct &f : fetchers_) {
    const Channel *channel =
        event_loop_->configuration()->channels()->Get(f.channel_index);
    if (f.timestamp_writer != nullptr) {
      f.timestamp_writer = log_namer_->MakeTimestampWriter(channel);
    }
    if (f.writer != nullptr) {
      f.writer = log_namer_->MakeWriter(channel);
    }
  }

  WriteHeader();
}
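
// Rotation usage sketch (when to rotate is up to the caller, e.g. a size or
// time threshold; the new writer must outlive the logger):
//
//   aos::logger::DetachedBufferWriter next_writer("/tmp/log_part1.bfbs");
//   logger.Rotate(&next_writer);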

void Logger::DoLogData() {
  // We want to guarantee that messages aren't out of order by more than
  // max_out_of_order_duration. To do this, we need sync points. Every write
  // cycle should be a sync point.
  const monotonic_clock::time_point monotonic_now =
      event_loop_->monotonic_now();

  do {
    // Move the sync point up by at most polling_period. This forces one sync
    // per iteration, even if it is small.
    last_synchronized_time_ =
        std::min(last_synchronized_time_ + polling_period_, monotonic_now);
    // Write each channel to disk, one at a time.
    for (FetcherStruct &f : fetchers_) {
      while (true) {
        if (f.written) {
          if (!f.fetcher->FetchNext()) {
            VLOG(2) << "No new data on "
                    << configuration::CleanedChannelToString(
                           f.fetcher->channel());
            break;
          } else {
            f.written = false;
          }
        }

        CHECK(!f.written);

        // TODO(james): Write tests to exercise this logic.
        if (f.fetcher->context().monotonic_event_time <
            last_synchronized_time_) {
          if (f.writer != nullptr) {
            // Write!
            flatbuffers::FlatBufferBuilder fbb(f.fetcher->context().size +
                                               max_header_size_);
            fbb.ForceDefaults(true);

            fbb.FinishSizePrefixed(PackMessage(&fbb, f.fetcher->context(),
                                               f.channel_index, f.log_type));

            VLOG(2) << "Writing data as node "
                    << FlatbufferToJson(event_loop_->node()) << " for channel "
                    << configuration::CleanedChannelToString(
                           f.fetcher->channel())
                    << " to " << f.writer->filename() << " data "
                    << FlatbufferToJson(
                           flatbuffers::GetSizePrefixedRoot<MessageHeader>(
                               fbb.GetBufferPointer()));

            max_header_size_ = std::max(
                max_header_size_, fbb.GetSize() - f.fetcher->context().size);
            f.writer->QueueSizedFlatbuffer(&fbb);
          }

          if (f.timestamp_writer != nullptr) {
            // And now handle timestamps.
            flatbuffers::FlatBufferBuilder fbb;
            fbb.ForceDefaults(true);

            fbb.FinishSizePrefixed(PackMessage(&fbb, f.fetcher->context(),
                                               f.channel_index,
                                               LogType::kLogDeliveryTimeOnly));

            VLOG(2) << "Writing timestamps as node "
                    << FlatbufferToJson(event_loop_->node()) << " for channel "
                    << configuration::CleanedChannelToString(
                           f.fetcher->channel())
                    << " to " << f.timestamp_writer->filename() << " timestamp "
                    << FlatbufferToJson(
                           flatbuffers::GetSizePrefixedRoot<MessageHeader>(
                               fbb.GetBufferPointer()));

            f.timestamp_writer->QueueSizedFlatbuffer(&fbb);
          }

          f.written = true;
        } else {
          break;
        }
      }
    }

    // If we missed cycles, we could be pretty far behind. Spin until we are
    // caught up.
  } while (last_synchronized_time_ + polling_period_ < monotonic_now);
}

LogReader::LogReader(std::string_view filename,
                     const Configuration *replay_configuration)
    : LogReader(std::vector<std::string>{std::string(filename)},
                replay_configuration) {}

LogReader::LogReader(const std::vector<std::string> &filenames,
                     const Configuration *replay_configuration)
    : LogReader(std::vector<std::vector<std::string>>{filenames},
                replay_configuration) {}

LogReader::LogReader(const std::vector<std::vector<std::string>> &filenames,
                     const Configuration *replay_configuration)
    : filenames_(filenames),
      log_file_header_(ReadHeader(filenames[0][0])),
      replay_configuration_(replay_configuration) {
  MakeRemappedConfig();

  if (replay_configuration) {
    CHECK_EQ(configuration::MultiNode(configuration()),
             configuration::MultiNode(replay_configuration))
        << ": Log file and replay config need to both be multi or single "
           "node.";
  }

  if (!configuration::MultiNode(configuration())) {
    states_.emplace_back(std::make_unique<State>());
    State *state = states_[0].get();

    state->channel_merger = std::make_unique<ChannelMerger>(filenames);
  } else {
    if (replay_configuration) {
      CHECK_EQ(configuration()->nodes()->size(),
               replay_configuration->nodes()->size())
          << ": Log file and replay config need to have matching nodes "
             "lists.";
    }
    states_.resize(configuration()->nodes()->size());
  }
}
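
// A minimal replay sketch for a single-node log (the event_loop_factory()
// accessor is assumed to be declared in logger.h):
//
//   aos::logger::LogReader reader("/tmp/log.bfbs");
//   reader.Register();
//   std::unique_ptr<aos::EventLoop> printer_event_loop =
//       reader.event_loop_factory()->MakeEventLoop("printer");
//   // ...make watchers on printer_event_loop to observe replayed data...
//   reader.event_loop_factory()->Run();
//   reader.Deregister();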

LogReader::~LogReader() {
  Deregister();
  if (offset_fp_ != nullptr) {
    fclose(offset_fp_);
  }
}

const Configuration *LogReader::logged_configuration() const {
  return log_file_header_.message().configuration();
}

const Configuration *LogReader::configuration() const {
  return remapped_configuration_;
}

std::vector<const Node *> LogReader::Nodes() const {
  // Because the Node pointer will only be valid if it actually points to
  // memory owned by remapped_configuration_, we need to wait for the
  // remapped_configuration_ to be populated before accessing it.
  //
  // Also note that whenever a map is changed, the nodes in here are
  // invalidated.
  CHECK(remapped_configuration_ != nullptr)
      << ": Need to call Register before the node() pointer will be valid.";
  return configuration::GetNodes(remapped_configuration_);
}

monotonic_clock::time_point LogReader::monotonic_start_time(const Node *node) {
  State *state =
      states_[configuration::GetNodeIndex(configuration(), node)].get();
  CHECK(state != nullptr) << ": Unknown node " << FlatbufferToJson(node);

  return state->channel_merger->monotonic_start_time();
}

realtime_clock::time_point LogReader::realtime_start_time(const Node *node) {
  State *state =
      states_[configuration::GetNodeIndex(configuration(), node)].get();
  CHECK(state != nullptr) << ": Unknown node " << FlatbufferToJson(node);

  return state->channel_merger->realtime_start_time();
}

void LogReader::Register() {
  event_loop_factory_unique_ptr_ =
      std::make_unique<SimulatedEventLoopFactory>(configuration());
  Register(event_loop_factory_unique_ptr_.get());
}

void LogReader::Register(SimulatedEventLoopFactory *event_loop_factory) {
  event_loop_factory_ = event_loop_factory;

  for (const Node *node : configuration::GetNodes(configuration())) {
    const size_t node_index =
        configuration::GetNodeIndex(configuration(), node);
    states_[node_index] = std::make_unique<State>();
    State *state = states_[node_index].get();

    state->channel_merger = std::make_unique<ChannelMerger>(filenames_);

    state->node_event_loop_factory =
        event_loop_factory_->GetNodeEventLoopFactory(node);
    state->event_loop_unique_ptr =
        event_loop_factory->MakeEventLoop("log_reader", node);

    Register(state->event_loop_unique_ptr.get());
  }

  // We need to now seed our per-node time offsets and get everything set up
  // to run.
  const size_t num_nodes = !configuration::MultiNode(logged_configuration())
                               ? 1u
                               : logged_configuration()->nodes()->size();

  // It is easiest to solve for per-node offsets with a matrix rather than
  // trying to solve the equations by hand. So let's get after it.
  //
  // Now, build up the map matrix.
  //
  // sample_matrix_ = map_matrix_ * offset_matrix_
  map_matrix_ = Eigen::MatrixXd::Zero(filters_.size() + 1, num_nodes);

  sample_matrix_ = Eigen::VectorXd::Zero(filters_.size() + 1);
  offset_matrix_ = Eigen::VectorXd::Zero(num_nodes);

  // And the base offset matrix, which will be a copy of the initial offset
  // matrix.
  base_offset_matrix_ =
      Eigen::Matrix<std::chrono::nanoseconds, Eigen::Dynamic, 1>::Zero(
          num_nodes);

  // All offsets should sum to 0. Add that as the first constraint in our
  // least squares.
  map_matrix_.row(0).setOnes();

  {
    // Now, add the a - b -> sample elements.
    size_t i = 1;
    for (std::pair<const std::tuple<const Node *, const Node *>,
                   message_bridge::ClippedAverageFilter> &filter : filters_) {
      const Node *const node_a = std::get<0>(filter.first);
      const Node *const node_b = std::get<1>(filter.first);

      const size_t node_a_index =
          configuration::GetNodeIndex(configuration(), node_a);
      const size_t node_b_index =
          configuration::GetNodeIndex(configuration(), node_b);

      // +a
      map_matrix_(i, node_a_index) = 1.0;
      // -b
      map_matrix_(i, node_b_index) = -1.0;

      // -> sample
      filter.second.set_sample_pointer(&sample_matrix_(i, 0));

      ++i;
    }
  }
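
  // As an illustration, with three nodes {0, 1, 2} and filters for the pairs
  // (0, 1) and (1, 2), the least-squares system being solved is:
  //
  //   [ 1  1  1 ]   [ offset_0 ]   [ 0          ]  <- offsets sum to zero
  //   [ 1 -1  0 ] * [ offset_1 ] = [ sample_0_1 ]  <- measured 0 - 1 offset
  //   [ 0  1 -1 ]   [ offset_2 ]   [ sample_1_2 ]  <- measured 1 - 2 offset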

  // The rank of the map matrix tells you if all the nodes are in
  // communication with each other, which tells you if the offsets are
  // observable.
  const size_t connected_nodes =
      Eigen::FullPivLU<Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>>(
          map_matrix_)
          .rank();
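
  // For example, with nodes {A, B, C} where only A and B exchange timestamps,
  // the rows are [1 1 1] and [1 -1 0]: rank 2 < 3 nodes, so C's offset is
  // unobservable and the CHECK below fires.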

  // We don't need to support isolated nodes until someone has a real use
  // case.
  CHECK_EQ(connected_nodes, num_nodes)
      << ": There is a node which isn't communicating with the rest.";

  // Now, iterate through all the timestamps from all the nodes and seed
  // everything.
  for (std::unique_ptr<State> &state : states_) {
    for (size_t i = 0; i < logged_configuration()->channels()->size(); ++i) {
      TimestampMerger::DeliveryTimestamp timestamp =
          state->channel_merger->OldestTimestampForChannel(i);
      if (timestamp.monotonic_event_time != monotonic_clock::min_time) {
        CHECK(state->MaybeUpdateTimestamp(timestamp, i));
      }
    }
  }

  // Make sure all the samples have been seeded. sample_matrix_ is a column
  // vector, so walk its rows, skipping row 0 (the sum-to-zero constraint).
  for (int i = 1; i < sample_matrix_.rows(); ++i) {
    // The seeding logic is pretty basic right now because we don't have great
    // use cases yet. It wants to see data from every node. Blow up for now,
    // and once we have a reason to do something different, update this logic.
    // Maybe read further in the log file? Or seed off the realtime time?
    CHECK_NE(sample_matrix_(i, 0), 0.0)
        << ": Sample " << i << " is not seeded.";
  }

  // And solve.
  offset_matrix_ = SolveOffsets();

  // Save off the base offsets so we can work in deltas from here out. That
  // will significantly simplify the numerical precision problems.
  for (size_t i = 0; i < num_nodes; ++i) {
    base_offset_matrix_(i, 0) =
        std::chrono::duration_cast<std::chrono::nanoseconds>(
            std::chrono::duration<double>(offset_matrix_(i, 0)));
  }

  {
    // Shift everything so we never could (reasonably) require the distributed
    // clock to have a large backwards jump in time. This makes it so the boot
    // time on the node up the longest will essentially start matching the
    // distributed clock.
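    // For example, base offsets of {+5 ms, 0 ms, -2 ms} get shifted by -5 ms
    // to {0 ms, -5 ms, -7 ms}: the largest offset lands exactly at zero, and
    // no node's clock is pushed into the future.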
    const chrono::nanoseconds offset = -base_offset_matrix_.maxCoeff();
    for (int i = 0; i < base_offset_matrix_.rows(); ++i) {
      base_offset_matrix_(i, 0) += offset;
    }
  }

  {
    // Re-compute the samples and setup all the filters so that they
    // subtract this base offset.

    size_t i = 1;
    for (std::pair<const std::tuple<const Node *, const Node *>,
                   message_bridge::ClippedAverageFilter> &filter : filters_) {
      CHECK(filter.second.sample_pointer() == &sample_matrix_(i, 0));

      const Node *const node_a = std::get<0>(filter.first);
      const Node *const node_b = std::get<1>(filter.first);

      const size_t node_a_index =
          configuration::GetNodeIndex(configuration(), node_a);
      const size_t node_b_index =
          configuration::GetNodeIndex(configuration(), node_b);

      filter.second.set_base_offset(base_offset_matrix_(node_a_index) -
                                    base_offset_matrix_(node_b_index));

      ++i;
    }
  }

  // Now, iterate again through all the offsets now that we have set the base
  // offset to something sane. This will seed everything with an accurate
  // initial offset.
  for (std::unique_ptr<State> &state : states_) {
    for (size_t i = 0; i < logged_configuration()->channels()->size(); ++i) {
      TimestampMerger::DeliveryTimestamp timestamp =
          state->channel_merger->OldestTimestampForChannel(i);
      if (timestamp.monotonic_event_time != monotonic_clock::min_time) {
        CHECK(state->MaybeUpdateTimestamp(timestamp, i));
      }
    }
  }

  UpdateOffsets();

  // We want to start the log file at the last start time of the log files
  // from all the nodes. Compute how long each node's simulation needs to run
  // to move time to this point.
  distributed_clock::time_point start_time = distributed_clock::min_time;

  for (std::unique_ptr<State> &state : states_) {
    // Setup the realtime clock to have something sane in it now.
    state->node_event_loop_factory->SetRealtimeOffset(
        state->channel_merger->monotonic_start_time(),
        state->channel_merger->realtime_start_time());
    // And now that the realtime offset is set, start computing the start time
    // on the distributed clock.
    start_time = std::max(start_time,
                          state->node_event_loop_factory->ToDistributedClock(
                              state->channel_merger->monotonic_start_time()));
  }
  CHECK_GE(start_time, distributed_clock::epoch());

  // Forwarding is tracked per channel. If it is enabled, we want to turn it
  // off. Otherwise messages replayed will get forwarded across to the other
  // nodes, and also replayed on the other nodes. This may not satisfy all
  // our users, but it'll start the discussion.
  if (configuration::MultiNode(event_loop_factory_->configuration())) {
    for (size_t i = 0; i < logged_configuration()->channels()->size(); ++i) {
      const Channel *channel = logged_configuration()->channels()->Get(i);
      const Node *node = configuration::GetNode(
          configuration(), channel->source_node()->string_view());

      State *state =
          states_[configuration::GetNodeIndex(configuration(), node)].get();

      const Channel *remapped_channel =
          RemapChannel(state->event_loop, channel);

      event_loop_factory_->DisableForwarding(remapped_channel);
    }
  }

  // While we are starting the system up, we might be relying on matching data
  // to timestamps on log files where the timestamp log file starts before the
  // data. In this case, it is reasonable to expect missing data.
  ignore_missing_data_ = true;
  event_loop_factory_->RunFor(start_time.time_since_epoch());
  // Now that we are running for real, missing data means that the log file is
  // corrupted or went wrong.
  ignore_missing_data_ = false;
}

void LogReader::UpdateOffsets() {
  // TODO(austin): Evaluate less accurate inverses. We might be able to
  // do some tricks to keep the accuracy up.
  offset_matrix_ = SolveOffsets();

  size_t node_index = 0;
  for (std::unique_ptr<State> &state : states_) {
    state->node_event_loop_factory->SetDistributedOffset(offset(node_index));
    ++node_index;
  }
}
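// Returns the filter for the (node_a, node_b) pair, plus a bool which is true
// when the arguments matched the stored (canonical) order and false when the
// lookup was reversed, so callers know whether to feed forward or reverse
// samples into the filter.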
std::tuple<message_bridge::ClippedAverageFilter *, bool> LogReader::GetFilter(
    const Node *node_a, const Node *node_b) {
  CHECK_NE(node_a, node_b);
  CHECK_EQ(configuration::GetNode(configuration(), node_a), node_a);
  CHECK_EQ(configuration::GetNode(configuration(), node_b), node_b);

  if (node_a > node_b) {
    return std::make_pair(std::get<0>(GetFilter(node_b, node_a)), false);
  }

  auto tuple = std::make_tuple(node_a, node_b);

  auto it = filters_.find(tuple);

  if (it == filters_.end()) {
    auto &x = filters_
                  .insert(std::make_pair(
                      tuple, message_bridge::ClippedAverageFilter()))
                  .first->second;
    if (FLAGS_timestamps_to_csv) {
      std::string fwd_name =
          absl::StrCat("/tmp/timestamp_", node_a->name()->string_view(), "_",
                       node_b->name()->string_view(), ".csv");
      x.fwd_fp = fopen(fwd_name.c_str(), "w");
      std::string rev_name =
          absl::StrCat("/tmp/timestamp_", node_b->name()->string_view(), "_",
                       node_a->name()->string_view(), ".csv");
      x.rev_fp = fopen(rev_name.c_str(), "w");
    }

    return std::make_tuple(&x, true);
  } else {
    return std::make_tuple(&(it->second), true);
  }
}

bool LogReader::State::MaybeUpdateTimestamp(
    const TimestampMerger::DeliveryTimestamp &channel_timestamp,
    int channel_index) {
  if (channel_timestamp.monotonic_remote_time == monotonic_clock::min_time) {
    return false;
  }

  // Got a forwarding timestamp!
  CHECK(std::get<0>(filters[channel_index]) != nullptr);

  // Call the correct method depending on if we are the forward or reverse
  // direction here.
  if (std::get<1>(filters[channel_index])) {
    std::get<0>(filters[channel_index])
        ->FwdSample(channel_timestamp.monotonic_event_time,
                    channel_timestamp.monotonic_event_time -
                        channel_timestamp.monotonic_remote_time);
  } else {
    std::get<0>(filters[channel_index])
        ->RevSample(channel_timestamp.monotonic_event_time,
                    channel_timestamp.monotonic_event_time -
                        channel_timestamp.monotonic_remote_time);
  }
  return true;
}

void LogReader::Register(EventLoop *event_loop) {
  State *state =
      states_[configuration::GetNodeIndex(configuration(), event_loop->node())]
          .get();

  state->event_loop = event_loop;

  // We don't run timing reports when trying to print out logged data, because
  // otherwise we would end up printing out the timing reports themselves...
  // This is only really relevant when we are replaying into a simulation.
  event_loop->SkipTimingReport();
  event_loop->SkipAosLog();

  const bool has_data = state->channel_merger->SetNode(event_loop->node());

  state->channels.resize(logged_configuration()->channels()->size());
  state->filters.resize(state->channels.size());

  state->channel_target_event_loop_factory.resize(state->channels.size());

  for (size_t i = 0; i < state->channels.size(); ++i) {
    const Channel *channel =
        RemapChannel(event_loop, logged_configuration()->channels()->Get(i));

    state->channels[i] = event_loop->MakeRawSender(channel);

    state->filters[i] = std::make_tuple(nullptr, false);

    if (!configuration::ChannelIsSendableOnNode(channel, event_loop->node()) &&
        configuration::ChannelIsReadableOnNode(channel, event_loop->node())) {
      const Node *target_node = configuration::GetNode(
          event_loop->configuration(), channel->source_node()->string_view());
      state->filters[i] = GetFilter(event_loop->node(), target_node);

      if (event_loop_factory_ != nullptr) {
        state->channel_target_event_loop_factory[i] =
            event_loop_factory_->GetNodeEventLoopFactory(target_node);
      }
    }
  }

  // If we didn't find any log files with data in them, we won't ever get a
  // callback or be live. So skip the rest of the setup.
  if (!has_data) {
    return;
  }

  state->timer_handler = event_loop->AddTimer([this, state]() {
    if (state->channel_merger->OldestMessage() == monotonic_clock::max_time) {
      --live_nodes_;
      VLOG(1) << "Node down!";
      if (live_nodes_ == 0) {
        event_loop_factory_->Exit();
      }
      return;
    }
    bool update_offsets = false;
    TimestampMerger::DeliveryTimestamp channel_timestamp;
    int channel_index;
    FlatbufferVector<MessageHeader> channel_data =
        FlatbufferVector<MessageHeader>::Empty();

    std::tie(channel_timestamp, channel_index, channel_data) =
        state->channel_merger->PopOldest();

    const monotonic_clock::time_point monotonic_now =
        state->event_loop->context().monotonic_event_time;
    CHECK(monotonic_now == channel_timestamp.monotonic_event_time)
        << ": " << FlatbufferToJson(state->event_loop->node()) << " Now "
        << monotonic_now << " trying to send "
        << channel_timestamp.monotonic_event_time << " failure "
        << state->channel_merger->DebugString();

    if (channel_timestamp.monotonic_event_time >
            state->channel_merger->monotonic_start_time() ||
        event_loop_factory_ != nullptr) {
      if ((!ignore_missing_data_ && !FLAGS_skip_missing_forwarding_entries &&
           !state->channel_merger->at_end()) ||
          channel_data.message().data() != nullptr) {
        CHECK(channel_data.message().data() != nullptr)
            << ": Got a message without data. Forwarding entry which was "
               "not matched? Use --skip_missing_forwarding_entries to ignore "
               "this.";

        if (state->MaybeUpdateTimestamp(channel_timestamp, channel_index)) {
          // Confirm that the message was sent on the sending node before the
          // destination node (this node). As a proxy, do this by making sure
          // that time on the source node is past when the message was sent.
          CHECK_LT(channel_timestamp.monotonic_remote_time,
                   state->channel_target_event_loop_factory[channel_index]
                       ->monotonic_now());

          update_offsets = true;

          if (FLAGS_timestamps_to_csv) {
            if (offset_fp_ == nullptr) {
              offset_fp_ = fopen("/tmp/offsets.csv", "w");
              fprintf(
                  offset_fp_,
                  "# time_since_start, offset node 0, offset node 1, ...\n");
              first_time_ = channel_timestamp.realtime_event_time;
            }

            fprintf(offset_fp_, "%.9f",
                    std::chrono::duration_cast<std::chrono::duration<double>>(
                        channel_timestamp.realtime_event_time - first_time_)
                        .count());
            for (int i = 0; i < base_offset_matrix_.rows(); ++i) {
              fprintf(
                  offset_fp_, ", %.9f",
                  offset_matrix_(i, 0) +
                      std::chrono::duration_cast<
                          std::chrono::duration<double>>(
                          base_offset_matrix_(i, 0))
                          .count());
            }
            fprintf(offset_fp_, "\n");
          }

        } else {
          CHECK(std::get<0>(state->filters[channel_index]) == nullptr);
        }

        // If we have access to the factory, use it to fix the realtime time.
        if (state->node_event_loop_factory != nullptr) {
          state->node_event_loop_factory->SetRealtimeOffset(
              channel_timestamp.monotonic_event_time,
              channel_timestamp.realtime_event_time);
        }

        state->channels[channel_index]->Send(
            channel_data.message().data()->Data(),
            channel_data.message().data()->size(),
            channel_timestamp.monotonic_remote_time,
            channel_timestamp.realtime_remote_time,
            channel_timestamp.remote_queue_index);
      } else if (state->channel_merger->at_end()) {
        // We are at the end of the log file and found missing data. Finish
        // reading the rest of the log file and call it quits. We don't want
        // to replay partial data.
        while (state->channel_merger->OldestMessage() !=
               monotonic_clock::max_time) {
          state->channel_merger->PopOldest();
        }
      }

    } else {
      LOG(WARNING)
          << "Not sending data from before the start of the log file. "
          << channel_timestamp.monotonic_event_time.time_since_epoch().count()
          << " start " << monotonic_start_time().time_since_epoch().count()
          << " " << FlatbufferToJson(channel_data);
    }

    const monotonic_clock::time_point next_time =
        state->channel_merger->OldestMessage();
    if (next_time != monotonic_clock::max_time) {
      state->timer_handler->Setup(next_time);
    } else {
      // Set a timer up immediately after now to die. If we don't do this,
      // then the senders waiting on the message we just read will never get
      // called.
      if (event_loop_factory_ != nullptr) {
        state->timer_handler->Setup(monotonic_now +
                                    event_loop_factory_->send_delay() +
                                    std::chrono::nanoseconds(1));
      }
    }

    // Once we make this call, the current time changes. So do everything
    // which involves time before changing it. That especially includes
    // sending the message.
    if (update_offsets) {
      UpdateOffsets();
    }
  });

  ++live_nodes_;

  if (state->channel_merger->OldestMessage() != monotonic_clock::max_time) {
    event_loop->OnRun([state]() {
      state->timer_handler->Setup(state->channel_merger->OldestMessage());
    });
  }
}

void LogReader::Deregister() {
  // Make sure that things get destroyed in the correct order, rather than
  // relying on getting the order correct in the class definition.
  for (std::unique_ptr<State> &state : states_) {
    // States are only created in Register, so guard against Deregister (e.g.
    // from the destructor) running before any state exists.
    if (!state) {
      continue;
    }
    for (size_t i = 0; i < state->channels.size(); ++i) {
      state->channels[i].reset();
    }
    state->event_loop_unique_ptr.reset();
    state->event_loop = nullptr;
    state->node_event_loop_factory = nullptr;
  }

  event_loop_factory_unique_ptr_.reset();
  event_loop_factory_ = nullptr;
}

void LogReader::RemapLoggedChannel(std::string_view name,
                                   std::string_view type,
                                   std::string_view add_prefix) {
  for (size_t ii = 0; ii < logged_configuration()->channels()->size(); ++ii) {
    const Channel *const channel = logged_configuration()->channels()->Get(ii);
    if (channel->name()->str() == name &&
        channel->type()->string_view() == type) {
      CHECK_EQ(0u, remapped_channels_.count(ii))
          << "Already remapped channel "
          << configuration::CleanedChannelToString(channel);
      remapped_channels_[ii] = std::string(add_prefix) + std::string(name);
      VLOG(1) << "Remapping channel "
              << configuration::CleanedChannelToString(channel)
              << " to have name " << remapped_channels_[ii];
      MakeRemappedConfig();
      return;
    }
  }
  LOG(FATAL) << "Unable to locate channel with name " << name << " and type "
             << type;
}
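
// Usage sketch: remap a logged channel out of the way so a live replacement
// can be sent during replay. The channel name and type here are hypothetical,
// and add_prefix is assumed to default to "/original" in logger.h:
//
//   reader.RemapLoggedChannel("/camera", "vision.ImageMatchResult");
//   // Logged messages now replay on "/original/camera", leaving "/camera"
//   // free for new senders.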

void LogReader::MakeRemappedConfig() {
  for (std::unique_ptr<State> &state : states_) {
    if (state) {
      CHECK(!state->event_loop)
          << ": Can't change the mapping after the events are scheduled.";
    }
  }

  // If no remapping occurred and we are using the original config, then there
  // is nothing interesting to do here.
  if (remapped_channels_.empty() && replay_configuration_ == nullptr) {
    remapped_configuration_ = logged_configuration();
    return;
  }
  // Config to copy Channel definitions from. Use the specified
  // replay_configuration_ if it has been provided.
  const Configuration *const base_config = replay_configuration_ == nullptr
                                               ? logged_configuration()
                                               : replay_configuration_;
  // The remapped config will be identical to the base_config, except that it
  // will have a bunch of extra channels in the channel list, which are exact
  // copies of the remapped channels, but with different names.
  // Because the flatbuffers API is a pain to work with, this requires a bit
  // of a song-and-dance to get copied over.
  // The order of operations is to:
  // 1) Make a flatbuffer builder for a config that will just contain a list
  //    of the new channels that we want to add.
  // 2) For each channel that we are remapping:
  //    a) Make a buffer/builder and construct into it a Channel table that
  //       only contains the new name for the channel.
  //    b) Merge the new channel with just the name into the channel that we
  //       are trying to copy, built in the flatbuffer builder made in 1.
  //       This gives us the new channel definition that we need.
  // 3) Using this list of offsets, build the Configuration of just new
  //    Channels.
  // 4) Merge the Configuration with the new Channels into the base_config.
  // 5) Call MergeConfiguration() on that result to give MergeConfiguration a
  //    chance to sanitize the config.

  // This is the builder that we use for the config containing all the new
  // channels.
  flatbuffers::FlatBufferBuilder new_config_fbb;
  new_config_fbb.ForceDefaults(true);
  std::vector<flatbuffers::Offset<Channel>> channel_offsets;
  for (auto &pair : remapped_channels_) {
    // This is the builder that we use for creating the Channel with just the
    // new name.
    flatbuffers::FlatBufferBuilder new_name_fbb;
    new_name_fbb.ForceDefaults(true);
    const flatbuffers::Offset<flatbuffers::String> name_offset =
        new_name_fbb.CreateString(pair.second);
    ChannelBuilder new_name_builder(new_name_fbb);
    new_name_builder.add_name(name_offset);
    new_name_fbb.Finish(new_name_builder.Finish());
    const FlatbufferDetachedBuffer<Channel> new_name = new_name_fbb.Release();
    // Retrieve the channel that we want to copy, confirming that it is
    // actually present in base_config.
    const Channel *const base_channel =
        CHECK_NOTNULL(configuration::GetChannel(
            base_config, logged_configuration()->channels()->Get(pair.first),
            "", nullptr));
    // Actually create the new channel and put it into the vector of Offsets
    // that we will use to create the new Configuration.
    channel_offsets.emplace_back(MergeFlatBuffers<Channel>(
        reinterpret_cast<const flatbuffers::Table *>(base_channel),
        reinterpret_cast<const flatbuffers::Table *>(&new_name.message()),
        &new_config_fbb));
  }
  // Create the Configuration containing the new channels that we want to add.
  const auto new_name_vector_offsets =
      new_config_fbb.CreateVector(channel_offsets);
  ConfigurationBuilder new_config_builder(new_config_fbb);
  new_config_builder.add_channels(new_name_vector_offsets);
  new_config_fbb.Finish(new_config_builder.Finish());
  const FlatbufferDetachedBuffer<Configuration> new_name_config =
      new_config_fbb.Release();
  // Merge the new channels configuration into the base_config, giving us the
  // remapped configuration.
  remapped_configuration_buffer_ =
      std::make_unique<FlatbufferDetachedBuffer<Configuration>>(
          MergeFlatBuffers<Configuration>(base_config,
                                          &new_name_config.message()));
  // Call MergeConfiguration to deal with sanitizing the config.
  remapped_configuration_buffer_ =
      std::make_unique<FlatbufferDetachedBuffer<Configuration>>(
          configuration::MergeConfiguration(*remapped_configuration_buffer_));

  remapped_configuration_ = &remapped_configuration_buffer_->message();
}

const Channel *LogReader::RemapChannel(const EventLoop *event_loop,
                                       const Channel *channel) {
  std::string_view channel_name = channel->name()->string_view();
  std::string_view channel_type = channel->type()->string_view();
  const int channel_index =
      configuration::ChannelIndex(logged_configuration(), channel);
  // If the channel is remapped, find the correct channel name to use.
  if (remapped_channels_.count(channel_index) > 0) {
    VLOG(2) << "Got remapped channel on "
            << configuration::CleanedChannelToString(channel);
    channel_name = remapped_channels_[channel_index];
  }

  VLOG(1) << "Going to remap channel " << channel_name << " " << channel_type;
  const Channel *remapped_channel = configuration::GetChannel(
      event_loop->configuration(), channel_name, channel_type,
      event_loop->name(), event_loop->node());

  CHECK(remapped_channel != nullptr)
      << ": Unable to send {\"name\": \"" << channel_name << "\", \"type\": \""
      << channel_type
      << "\"} because it is not in the provided configuration.";

  return remapped_channel;
}

}  // namespace logger
}  // namespace aos