#include "aos/events/shm_event_loop.h"

#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

#include <algorithm>
#include <atomic>
#include <chrono>
#include <iterator>
#include <stdexcept>

#include "absl/strings/str_cat.h"
#include "aos/events/aos_logging.h"
#include "aos/events/epoll.h"
#include "aos/events/event_loop_generated.h"
#include "aos/events/timing_statistics.h"
#include "aos/init.h"
#include "aos/ipc_lib/lockless_queue.h"
#include "aos/ipc_lib/signalfd.h"
#include "aos/realtime.h"
#include "aos/stl_mutex/stl_mutex.h"
#include "aos/util/file.h"
#include "aos/util/phased_loop.h"
#include "glog/logging.h"

namespace {

// Returns the portion of the path after the last /. This very much assumes
// that the application name is null terminated.
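// For example, Filename("/usr/bin/ping") returns "ping"; a path with no slash
// is returned unchanged.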
const char *Filename(const char *path) {
  const std::string_view path_string_view = path;
  auto last_slash_pos = path_string_view.find_last_of("/");

  return last_slash_pos == std::string_view::npos ? path
                                                  : path + last_slash_pos + 1;
}

}  // namespace

DEFINE_string(shm_base, "/dev/shm/aos",
              "Directory to place queue backing mmaped files in.");
DEFINE_uint32(permissions, 0770,
              "Permissions to make shared memory files and folders.");
DEFINE_string(application_name, Filename(program_invocation_name),
              "The application name");

namespace aos {

using namespace shm_event_loop_internal;

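// Overrides the --shm_base flag, appending "/aos" to the provided base path so
// the layout matches the flag's default value.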
void SetShmBase(const std::string_view base) {
  FLAGS_shm_base = std::string(base) + "/aos";
}

namespace {

std::string ShmFolder(std::string_view shm_base, const Channel *channel) {
  CHECK(channel->has_name());
  CHECK_EQ(channel->name()->string_view()[0], '/');
  return absl::StrCat(shm_base, channel->name()->string_view(), "/");
}
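// Returns the path of the file backing a channel's queue. For a hypothetical
// channel named "/test" with type "aos.examples.Ping" and the default
// --shm_base, this would be "/dev/shm/aos/test/aos.examples.Ping.v3".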
std::string ShmPath(std::string_view shm_base, const Channel *channel) {
  CHECK(channel->has_type());
  return ShmFolder(shm_base, channel) + channel->type()->str() + ".v3";
}

void PageFaultDataWrite(char *data, size_t size) {
  // This just has to divide the actual page size. Being smaller will make this
  // a bit slower than necessary, but not much. 1024 is a pretty conservative
  // choice (most pages are probably 4096).
  static constexpr size_t kPageSize = 1024;
  const size_t pages = (size + kPageSize - 1) / kPageSize;
  for (size_t i = 0; i < pages; ++i) {
    char zero = 0;
    // We need to ensure there's a writable pagetable entry, but avoid modifying
    // the data.
    //
    // Even if you lock the data into memory, some kernels still seem to lazily
    // create the actual pagetable entries. This means we need to somehow
    // "write" to the page.
    //
    // Also, this takes place while other processes may be concurrently
    // opening/initializing the memory, so we need to avoid corrupting that.
    //
    // This is the simplest operation I could think of which achieves that:
    // "store 0 if it's already 0".
    __atomic_compare_exchange_n(&data[i * kPageSize], &zero, 0, true,
                                __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  }
}

void PageFaultDataRead(const char *data, size_t size) {
  // This just has to divide the actual page size. Being smaller will make this
  // a bit slower than necessary, but not much. 1024 is a pretty conservative
  // choice (most pages are probably 4096).
  static constexpr size_t kPageSize = 1024;
  const size_t pages = (size + kPageSize - 1) / kPageSize;
  for (size_t i = 0; i < pages; ++i) {
    // We need to ensure there's a readable pagetable entry.
    __atomic_load_n(&data[i * kPageSize], __ATOMIC_RELAXED);
  }
}

ipc_lib::LocklessQueueConfiguration MakeQueueConfiguration(
    const Channel *channel, std::chrono::seconds channel_storage_duration) {
  ipc_lib::LocklessQueueConfiguration config;

  config.num_watchers = channel->num_watchers();
  config.num_senders = channel->num_senders();
  // The value in the channel will default to 0 if readers are configured to
  // copy.
  config.num_pinners = channel->num_readers();
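  // Size the queue to hold channel_storage_duration's worth of messages at the
  // channel's configured frequency, e.g. a 200 Hz channel with a 2 second
  // storage duration gets a 400-entry queue.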
  config.queue_size = channel_storage_duration.count() * channel->frequency();
  config.message_data_size = channel->max_size();

  return config;
}

class MMappedQueue {
 public:
  MMappedQueue(std::string_view shm_base, const Channel *channel,
               std::chrono::seconds channel_storage_duration)
      : config_(MakeQueueConfiguration(channel, channel_storage_duration)) {
    std::string path = ShmPath(shm_base, channel);

    size_ = ipc_lib::LocklessQueueMemorySize(config_);

    util::MkdirP(path, FLAGS_permissions);

    // There are 2 cases. Either the file already exists, or it does not
    // already exist and we need to create it. Start by trying to create it. If
    // that fails, the file has already been created and we can open it
    // normally. Once the file has been created it will never be deleted.
    int fd = open(path.c_str(), O_RDWR | O_CREAT | O_EXCL,
                  O_CLOEXEC | FLAGS_permissions);
    if (fd == -1 && errno == EEXIST) {
      VLOG(1) << path << " already created.";
      // File already exists.
      fd = open(path.c_str(), O_RDWR, O_CLOEXEC);
      PCHECK(fd != -1) << ": Failed to open " << path;
      while (true) {
        struct stat st;
        PCHECK(fstat(fd, &st) == 0);
        if (st.st_size != 0) {
          CHECK_EQ(static_cast<size_t>(st.st_size), size_)
              << ": Size of " << path
              << " doesn't match expected size of backing queue file. Did the "
                 "queue definition change?";
          break;
        } else {
          // The creating process didn't get around to it yet. Give it a bit.
          std::this_thread::sleep_for(std::chrono::milliseconds(10));
          VLOG(1) << path << " is zero size, waiting";
        }
      }
    } else {
      VLOG(1) << "Created " << path;
      PCHECK(fd != -1) << ": Failed to open " << path;
      PCHECK(ftruncate(fd, size_) == 0);
    }

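    // Map the memory twice: a writable mapping used for queue operations, and
    // a read-only mapping handed out via const_memory()/GetConstSharedMemory().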
    data_ = mmap(NULL, size_, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    PCHECK(data_ != MAP_FAILED);
    const_data_ = mmap(NULL, size_, PROT_READ, MAP_SHARED, fd, 0);
    PCHECK(const_data_ != MAP_FAILED);
    PCHECK(close(fd) == 0);
    PageFaultDataWrite(static_cast<char *>(data_), size_);
    PageFaultDataRead(static_cast<const char *>(const_data_), size_);

    ipc_lib::InitializeLocklessQueueMemory(memory(), config_);
  }

  ~MMappedQueue() {
    PCHECK(munmap(data_, size_) == 0);
    PCHECK(munmap(const_cast<void *>(const_data_), size_) == 0);
  }

  ipc_lib::LocklessQueueMemory *memory() const {
    return reinterpret_cast<ipc_lib::LocklessQueueMemory *>(data_);
  }

  const ipc_lib::LocklessQueueMemory *const_memory() const {
    return reinterpret_cast<const ipc_lib::LocklessQueueMemory *>(const_data_);
  }

  const ipc_lib::LocklessQueueConfiguration &config() const { return config_; }

  ipc_lib::LocklessQueue queue() const {
    return ipc_lib::LocklessQueue(const_memory(), memory(), config());
  }

  absl::Span<char> GetMutableSharedMemory() const {
    return absl::Span<char>(static_cast<char *>(data_), size_);
  }

  absl::Span<const char> GetConstSharedMemory() const {
    return absl::Span<const char>(static_cast<const char *>(const_data_),
                                  size_);
  }

 private:
  const ipc_lib::LocklessQueueConfiguration config_;

  size_t size_;
  void *data_;
  const void *const_data_;
};

const Node *MaybeMyNode(const Configuration *configuration) {
  if (!configuration->has_nodes()) {
    return nullptr;
  }

  return configuration::GetMyNode(configuration);
}

namespace chrono = ::std::chrono;

}  // namespace

ShmEventLoop::ShmEventLoop(const Configuration *configuration)
    : EventLoop(configuration),
      shm_base_(FLAGS_shm_base),
      name_(FLAGS_application_name),
      node_(MaybeMyNode(configuration)) {
  CHECK(IsInitialized()) << ": Need to initialize AOS first.";
  if (configuration->has_nodes()) {
    CHECK(node_ != nullptr) << ": Couldn't find node in config.";
  }
}

namespace shm_event_loop_internal {

class SimpleShmFetcher {
 public:
  explicit SimpleShmFetcher(std::string_view shm_base, ShmEventLoop *event_loop,
                            const Channel *channel)
      : event_loop_(event_loop),
        channel_(channel),
        lockless_queue_memory_(
            shm_base, channel,
            chrono::ceil<chrono::seconds>(chrono::nanoseconds(
                event_loop->configuration()->channel_storage_duration()))),
        reader_(lockless_queue_memory_.queue()) {
    context_.data = nullptr;
    // Point the queue index at the next index to read starting now. This
    // makes it such that FetchNext will read the next message sent after
    // the fetcher is created.
    PointAtNextQueueIndex();
  }

  ~SimpleShmFetcher() {}

  // Sets this object to pin or copy data, as configured in the channel.
  void RetrieveData() {
    if (channel_->read_method() == ReadMethod::PIN) {
      PinDataOnFetch();
    } else {
      CopyDataOnFetch();
    }
  }

  // Sets this object to copy data out of the shared memory into a private
  // buffer when fetching.
  void CopyDataOnFetch() {
    CHECK(!pin_data());
    data_storage_.reset(static_cast<char *>(
        malloc(channel_->max_size() + kChannelDataAlignment - 1)));
  }

  // Sets this object to pin data in shared memory when fetching.
  void PinDataOnFetch() {
    CHECK(!copy_data());
    auto maybe_pinner =
        ipc_lib::LocklessQueuePinner::Make(lockless_queue_memory_.queue());
    if (!maybe_pinner) {
      LOG(FATAL) << "Failed to create reader on "
                 << configuration::CleanedChannelToString(channel_)
                 << ", too many readers.";
    }
    pinner_ = std::move(maybe_pinner.value());
  }

  // Points the next message to fetch at the queue index which will be
  // populated next.
  void PointAtNextQueueIndex() {
    actual_queue_index_ = reader_.LatestIndex();
    if (!actual_queue_index_.valid()) {
      // Nothing in the queue. The next element will show up at the 0th
      // index in the queue.
      actual_queue_index_ = ipc_lib::QueueIndex::Zero(
          LocklessQueueSize(lockless_queue_memory_.memory()));
    } else {
      actual_queue_index_ = actual_queue_index_.Increment();
    }
  }

  bool FetchNext() {
    const ipc_lib::LocklessQueueReader::Result read_result =
        DoFetch(actual_queue_index_);

    return read_result == ipc_lib::LocklessQueueReader::Result::GOOD;
  }

  bool Fetch() {
    const ipc_lib::QueueIndex queue_index = reader_.LatestIndex();
    // actual_queue_index_ is only meaningful if it was set by a previous Fetch
    // or FetchNext, which is also when context_.data gets set. So, only use it
    // to detect that there is nothing new when context_.data is set.
    //
    // Also, if the latest queue index is invalid, we are empty. So there
    // is nothing to fetch.
    if ((context_.data != nullptr &&
         queue_index == actual_queue_index_.DecrementBy(1u)) ||
        !queue_index.valid()) {
      return false;
    }

    const ipc_lib::LocklessQueueReader::Result read_result =
        DoFetch(queue_index);

    CHECK(read_result != ipc_lib::LocklessQueueReader::Result::NOTHING_NEW)
        << ": Queue index went backwards. This should never happen. "
        << configuration::CleanedChannelToString(channel_);

    return read_result == ipc_lib::LocklessQueueReader::Result::GOOD;
  }

  Context context() const { return context_; }

  bool RegisterWakeup(int priority) {
    CHECK(!watcher_);
    watcher_ = ipc_lib::LocklessQueueWatcher::Make(
        lockless_queue_memory_.queue(), priority);
    return static_cast<bool>(watcher_);
  }

  void UnregisterWakeup() {
    CHECK(watcher_);
    watcher_ = std::nullopt;
  }

  absl::Span<char> GetMutableSharedMemory() {
    return lockless_queue_memory_.GetMutableSharedMemory();
  }

  absl::Span<const char> GetConstSharedMemory() const {
    return lockless_queue_memory_.GetConstSharedMemory();
  }

  absl::Span<const char> GetPrivateMemory() const {
    if (pin_data()) {
      return lockless_queue_memory_.GetConstSharedMemory();
    }
    return absl::Span<char>(
        const_cast<SimpleShmFetcher *>(this)->data_storage_start(),
        LocklessQueueMessageDataSize(lockless_queue_memory_.memory()));
  }

 private:
  ipc_lib::LocklessQueueReader::Result DoFetch(
      ipc_lib::QueueIndex queue_index) {
    // TODO(austin): Get behind and make sure it dies.
    char *copy_buffer = nullptr;
    if (copy_data()) {
      copy_buffer = data_storage_start();
    }
    ipc_lib::LocklessQueueReader::Result read_result = reader_.Read(
        queue_index.index(), &context_.monotonic_event_time,
        &context_.realtime_event_time, &context_.monotonic_remote_time,
        &context_.realtime_remote_time, &context_.remote_queue_index,
        &context_.size, copy_buffer);

    if (read_result == ipc_lib::LocklessQueueReader::Result::GOOD) {
      if (pin_data()) {
        const int pin_result = pinner_->PinIndex(queue_index.index());
        CHECK(pin_result >= 0)
            << ": Got behind while reading and the last message was modified "
               "out from under us while we tried to pin it. Don't get so far "
               "behind on: "
            << configuration::CleanedChannelToString(channel_);
        context_.buffer_index = pin_result;
      } else {
        context_.buffer_index = -1;
      }

      context_.queue_index = queue_index.index();
      if (context_.remote_queue_index == 0xffffffffu) {
        context_.remote_queue_index = context_.queue_index;
      }
      if (context_.monotonic_remote_time == aos::monotonic_clock::min_time) {
        context_.monotonic_remote_time = context_.monotonic_event_time;
      }
      if (context_.realtime_remote_time == aos::realtime_clock::min_time) {
        context_.realtime_remote_time = context_.realtime_event_time;
      }
      const char *const data = DataBuffer();
      if (data) {
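        // The message occupies the last context_.size bytes of the fixed-size
        // data buffer, so point context_.data at the start of the actual data.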
        context_.data =
            data +
            LocklessQueueMessageDataSize(lockless_queue_memory_.memory()) -
            context_.size;
      } else {
        context_.data = nullptr;
      }
      actual_queue_index_ = queue_index.Increment();
    }

    // Make sure the data wasn't modified while we were reading it. This
    // can only happen if you are reading the last message *while* it is
    // being written to, which means you are pretty far behind.
    CHECK(read_result != ipc_lib::LocklessQueueReader::Result::OVERWROTE)
        << ": Got behind while reading and the last message was modified "
           "out from under us while we were reading it. Don't get so far "
           "behind on: "
        << configuration::CleanedChannelToString(channel_);

    // We fell behind between when we read the index and read the value.
    // This isn't worth recovering from since this means we went to sleep
    // for a long time in the middle of this function.
    if (read_result == ipc_lib::LocklessQueueReader::Result::TOO_OLD) {
      event_loop_->SendTimingReport();
      LOG(FATAL) << "The next message is no longer available. "
                 << configuration::CleanedChannelToString(channel_);
    }

    return read_result;
  }

  char *data_storage_start() const {
    CHECK(copy_data());
    return RoundChannelData(data_storage_.get(), channel_->max_size());
  }

  // Note that for some modes the return value will change as new messages are
  // read.
  const char *DataBuffer() const {
    if (copy_data()) {
      return data_storage_start();
    }
    if (pin_data()) {
      return static_cast<const char *>(pinner_->Data());
    }
    return nullptr;
  }

  bool copy_data() const { return static_cast<bool>(data_storage_); }
  bool pin_data() const { return static_cast<bool>(pinner_); }

  aos::ShmEventLoop *event_loop_;
  const Channel *const channel_;
  MMappedQueue lockless_queue_memory_;
  ipc_lib::LocklessQueueReader reader_;
  // This being nullopt indicates we're not looking for wakeups right now.
  std::optional<ipc_lib::LocklessQueueWatcher> watcher_;

  ipc_lib::QueueIndex actual_queue_index_ = ipc_lib::QueueIndex::Invalid();

  // This being empty indicates we're not going to copy data.
  std::unique_ptr<char, decltype(&free)> data_storage_{nullptr, &free};

  // This being nullopt indicates we're not going to pin messages.
  std::optional<ipc_lib::LocklessQueuePinner> pinner_;

  Context context_;
};

class ShmFetcher : public RawFetcher {
 public:
  explicit ShmFetcher(std::string_view shm_base, ShmEventLoop *event_loop,
                      const Channel *channel)
      : RawFetcher(event_loop, channel),
        simple_shm_fetcher_(shm_base, event_loop, channel) {
    simple_shm_fetcher_.RetrieveData();
  }

  ~ShmFetcher() { context_.data = nullptr; }

  std::pair<bool, monotonic_clock::time_point> DoFetchNext() override {
    if (simple_shm_fetcher_.FetchNext()) {
      context_ = simple_shm_fetcher_.context();
      return std::make_pair(true, monotonic_clock::now());
    }
    return std::make_pair(false, monotonic_clock::min_time);
  }

  std::pair<bool, monotonic_clock::time_point> DoFetch() override {
    if (simple_shm_fetcher_.Fetch()) {
      context_ = simple_shm_fetcher_.context();
      return std::make_pair(true, monotonic_clock::now());
    }
    return std::make_pair(false, monotonic_clock::min_time);
  }

  absl::Span<const char> GetPrivateMemory() const {
    return simple_shm_fetcher_.GetPrivateMemory();
  }

 private:
  SimpleShmFetcher simple_shm_fetcher_;
};

class ShmSender : public RawSender {
 public:
  explicit ShmSender(std::string_view shm_base, EventLoop *event_loop,
                     const Channel *channel)
      : RawSender(event_loop, channel),
        lockless_queue_memory_(
            shm_base, channel,
            chrono::ceil<chrono::seconds>(chrono::nanoseconds(
                event_loop->configuration()->channel_storage_duration()))),
        lockless_queue_sender_(VerifySender(
            ipc_lib::LocklessQueueSender::Make(lockless_queue_memory_.queue()),
            channel)),
        wake_upper_(lockless_queue_memory_.queue()) {}

  ~ShmSender() override {}

  static ipc_lib::LocklessQueueSender VerifySender(
      std::optional<ipc_lib::LocklessQueueSender> sender,
      const Channel *channel) {
    if (sender) {
      return std::move(sender.value());
    }
    LOG(FATAL) << "Failed to create sender on "
               << configuration::CleanedChannelToString(channel)
               << ", too many senders.";
  }

  void *data() override { return lockless_queue_sender_.Data(); }
  size_t size() override { return lockless_queue_sender_.size(); }
  bool DoSend(size_t length,
              aos::monotonic_clock::time_point monotonic_remote_time,
              aos::realtime_clock::time_point realtime_remote_time,
              uint32_t remote_queue_index) override {
    CHECK_LE(length, static_cast<size_t>(channel()->max_size()))
        << ": Sent too big a message on "
        << configuration::CleanedChannelToString(channel());
    CHECK(lockless_queue_sender_.Send(
        length, monotonic_remote_time, realtime_remote_time, remote_queue_index,
        &monotonic_sent_time_, &realtime_sent_time_, &sent_queue_index_))
        << ": Somebody wrote outside the buffer of their message on channel "
        << configuration::CleanedChannelToString(channel());

    wake_upper_.Wakeup(event_loop()->priority());
    return true;
  }

  bool DoSend(const void *msg, size_t length,
              aos::monotonic_clock::time_point monotonic_remote_time,
              aos::realtime_clock::time_point realtime_remote_time,
              uint32_t remote_queue_index) override {
    CHECK_LE(length, static_cast<size_t>(channel()->max_size()))
        << ": Sent too big a message on "
        << configuration::CleanedChannelToString(channel());
    CHECK(lockless_queue_sender_.Send(
        reinterpret_cast<const char *>(msg), length, monotonic_remote_time,
        realtime_remote_time, remote_queue_index, &monotonic_sent_time_,
        &realtime_sent_time_, &sent_queue_index_))
        << ": Somebody wrote outside the buffer of their message on channel "
        << configuration::CleanedChannelToString(channel());
    wake_upper_.Wakeup(event_loop()->priority());
    // TODO(austin): Return an error if we send too fast.
    return true;
  }

  absl::Span<char> GetSharedMemory() const {
    return lockless_queue_memory_.GetMutableSharedMemory();
  }

  int buffer_index() override { return lockless_queue_sender_.buffer_index(); }

 private:
  MMappedQueue lockless_queue_memory_;
  ipc_lib::LocklessQueueSender lockless_queue_sender_;
  ipc_lib::LocklessQueueWakeUpper wake_upper_;
};

// Class to manage the state for a Watcher.
class ShmWatcherState : public WatcherState {
 public:
  ShmWatcherState(
      std::string_view shm_base, ShmEventLoop *event_loop,
      const Channel *channel,
      std::function<void(const Context &context, const void *message)> fn,
      bool copy_data)
      : WatcherState(event_loop, channel, std::move(fn)),
        event_loop_(event_loop),
        event_(this),
        simple_shm_fetcher_(shm_base, event_loop, channel) {
    if (copy_data) {
      simple_shm_fetcher_.RetrieveData();
    }
  }

  ~ShmWatcherState() override { event_loop_->RemoveEvent(&event_); }

  void Startup(EventLoop *event_loop) override {
    simple_shm_fetcher_.PointAtNextQueueIndex();
    CHECK(RegisterWakeup(event_loop->priority()));
  }

  // Returns true if there is new data available.
  bool CheckForNewData() {
    if (!has_new_data_) {
      has_new_data_ = simple_shm_fetcher_.FetchNext();

      if (has_new_data_) {
        event_.set_event_time(
            simple_shm_fetcher_.context().monotonic_event_time);
        event_loop_->AddEvent(&event_);
      }
    }

    return has_new_data_;
  }

  // Consumes the data by calling the callback.
  void HandleEvent() {
    CHECK(has_new_data_);
    DoCallCallback(monotonic_clock::now, simple_shm_fetcher_.context());
    has_new_data_ = false;
    CheckForNewData();
  }

  // Registers us to receive a signal on event reception.
  bool RegisterWakeup(int priority) {
    return simple_shm_fetcher_.RegisterWakeup(priority);
  }

  void UnregisterWakeup() { return simple_shm_fetcher_.UnregisterWakeup(); }

  absl::Span<const char> GetSharedMemory() const {
    return simple_shm_fetcher_.GetConstSharedMemory();
  }

 private:
  bool has_new_data_ = false;

  ShmEventLoop *event_loop_;
  EventHandler<ShmWatcherState> event_;
  SimpleShmFetcher simple_shm_fetcher_;
};

// Adapter class to adapt a timerfd to a TimerHandler.
class ShmTimerHandler final : public TimerHandler {
 public:
  ShmTimerHandler(ShmEventLoop *shm_event_loop, ::std::function<void()> fn)
      : TimerHandler(shm_event_loop, std::move(fn)),
        shm_event_loop_(shm_event_loop),
        event_(this) {
    shm_event_loop_->epoll_.OnReadable(timerfd_.fd(), [this]() {
      // The timer may fire spuriously. HandleEvent on the event loop will
      // call the callback if it is needed. It may also have called it when
      // processing some other event, and the kernel decided to deliver this
      // wakeup anyway.
      timerfd_.Read();
      shm_event_loop_->HandleEvent();
    });
  }

  ~ShmTimerHandler() {
    Disable();
    shm_event_loop_->epoll_.DeleteFd(timerfd_.fd());
  }

  void HandleEvent() {
    CHECK(!event_.valid());
    disabled_ = false;
    const auto monotonic_now = Call(monotonic_clock::now, base_);
    if (event_.valid()) {
      // If someone called Setup inside Call, rescheduling is already taken
      // care of. Bail.
      return;
    }
    if (disabled_) {
      // Somebody called Disable inside Call, so we don't want to reschedule.
      // Bail.
      return;
    }

    if (repeat_offset_ == chrono::seconds(0)) {
      timerfd_.Disable();
    } else {
      // Compute how many cycles have elapsed and schedule the next wakeup for
      // the next iteration in the future.
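      // For example, with base_ = 0 ms, repeat_offset_ = 10 ms, and
      // monotonic_now = 25 ms, elapsed_cycles is 3 and base_ moves to 30 ms.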
      const int elapsed_cycles =
          std::max<int>(0, (monotonic_now - base_ + repeat_offset_ -
                            std::chrono::nanoseconds(1)) /
                               repeat_offset_);
      base_ += repeat_offset_ * elapsed_cycles;

      // Update the heap and schedule the timerfd wakeup.
      event_.set_event_time(base_);
      shm_event_loop_->AddEvent(&event_);
      timerfd_.SetTime(base_, chrono::seconds(0));
    }
  }

  void Setup(monotonic_clock::time_point base,
             monotonic_clock::duration repeat_offset) override {
    if (event_.valid()) {
      shm_event_loop_->RemoveEvent(&event_);
    }

    timerfd_.SetTime(base, repeat_offset);
    base_ = base;
    repeat_offset_ = repeat_offset;
    event_.set_event_time(base_);
    shm_event_loop_->AddEvent(&event_);
  }

  void Disable() override {
    shm_event_loop_->RemoveEvent(&event_);
    timerfd_.Disable();
    disabled_ = true;
  }

 private:
  ShmEventLoop *shm_event_loop_;
  EventHandler<ShmTimerHandler> event_;

  internal::TimerFd timerfd_;

  monotonic_clock::time_point base_;
  monotonic_clock::duration repeat_offset_;

  // Used to track if Disable() was called during the callback, so we know not
  // to reschedule.
  bool disabled_ = false;
};

// Adapter class to the timerfd and PhasedLoop.
class ShmPhasedLoopHandler final : public PhasedLoopHandler {
 public:
  ShmPhasedLoopHandler(ShmEventLoop *shm_event_loop,
                       ::std::function<void(int)> fn,
                       const monotonic_clock::duration interval,
                       const monotonic_clock::duration offset)
      : PhasedLoopHandler(shm_event_loop, std::move(fn), interval, offset),
        shm_event_loop_(shm_event_loop),
        event_(this) {
    shm_event_loop_->epoll_.OnReadable(
        timerfd_.fd(), [this]() { shm_event_loop_->HandleEvent(); });
  }

  void HandleEvent() {
    // The return value for read is the number of cycles that have elapsed.
    // Because we check to see when this event *should* have happened, there are
    // cases where Read() will return 0, when 1 cycle has actually happened.
    // This occurs when the timer interrupt hasn't triggered yet. Therefore,
    // ignore it. Call handles rescheduling and calculating elapsed cycles
    // without any extra help.
    timerfd_.Read();
    event_.Invalidate();

    Call(monotonic_clock::now, [this](monotonic_clock::time_point sleep_time) {
      Schedule(sleep_time);
    });
  }

  ~ShmPhasedLoopHandler() override {
    shm_event_loop_->epoll_.DeleteFd(timerfd_.fd());
    shm_event_loop_->RemoveEvent(&event_);
  }

 private:
  // Reschedules the timer.
  void Schedule(monotonic_clock::time_point sleep_time) override {
    if (event_.valid()) {
      shm_event_loop_->RemoveEvent(&event_);
    }

    timerfd_.SetTime(sleep_time, ::aos::monotonic_clock::zero());
    event_.set_event_time(sleep_time);
    shm_event_loop_->AddEvent(&event_);
  }

  ShmEventLoop *shm_event_loop_;
  EventHandler<ShmPhasedLoopHandler> event_;

  internal::TimerFd timerfd_;
};

}  // namespace shm_event_loop_internal

::std::unique_ptr<RawFetcher> ShmEventLoop::MakeRawFetcher(
    const Channel *channel) {
  if (!configuration::ChannelIsReadableOnNode(channel, node())) {
    LOG(FATAL) << "Channel { \"name\": \"" << channel->name()->string_view()
               << "\", \"type\": \"" << channel->type()->string_view()
               << "\" } is not able to be fetched on this node. Check your "
                  "configuration.";
  }

  return ::std::unique_ptr<RawFetcher>(
      new ShmFetcher(shm_base_, this, channel));
}

::std::unique_ptr<RawSender> ShmEventLoop::MakeRawSender(
    const Channel *channel) {
  TakeSender(channel);

  return ::std::unique_ptr<RawSender>(new ShmSender(shm_base_, this, channel));
}

void ShmEventLoop::MakeRawWatcher(
    const Channel *channel,
    std::function<void(const Context &context, const void *message)> watcher) {
  TakeWatcher(channel);

  NewWatcher(::std::unique_ptr<WatcherState>(
      new ShmWatcherState(shm_base_, this, channel, std::move(watcher), true)));
}

void ShmEventLoop::MakeRawNoArgWatcher(
    const Channel *channel,
    std::function<void(const Context &context)> watcher) {
  TakeWatcher(channel);

  NewWatcher(::std::unique_ptr<WatcherState>(new ShmWatcherState(
      shm_base_, this, channel,
      [watcher](const Context &context, const void *) { watcher(context); },
      false)));
}

TimerHandler *ShmEventLoop::AddTimer(::std::function<void()> callback) {
  return NewTimer(::std::unique_ptr<TimerHandler>(
      new ShmTimerHandler(this, ::std::move(callback))));
}

PhasedLoopHandler *ShmEventLoop::AddPhasedLoop(
    ::std::function<void(int)> callback,
    const monotonic_clock::duration interval,
    const monotonic_clock::duration offset) {
  return NewPhasedLoop(::std::unique_ptr<PhasedLoopHandler>(
      new ShmPhasedLoopHandler(this, ::std::move(callback), interval, offset)));
}

void ShmEventLoop::OnRun(::std::function<void()> on_run) {
  on_run_.push_back(::std::move(on_run));
}

void ShmEventLoop::HandleEvent() {
  // Update all the times for handlers.
  for (::std::unique_ptr<WatcherState> &base_watcher : watchers_) {
    ShmWatcherState *watcher =
        reinterpret_cast<ShmWatcherState *>(base_watcher.get());

    watcher->CheckForNewData();
  }

  while (true) {
    if (EventCount() == 0 ||
        PeekEvent()->event_time() > monotonic_clock::now()) {
      break;
    }

    EventLoopEvent *event = PopEvent();
    event->HandleEvent();
  }
}

// RAII class to mask signals.
class ScopedSignalMask {
 public:
  ScopedSignalMask(std::initializer_list<int> signals) {
    sigset_t sigset;
    PCHECK(sigemptyset(&sigset) == 0);
    for (int signal : signals) {
      PCHECK(sigaddset(&sigset, signal) == 0);
    }

    PCHECK(sigprocmask(SIG_BLOCK, &sigset, &old_) == 0);
  }

  ~ScopedSignalMask() { PCHECK(sigprocmask(SIG_SETMASK, &old_, nullptr) == 0); }

 private:
  sigset_t old_;
};

// Class to manage the static state associated with killing multiple event
// loops.
class SignalHandler {
 public:
  // Gets the singleton.
  static SignalHandler *global() {
    static SignalHandler loop;
    return &loop;
  }

  // Handles the signal with the singleton.
  static void HandleSignal(int) { global()->DoHandleSignal(); }

  // Registers an event loop to receive Exit() calls.
  void Register(ShmEventLoop *event_loop) {
    // Block signals while we have the mutex so we never race with the signal
    // handler.
    ScopedSignalMask mask({SIGINT, SIGHUP, SIGTERM});
    std::unique_lock<stl_mutex> locker(mutex_);
    if (event_loops_.size() == 0) {
      // The first caller registers the signal handler.
      struct sigaction new_action;
      sigemptyset(&new_action.sa_mask);
      // This makes it so that 2 control c's to a stuck process will kill it by
      // restoring the original signal handler.
      new_action.sa_flags = SA_RESETHAND;
      new_action.sa_handler = &HandleSignal;

      PCHECK(sigaction(SIGINT, &new_action, &old_action_int_) == 0);
      PCHECK(sigaction(SIGHUP, &new_action, &old_action_hup_) == 0);
      PCHECK(sigaction(SIGTERM, &new_action, &old_action_term_) == 0);
    }

    event_loops_.push_back(event_loop);
  }

  // Unregisters an event loop to receive Exit() calls.
  void Unregister(ShmEventLoop *event_loop) {
    // Block signals while we have the mutex so we never race with the signal
    // handler.
    ScopedSignalMask mask({SIGINT, SIGHUP, SIGTERM});
    std::unique_lock<stl_mutex> locker(mutex_);

    event_loops_.erase(
        std::find(event_loops_.begin(), event_loops_.end(), event_loop));

    if (event_loops_.size() == 0u) {
      // The last caller restores the original signal handlers.
      PCHECK(sigaction(SIGINT, &old_action_int_, nullptr) == 0);
      PCHECK(sigaction(SIGHUP, &old_action_hup_, nullptr) == 0);
      PCHECK(sigaction(SIGTERM, &old_action_term_, nullptr) == 0);
    }
  }

 private:
  void DoHandleSignal() {
    // We block signals while grabbing the lock, so there should never be a
    // race. Confirm that this is true using trylock.
    CHECK(mutex_.try_lock()) << ": sigprocmask failed to block signals while "
                                "modifying the event loop list.";
    for (ShmEventLoop *event_loop : event_loops_) {
      event_loop->Exit();
    }
    mutex_.unlock();
  }

  // Mutex to protect all state.
  stl_mutex mutex_;
  std::vector<ShmEventLoop *> event_loops_;
  struct sigaction old_action_int_;
  struct sigaction old_action_hup_;
  struct sigaction old_action_term_;
};

void ShmEventLoop::Run() {
  SignalHandler::global()->Register(this);

  std::unique_ptr<ipc_lib::SignalFd> signalfd;

  if (watchers_.size() > 0) {
    signalfd.reset(new ipc_lib::SignalFd({ipc_lib::kWakeupSignal}));

    epoll_.OnReadable(signalfd->fd(), [signalfd_ptr = signalfd.get(), this]() {
      signalfd_siginfo result = signalfd_ptr->Read();
      CHECK_EQ(result.ssi_signo, ipc_lib::kWakeupSignal);

      // TODO(austin): We should really be checking *everything*, not just
      // watchers, and calling the oldest thing first. That will improve
      // determinism a lot.

      HandleEvent();
    });
  }

  MaybeScheduleTimingReports();

  ReserveEvents();

  {
    logging::ScopedLogRestorer prev_logger;
    AosLogToFbs aos_logger;
    if (!skip_logger_) {
      aos_logger.Initialize(MakeSender<logging::LogMessageFbs>("/aos"));
      prev_logger.Swap(aos_logger.implementation());
    }

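    // Linux limits thread names to 16 bytes (TASK_COMM_LEN), so truncate.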
    aos::SetCurrentThreadName(name_.substr(0, 16));
    const cpu_set_t default_affinity = DefaultAffinity();
    if (!CPU_EQUAL(&affinity_, &default_affinity)) {
      ::aos::SetCurrentThreadAffinity(affinity_);
    }
    // Now, all the callbacks are set up. Lock everything into memory and go
    // RT.
    if (priority_ != 0) {
      ::aos::InitRT();

      LOG(INFO) << "Setting priority to " << priority_;
      ::aos::SetCurrentThreadRealtimePriority(priority_);
    }

    set_is_running(true);

    // Now that we are realtime (but before the OnRun handlers run), snap the
    // queue index.
    for (::std::unique_ptr<WatcherState> &watcher : watchers_) {
      watcher->Startup(this);
    }

    // Now that we are RT, run all the OnRun handlers.
    for (const auto &run : on_run_) {
      run();
    }

    // And start our main event loop which runs all the timers and handles Quit.
    epoll_.Run();

    // Once epoll exits, there is no useful nonrt work left to do.
    set_is_running(false);

    // Nothing time or synchronization critical needs to happen after this
    // point. Drop RT priority.
    ::aos::UnsetCurrentThreadRealtimePriority();
  }

  for (::std::unique_ptr<WatcherState> &base_watcher : watchers_) {
    ShmWatcherState *watcher =
        reinterpret_cast<ShmWatcherState *>(base_watcher.get());
    watcher->UnregisterWakeup();
  }

  if (watchers_.size() > 0) {
    epoll_.DeleteFd(signalfd->fd());
    signalfd.reset();
  }

  SignalHandler::global()->Unregister(this);

  // Trigger any remaining senders or fetchers to be cleared before destroying
  // the event loop so the bookkeeping matches. Do this in the thread that
  // created the timing reporter.
  timing_report_sender_.reset();
}

void ShmEventLoop::Exit() { epoll_.Quit(); }

ShmEventLoop::~ShmEventLoop() {
  // Force everything with a registered fd with epoll to be destroyed now.
  timers_.clear();
  phased_loops_.clear();
  watchers_.clear();

  CHECK(!is_running()) << ": ShmEventLoop destroyed while running";
}

void ShmEventLoop::SetRuntimeRealtimePriority(int priority) {
  if (is_running()) {
    LOG(FATAL) << "Cannot set realtime priority while running.";
  }
  priority_ = priority;
}

void ShmEventLoop::SetRuntimeAffinity(const cpu_set_t &cpuset) {
  if (is_running()) {
    LOG(FATAL) << "Cannot set affinity while running.";
  }
  affinity_ = cpuset;
}

void ShmEventLoop::set_name(const std::string_view name) {
  name_ = std::string(name);
  UpdateTimingReport();
}

absl::Span<const char> ShmEventLoop::GetWatcherSharedMemory(
    const Channel *channel) {
  ShmWatcherState *const watcher_state =
      static_cast<ShmWatcherState *>(GetWatcherState(channel));
  return watcher_state->GetSharedMemory();
}

int ShmEventLoop::NumberBuffers(const Channel *channel) {
  return MakeQueueConfiguration(
             channel, chrono::ceil<chrono::seconds>(chrono::nanoseconds(
                          configuration()->channel_storage_duration())))
      .num_messages();
}

absl::Span<char> ShmEventLoop::GetShmSenderSharedMemory(
    const aos::RawSender *sender) const {
  return static_cast<const ShmSender *>(sender)->GetSharedMemory();
}

absl::Span<const char> ShmEventLoop::GetShmFetcherPrivateMemory(
    const aos::RawFetcher *fetcher) const {
  return static_cast<const ShmFetcher *>(fetcher)->GetPrivateMemory();
}

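// Use the raw syscall because older glibc versions do not provide a gettid()
// wrapper.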
pid_t ShmEventLoop::GetTid() { return syscall(SYS_gettid); }

}  // namespace aos