#include "aos/events/shm_event_loop.h"

#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

#include <algorithm>
#include <atomic>
#include <chrono>
#include <iterator>
#include <stdexcept>

#include "aos/events/aos_logging.h"
#include "aos/events/epoll.h"
#include "aos/events/event_loop_generated.h"
#include "aos/events/timing_statistics.h"
#include "aos/ipc_lib/lockless_queue.h"
#include "aos/ipc_lib/signalfd.h"
#include "aos/realtime.h"
#include "aos/stl_mutex/stl_mutex.h"
#include "aos/util/file.h"
#include "aos/util/phased_loop.h"
#include "glog/logging.h"

namespace {

// Returns the portion of the path after the last /.  This very much assumes
// that the application name is null terminated.
const char *Filename(const char *path) {
  const std::string_view path_string_view = path;
  auto last_slash_pos = path_string_view.find_last_of("/");

  return last_slash_pos == std::string_view::npos ? path
                                                  : path + last_slash_pos + 1;
}

}  // namespace

DEFINE_string(shm_base, "/dev/shm/aos",
              "Directory to place queue backing mmaped files in.");
DEFINE_uint32(permissions, 0770,
              "Permissions to make shared memory files and folders.");
DEFINE_string(application_name, Filename(program_invocation_name),
              "The application name");

namespace aos {

void SetShmBase(const std::string_view base) {
  FLAGS_shm_base = std::string(base) + "/dev/shm/aos";
}

std::string ShmFolder(const Channel *channel) {
  CHECK(channel->has_name());
  CHECK_EQ(channel->name()->string_view()[0], '/');
  return FLAGS_shm_base + channel->name()->str() + "/";
}
std::string ShmPath(const Channel *channel) {
  CHECK(channel->has_type());
  return ShmFolder(channel) + channel->type()->str() + ".v2";
}

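// Manages the shared memory file which backs a single channel's lockless
// queue: creates (or opens) the backing file under FLAGS_shm_base, maps it
// into this process, and initializes the lockless queue memory in it.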
class MMapedQueue {
 public:
  MMapedQueue(const Channel *channel,
              const std::chrono::seconds channel_storage_duration) {
    std::string path = ShmPath(channel);

    config_.num_watchers = channel->num_watchers();
    config_.num_senders = channel->num_senders();
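    // Size the queue so it can hold channel_storage_duration's worth of
    // messages at the channel's configured frequency.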
    config_.queue_size =
        channel_storage_duration.count() * channel->frequency();
    config_.message_data_size = channel->max_size();

    size_ = ipc_lib::LocklessQueueMemorySize(config_);

    util::MkdirP(path, FLAGS_permissions);

    // There are 2 cases.  Either the file already exists, or it does not
    // already exist and we need to create it.  Start by trying to create it.
    // If that fails, the file has already been created and we can open it
    // normally.  Once the file has been created it will never be deleted.
    int fd = open(path.c_str(), O_RDWR | O_CREAT | O_EXCL,
                  O_CLOEXEC | FLAGS_permissions);
    if (fd == -1 && errno == EEXIST) {
      VLOG(1) << path << " already created.";
      // File already exists.
      fd = open(path.c_str(), O_RDWR, O_CLOEXEC);
      PCHECK(fd != -1) << ": Failed to open " << path;
      while (true) {
        struct stat st;
        PCHECK(fstat(fd, &st) == 0);
        if (st.st_size != 0) {
          CHECK_EQ(static_cast<size_t>(st.st_size), size_)
              << ": Size of " << path
              << " doesn't match expected size of backing queue file.  Did the "
                 "queue definition change?";
          break;
        } else {
          // The creating process didn't get around to it yet.  Give it a bit.
          std::this_thread::sleep_for(std::chrono::milliseconds(10));
          VLOG(1) << path << " is zero size, waiting";
        }
      }
    } else {
      VLOG(1) << "Created " << path;
      PCHECK(fd != -1) << ": Failed to open " << path;
      PCHECK(ftruncate(fd, size_) == 0);
    }

    data_ = mmap(NULL, size_, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    PCHECK(data_ != MAP_FAILED);
    PCHECK(close(fd) == 0);

    ipc_lib::InitializeLocklessQueueMemory(memory(), config_);
  }

  ~MMapedQueue() {
    PCHECK(munmap(data_, size_) == 0);
  }

  ipc_lib::LocklessQueueMemory *memory() const {
    return reinterpret_cast<ipc_lib::LocklessQueueMemory *>(data_);
  }

  const ipc_lib::LocklessQueueConfiguration &config() const { return config_; }

  absl::Span<char> GetSharedMemory() const {
    return absl::Span<char>(static_cast<char *>(data_), size_);
  }

 private:
  ipc_lib::LocklessQueueConfiguration config_;

  size_t size_;
  void *data_;
};

namespace {

const Node *MaybeMyNode(const Configuration *configuration) {
  if (!configuration->has_nodes()) {
    return nullptr;
  }

  return configuration::GetMyNode(configuration);
}

namespace chrono = ::std::chrono;

}  // namespace

ShmEventLoop::ShmEventLoop(const Configuration *configuration)
    : EventLoop(configuration),
      name_(FLAGS_application_name),
      node_(MaybeMyNode(configuration)) {
  if (configuration->has_nodes()) {
    CHECK(node_ != nullptr) << ": Couldn't find node in config.";
  }
}

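// A rough usage sketch, not taken from this file: the typed helpers such as
// MakeWatcher live on the base EventLoop, and the config loading, "/test"
// channel, and examples::Ping type shown here are placeholders/assumptions.
//
//   aos::FlatbufferDetachedBuffer<aos::Configuration> config =
//       aos::configuration::ReadConfig("config.json");
//   aos::ShmEventLoop event_loop(&config.message());
//   event_loop.MakeWatcher("/test", [](const examples::Ping &ping) {
//     // ... use ping ...
//   });
//   event_loop.Run();
//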
namespace internal {

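// Reads messages out of a channel's shared memory queue.  This does the heavy
// lifting for both ShmFetcher and WatcherState below.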
class SimpleShmFetcher {
 public:
  explicit SimpleShmFetcher(EventLoop *event_loop, const Channel *channel)
      : channel_(channel),
        lockless_queue_memory_(
            channel,
            chrono::ceil<chrono::seconds>(chrono::nanoseconds(
                event_loop->configuration()->channel_storage_duration()))),
        lockless_queue_(lockless_queue_memory_.memory(),
                        lockless_queue_memory_.config()),
        data_storage_(static_cast<char *>(malloc(channel->max_size() +
                                                 kChannelDataAlignment - 1)),
                      &free) {
    context_.data = nullptr;
    // Point the queue index at the next index to read starting now.  This
    // makes it such that FetchNext will read the next message sent after
    // the fetcher is created.
    PointAtNextQueueIndex();
  }

  ~SimpleShmFetcher() {}

  // Points the next message to fetch at the queue index which will be
  // populated next.
  void PointAtNextQueueIndex() {
    actual_queue_index_ = lockless_queue_.LatestQueueIndex();
    if (!actual_queue_index_.valid()) {
      // Nothing in the queue.  The next element will show up at the 0th
      // index in the queue.
      actual_queue_index_ =
          ipc_lib::QueueIndex::Zero(lockless_queue_.queue_size());
    } else {
      actual_queue_index_ = actual_queue_index_.Increment();
    }
  }

  bool FetchNext() {
    // TODO(austin): Get behind and make sure it dies both here and with
    // Fetch.
    ipc_lib::LocklessQueue::ReadResult read_result = lockless_queue_.Read(
        actual_queue_index_.index(), &context_.monotonic_event_time,
        &context_.realtime_event_time, &context_.monotonic_remote_time,
        &context_.realtime_remote_time, &context_.remote_queue_index,
        &context_.size, data_storage_start());
    if (read_result == ipc_lib::LocklessQueue::ReadResult::GOOD) {
      context_.queue_index = actual_queue_index_.index();
      if (context_.remote_queue_index == 0xffffffffu) {
        context_.remote_queue_index = context_.queue_index;
      }
      if (context_.monotonic_remote_time == aos::monotonic_clock::min_time) {
        context_.monotonic_remote_time = context_.monotonic_event_time;
      }
      if (context_.realtime_remote_time == aos::realtime_clock::min_time) {
        context_.realtime_remote_time = context_.realtime_event_time;
      }
      context_.data = data_storage_start() +
                      lockless_queue_.message_data_size() - context_.size;
      actual_queue_index_ = actual_queue_index_.Increment();
    }

    // Make sure the data wasn't modified while we were reading it.  This
    // can only happen if you are reading the last message *while* it is
    // being written to, which means you are pretty far behind.
    CHECK(read_result != ipc_lib::LocklessQueue::ReadResult::OVERWROTE)
        << ": Got behind while reading and the last message was modified "
           "out from under us while we were reading it.  Don't get so far "
           "behind.  "
        << configuration::CleanedChannelToString(channel_);

    CHECK(read_result != ipc_lib::LocklessQueue::ReadResult::TOO_OLD)
        << ": The next message is no longer available.  "
        << configuration::CleanedChannelToString(channel_);
    return read_result == ipc_lib::LocklessQueue::ReadResult::GOOD;
  }

  bool Fetch() {
    const ipc_lib::QueueIndex queue_index = lockless_queue_.LatestQueueIndex();
    // actual_queue_index_ is only meaningful if it was set by Fetch or
    // FetchNext.  This happens when context_.data has been set.  So, only
    // skip the check when context_.data is set.
    //
    // Also, if the latest queue index is invalid, we are empty.  So there
    // is nothing to fetch.
    if ((context_.data != nullptr &&
         queue_index == actual_queue_index_.DecrementBy(1u)) ||
        !queue_index.valid()) {
      return false;
    }

    ipc_lib::LocklessQueue::ReadResult read_result = lockless_queue_.Read(
        queue_index.index(), &context_.monotonic_event_time,
        &context_.realtime_event_time, &context_.monotonic_remote_time,
        &context_.realtime_remote_time, &context_.remote_queue_index,
        &context_.size, data_storage_start());
    if (read_result == ipc_lib::LocklessQueue::ReadResult::GOOD) {
      context_.queue_index = queue_index.index();
      if (context_.remote_queue_index == 0xffffffffu) {
        context_.remote_queue_index = context_.queue_index;
      }
      if (context_.monotonic_remote_time == aos::monotonic_clock::min_time) {
        context_.monotonic_remote_time = context_.monotonic_event_time;
      }
      if (context_.realtime_remote_time == aos::realtime_clock::min_time) {
        context_.realtime_remote_time = context_.realtime_event_time;
      }
      context_.data = data_storage_start() +
                      lockless_queue_.message_data_size() - context_.size;
      actual_queue_index_ = queue_index.Increment();
    }

    // Make sure the data wasn't modified while we were reading it.  This
    // can only happen if you are reading the last message *while* it is
    // being written to, which means you are pretty far behind.
    CHECK(read_result != ipc_lib::LocklessQueue::ReadResult::OVERWROTE)
        << ": Got behind while reading and the last message was modified "
           "out from under us while we were reading it.  Don't get so far "
           "behind.  "
        << configuration::CleanedChannelToString(channel_);

    CHECK(read_result != ipc_lib::LocklessQueue::ReadResult::NOTHING_NEW)
        << ": Queue index went backwards.  This should never happen.  "
        << configuration::CleanedChannelToString(channel_);

    // We fell behind between when we read the index and read the value.
    // This isn't worth recovering from since this means we went to sleep
    // for a long time in the middle of this function.
    CHECK(read_result != ipc_lib::LocklessQueue::ReadResult::TOO_OLD)
        << ": The next message is no longer available.  "
        << configuration::CleanedChannelToString(channel_);
    return read_result == ipc_lib::LocklessQueue::ReadResult::GOOD;
  }

  Context context() const { return context_; }

  bool RegisterWakeup(int priority) {
    return lockless_queue_.RegisterWakeup(priority);
  }

  void UnregisterWakeup() { lockless_queue_.UnregisterWakeup(); }

  absl::Span<char> GetSharedMemory() const {
    return lockless_queue_memory_.GetSharedMemory();
  }

 private:
  char *data_storage_start() {
    return RoundChannelData(data_storage_.get(), channel_->max_size());
  }

  const Channel *const channel_;
  MMapedQueue lockless_queue_memory_;
  ipc_lib::LocklessQueue lockless_queue_;

  ipc_lib::QueueIndex actual_queue_index_ =
      ipc_lib::LocklessQueue::empty_queue_index();

  std::unique_ptr<char, decltype(&free)> data_storage_;

  Context context_;
};

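// RawFetcher implementation which pulls messages out of shared memory via a
// SimpleShmFetcher.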
class ShmFetcher : public RawFetcher {
 public:
  explicit ShmFetcher(EventLoop *event_loop, const Channel *channel)
      : RawFetcher(event_loop, channel),
        simple_shm_fetcher_(event_loop, channel) {}

  ~ShmFetcher() { context_.data = nullptr; }

  std::pair<bool, monotonic_clock::time_point> DoFetchNext() override {
    if (simple_shm_fetcher_.FetchNext()) {
      context_ = simple_shm_fetcher_.context();
      return std::make_pair(true, monotonic_clock::now());
    }
    return std::make_pair(false, monotonic_clock::min_time);
  }

  std::pair<bool, monotonic_clock::time_point> DoFetch() override {
    if (simple_shm_fetcher_.Fetch()) {
      context_ = simple_shm_fetcher_.context();
      return std::make_pair(true, monotonic_clock::now());
    }
    return std::make_pair(false, monotonic_clock::min_time);
  }

 private:
  SimpleShmFetcher simple_shm_fetcher_;
};

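// RawSender implementation which writes directly into the channel's shared
// memory queue and then wakes up any registered watchers.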
class ShmSender : public RawSender {
 public:
  explicit ShmSender(EventLoop *event_loop, const Channel *channel)
      : RawSender(event_loop, channel),
        lockless_queue_memory_(
            channel,
            chrono::ceil<chrono::seconds>(chrono::nanoseconds(
                event_loop->configuration()->channel_storage_duration()))),
        lockless_queue_(lockless_queue_memory_.memory(),
                        lockless_queue_memory_.config()),
        lockless_queue_sender_(lockless_queue_.MakeSender()) {}

  ~ShmSender() override {}

  void *data() override { return lockless_queue_sender_.Data(); }
  size_t size() override { return lockless_queue_sender_.size(); }
  bool DoSend(size_t length,
              aos::monotonic_clock::time_point monotonic_remote_time,
              aos::realtime_clock::time_point realtime_remote_time,
              uint32_t remote_queue_index) override {
    lockless_queue_sender_.Send(
        length, monotonic_remote_time, realtime_remote_time, remote_queue_index,
        &monotonic_sent_time_, &realtime_sent_time_, &sent_queue_index_);
    lockless_queue_.Wakeup(event_loop()->priority());
    return true;
  }

  bool DoSend(const void *msg, size_t length,
              aos::monotonic_clock::time_point monotonic_remote_time,
              aos::realtime_clock::time_point realtime_remote_time,
              uint32_t remote_queue_index) override {
    lockless_queue_sender_.Send(reinterpret_cast<const char *>(msg), length,
                                monotonic_remote_time, realtime_remote_time,
                                remote_queue_index, &monotonic_sent_time_,
                                &realtime_sent_time_, &sent_queue_index_);
    lockless_queue_.Wakeup(event_loop()->priority());
    // TODO(austin): Return an error if we send too fast.
    return true;
  }

  absl::Span<char> GetSharedMemory() const {
    return lockless_queue_memory_.GetSharedMemory();
  }

 private:
  MMapedQueue lockless_queue_memory_;
  ipc_lib::LocklessQueue lockless_queue_;
  ipc_lib::LocklessQueue::Sender lockless_queue_sender_;
};

// Class to manage the state for a Watcher.
class WatcherState : public aos::WatcherState {
 public:
  WatcherState(
      ShmEventLoop *event_loop, const Channel *channel,
      std::function<void(const Context &context, const void *message)> fn)
      : aos::WatcherState(event_loop, channel, std::move(fn)),
        event_loop_(event_loop),
        event_(this),
        simple_shm_fetcher_(event_loop, channel) {}

  ~WatcherState() override { event_loop_->RemoveEvent(&event_); }

  void Startup(EventLoop *event_loop) override {
    simple_shm_fetcher_.PointAtNextQueueIndex();
    CHECK(RegisterWakeup(event_loop->priority()));
  }

  // Returns true if there is new data available.
  bool CheckForNewData() {
    if (!has_new_data_) {
      has_new_data_ = simple_shm_fetcher_.FetchNext();

      if (has_new_data_) {
        event_.set_event_time(
            simple_shm_fetcher_.context().monotonic_event_time);
        event_loop_->AddEvent(&event_);
      }
    }

    return has_new_data_;
  }

  // Consumes the data by calling the callback.
  void HandleEvent() {
    CHECK(has_new_data_);
    DoCallCallback(monotonic_clock::now, simple_shm_fetcher_.context());
    has_new_data_ = false;
    CheckForNewData();
  }

  // Registers us to receive a signal on event reception.
  bool RegisterWakeup(int priority) {
    return simple_shm_fetcher_.RegisterWakeup(priority);
  }

  void UnregisterWakeup() { return simple_shm_fetcher_.UnregisterWakeup(); }

  absl::Span<char> GetSharedMemory() const {
    return simple_shm_fetcher_.GetSharedMemory();
  }

 private:
  bool has_new_data_ = false;

  ShmEventLoop *event_loop_;
  EventHandler<WatcherState> event_;
  SimpleShmFetcher simple_shm_fetcher_;
};

// Adapter class to adapt a timerfd to a TimerHandler.
class TimerHandlerState final : public TimerHandler {
 public:
  TimerHandlerState(ShmEventLoop *shm_event_loop, ::std::function<void()> fn)
      : TimerHandler(shm_event_loop, std::move(fn)),
        shm_event_loop_(shm_event_loop),
        event_(this) {
    shm_event_loop_->epoll_.OnReadable(timerfd_.fd(), [this]() {
      // The timer may fire spuriously.  HandleEvent on the event loop will
      // call the callback if it is needed.  It may also have called it when
      // processing some other event, and the kernel decided to deliver this
      // wakeup anyway.
      timerfd_.Read();
      shm_event_loop_->HandleEvent();
    });
  }

  ~TimerHandlerState() {
    Disable();
    shm_event_loop_->epoll_.DeleteFd(timerfd_.fd());
  }

  void HandleEvent() {
    CHECK(!event_.valid());
    const auto monotonic_now = Call(monotonic_clock::now, base_);
    if (event_.valid()) {
      // If someone called Setup inside Call, rescheduling is already taken
      // care of.  Bail.
      return;
    }

    if (repeat_offset_ == chrono::seconds(0)) {
      timerfd_.Disable();
    } else {
      // Compute how many cycles have elapsed and schedule the next iteration
      // in the future.
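      // This rounds (monotonic_now - base_) up to a whole number of
      // repeat_offset_ periods.  For example, with base_ at 0ms, a 10ms
      // repeat_offset_, and monotonic_now at 25ms, elapsed_cycles is 3 and
      // base_ becomes 30ms: the first cycle boundary after now.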
      const int elapsed_cycles =
          std::max<int>(0, (monotonic_now - base_ + repeat_offset_ -
                            std::chrono::nanoseconds(1)) /
                               repeat_offset_);
      base_ += repeat_offset_ * elapsed_cycles;

      // Update the heap and schedule the timerfd wakeup.
      event_.set_event_time(base_);
      shm_event_loop_->AddEvent(&event_);
      timerfd_.SetTime(base_, chrono::seconds(0));
    }
  }

  void Setup(monotonic_clock::time_point base,
             monotonic_clock::duration repeat_offset) override {
    if (event_.valid()) {
      shm_event_loop_->RemoveEvent(&event_);
    }

    timerfd_.SetTime(base, repeat_offset);
    base_ = base;
    repeat_offset_ = repeat_offset;
    event_.set_event_time(base_);
    shm_event_loop_->AddEvent(&event_);
  }

  void Disable() override {
    shm_event_loop_->RemoveEvent(&event_);
    timerfd_.Disable();
  }

 private:
  ShmEventLoop *shm_event_loop_;
  EventHandler<TimerHandlerState> event_;

  TimerFd timerfd_;

  monotonic_clock::time_point base_;
  monotonic_clock::duration repeat_offset_;
};

// Adapter class to the timerfd and PhasedLoop.
class PhasedLoopHandler final : public ::aos::PhasedLoopHandler {
 public:
  PhasedLoopHandler(ShmEventLoop *shm_event_loop, ::std::function<void(int)> fn,
                    const monotonic_clock::duration interval,
                    const monotonic_clock::duration offset)
      : aos::PhasedLoopHandler(shm_event_loop, std::move(fn), interval, offset),
        shm_event_loop_(shm_event_loop),
        event_(this) {
    shm_event_loop_->epoll_.OnReadable(
        timerfd_.fd(), [this]() { shm_event_loop_->HandleEvent(); });
  }

  void HandleEvent() {
    // The return value for read is the number of cycles that have elapsed.
    // Because we check to see when this event *should* have happened, there
    // are cases where Read() will return 0, when 1 cycle has actually
    // happened.  This occurs when the timer interrupt hasn't triggered yet.
    // Therefore, ignore it.  Call handles rescheduling and calculating elapsed
    // cycles without any extra help.
    timerfd_.Read();
    event_.Invalidate();

    Call(monotonic_clock::now, [this](monotonic_clock::time_point sleep_time) {
      Schedule(sleep_time);
    });
  }

  ~PhasedLoopHandler() override {
    shm_event_loop_->epoll_.DeleteFd(timerfd_.fd());
    shm_event_loop_->RemoveEvent(&event_);
  }

 private:
  // Reschedules the timer.
  void Schedule(monotonic_clock::time_point sleep_time) override {
    if (event_.valid()) {
      shm_event_loop_->RemoveEvent(&event_);
    }

    timerfd_.SetTime(sleep_time, ::aos::monotonic_clock::zero());
    event_.set_event_time(sleep_time);
    shm_event_loop_->AddEvent(&event_);
  }

  ShmEventLoop *shm_event_loop_;
  EventHandler<PhasedLoopHandler> event_;

  TimerFd timerfd_;
};
592
593::std::unique_ptr<RawFetcher> ShmEventLoop::MakeRawFetcher(
594 const Channel *channel) {
Austin Schuhca4828c2019-12-28 14:21:35 -0800595 if (!configuration::ChannelIsReadableOnNode(channel, node())) {
596 LOG(FATAL) << "Channel { \"name\": \"" << channel->name()->string_view()
597 << "\", \"type\": \"" << channel->type()->string_view()
598 << "\" } is not able to be fetched on this node. Check your "
599 "configuration.";
Austin Schuh217a9782019-12-21 23:02:50 -0800600 }
601
Austin Schuh39788ff2019-12-01 18:22:57 -0800602 return ::std::unique_ptr<RawFetcher>(new internal::ShmFetcher(this, channel));
Alex Perrycb7da4b2019-08-28 19:35:56 -0700603}
604
605::std::unique_ptr<RawSender> ShmEventLoop::MakeRawSender(
606 const Channel *channel) {
Brian Silverman0fc69932020-01-24 21:54:02 -0800607 TakeSender(channel);
Austin Schuh39788ff2019-12-01 18:22:57 -0800608
609 return ::std::unique_ptr<RawSender>(new internal::ShmSender(this, channel));
Alex Perrycb7da4b2019-08-28 19:35:56 -0700610}
611
612void ShmEventLoop::MakeRawWatcher(
613 const Channel *channel,
614 std::function<void(const Context &context, const void *message)> watcher) {
Brian Silverman0fc69932020-01-24 21:54:02 -0800615 TakeWatcher(channel);
Austin Schuh217a9782019-12-21 23:02:50 -0800616
Austin Schuh39788ff2019-12-01 18:22:57 -0800617 NewWatcher(::std::unique_ptr<WatcherState>(
618 new internal::WatcherState(this, channel, std::move(watcher))));
Alex Perrycb7da4b2019-08-28 19:35:56 -0700619}
620
621TimerHandler *ShmEventLoop::AddTimer(::std::function<void()> callback) {
Austin Schuh39788ff2019-12-01 18:22:57 -0800622 return NewTimer(::std::unique_ptr<TimerHandler>(
623 new internal::TimerHandlerState(this, ::std::move(callback))));
Alex Perrycb7da4b2019-08-28 19:35:56 -0700624}
625
626PhasedLoopHandler *ShmEventLoop::AddPhasedLoop(
627 ::std::function<void(int)> callback,
628 const monotonic_clock::duration interval,
629 const monotonic_clock::duration offset) {
Austin Schuh39788ff2019-12-01 18:22:57 -0800630 return NewPhasedLoop(
631 ::std::unique_ptr<PhasedLoopHandler>(new internal::PhasedLoopHandler(
632 this, ::std::move(callback), interval, offset)));
Alex Perrycb7da4b2019-08-28 19:35:56 -0700633}
634
635void ShmEventLoop::OnRun(::std::function<void()> on_run) {
636 on_run_.push_back(::std::move(on_run));
637}
638
Austin Schuh7d87b672019-12-01 20:23:49 -0800639void ShmEventLoop::HandleEvent() {
640 // Update all the times for handlers.
641 for (::std::unique_ptr<WatcherState> &base_watcher : watchers_) {
642 internal::WatcherState *watcher =
643 reinterpret_cast<internal::WatcherState *>(base_watcher.get());
644
645 watcher->CheckForNewData();
646 }
647
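  // Pop and run every queued event whose scheduled time has already passed.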
  while (true) {
    if (EventCount() == 0 ||
        PeekEvent()->event_time() > monotonic_clock::now()) {
      break;
    }

    EventLoopEvent *event = PopEvent();
    event->HandleEvent();
  }
}

// RAII class to mask signals.
class ScopedSignalMask {
 public:
  ScopedSignalMask(std::initializer_list<int> signals) {
    sigset_t sigset;
    PCHECK(sigemptyset(&sigset) == 0);
    for (int signal : signals) {
      PCHECK(sigaddset(&sigset, signal) == 0);
    }

    PCHECK(sigprocmask(SIG_BLOCK, &sigset, &old_) == 0);
  }

  ~ScopedSignalMask() { PCHECK(sigprocmask(SIG_SETMASK, &old_, nullptr) == 0); }

 private:
  sigset_t old_;
};

// Class to manage the static state associated with killing multiple event
// loops.
class SignalHandler {
 public:
  // Gets the singleton.
  static SignalHandler *global() {
    static SignalHandler loop;
    return &loop;
  }

  // Handles the signal with the singleton.
  static void HandleSignal(int) { global()->DoHandleSignal(); }

  // Registers an event loop to receive Exit() calls.
  void Register(ShmEventLoop *event_loop) {
    // Block signals while we have the mutex so we never race with the signal
    // handler.
    ScopedSignalMask mask({SIGINT, SIGHUP, SIGTERM});
    std::unique_lock<stl_mutex> locker(mutex_);
    if (event_loops_.size() == 0) {
      // The first caller registers the signal handler.
      struct sigaction new_action;
      sigemptyset(&new_action.sa_mask);
      // This makes it so that 2 control c's to a stuck process will kill it by
      // restoring the original signal handler.
      new_action.sa_flags = SA_RESETHAND;
      new_action.sa_handler = &HandleSignal;

      PCHECK(sigaction(SIGINT, &new_action, &old_action_int_) == 0);
      PCHECK(sigaction(SIGHUP, &new_action, &old_action_hup_) == 0);
      PCHECK(sigaction(SIGTERM, &new_action, &old_action_term_) == 0);
    }

    event_loops_.push_back(event_loop);
  }

  // Unregisters an event loop to receive Exit() calls.
  void Unregister(ShmEventLoop *event_loop) {
    // Block signals while we have the mutex so we never race with the signal
    // handler.
    ScopedSignalMask mask({SIGINT, SIGHUP, SIGTERM});
    std::unique_lock<stl_mutex> locker(mutex_);

    event_loops_.erase(
        std::find(event_loops_.begin(), event_loops_.end(), event_loop));

    if (event_loops_.size() == 0u) {
      // The last caller restores the original signal handlers.
      PCHECK(sigaction(SIGINT, &old_action_int_, nullptr) == 0);
      PCHECK(sigaction(SIGHUP, &old_action_hup_, nullptr) == 0);
      PCHECK(sigaction(SIGTERM, &old_action_term_, nullptr) == 0);
    }
  }

 private:
  void DoHandleSignal() {
    // We block signals while grabbing the lock, so there should never be a
    // race.  Confirm that this is true using trylock.
    CHECK(mutex_.try_lock()) << ": sigprocmask failed to block signals while "
                                "modifying the event loop list.";
    for (ShmEventLoop *event_loop : event_loops_) {
      event_loop->Exit();
    }
    mutex_.unlock();
  }

  // Mutex to protect all state.
  stl_mutex mutex_;
  std::vector<ShmEventLoop *> event_loops_;
  struct sigaction old_action_int_;
  struct sigaction old_action_hup_;
  struct sigaction old_action_term_;
};

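// Runs the event loop: registers with the signal handler so Exit() fires on
// SIGINT/SIGHUP/SIGTERM, wires the watcher wakeup signal into epoll, goes
// realtime if a priority was set, runs the OnRun handlers, and then services
// epoll until Exit() is called.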
void ShmEventLoop::Run() {
  SignalHandler::global()->Register(this);

  std::unique_ptr<ipc_lib::SignalFd> signalfd;

  if (watchers_.size() > 0) {
    signalfd.reset(new ipc_lib::SignalFd({ipc_lib::kWakeupSignal}));

    epoll_.OnReadable(signalfd->fd(), [signalfd_ptr = signalfd.get(), this]() {
      signalfd_siginfo result = signalfd_ptr->Read();
      CHECK_EQ(result.ssi_signo, ipc_lib::kWakeupSignal);

      // TODO(austin): We should really be checking *everything*, not just
      // watchers, and calling the oldest thing first.  That will improve
      // determinism a lot.

      HandleEvent();
    });
  }

  MaybeScheduleTimingReports();

  ReserveEvents();

  {
    AosLogToFbs aos_logger;
    if (!skip_logger_) {
      aos_logger.Initialize(MakeSender<logging::LogMessageFbs>("/aos"));
    }

    aos::SetCurrentThreadName(name_.substr(0, 16));
    // Now, all the callbacks are set up.  Lock everything into memory and go
    // RT.
    if (priority_ != 0) {
      ::aos::InitRT();

      LOG(INFO) << "Setting priority to " << priority_;
      ::aos::SetCurrentThreadRealtimePriority(priority_);
    }

    set_is_running(true);

    // Now that we are realtime (but before the OnRun handlers run), snap the
    // queue index.
    for (::std::unique_ptr<WatcherState> &watcher : watchers_) {
      watcher->Startup(this);
    }

    // Now that we are RT, run all the OnRun handlers.
    for (const auto &run : on_run_) {
      run();
    }

    // And start our main event loop which runs all the timers and handles
    // Quit.
    epoll_.Run();

    // Once epoll exits, there is no useful nonrt work left to do.
    set_is_running(false);

    // Nothing time or synchronization critical needs to happen after this
    // point.  Drop RT priority.
    ::aos::UnsetCurrentThreadRealtimePriority();
  }

  for (::std::unique_ptr<WatcherState> &base_watcher : watchers_) {
    internal::WatcherState *watcher =
        reinterpret_cast<internal::WatcherState *>(base_watcher.get());
    watcher->UnregisterWakeup();
  }

  if (watchers_.size() > 0) {
    epoll_.DeleteFd(signalfd->fd());
    signalfd.reset();
  }

  SignalHandler::global()->Unregister(this);

  // Trigger any remaining senders or fetchers to be cleared before destroying
  // the event loop so the book keeping matches.  Do this in the thread that
  // created the timing reporter.
  timing_report_sender_.reset();
}

void ShmEventLoop::Exit() { epoll_.Quit(); }

ShmEventLoop::~ShmEventLoop() {
  // Force everything with a registered fd with epoll to be destroyed now.
  timers_.clear();
  phased_loops_.clear();
  watchers_.clear();

  CHECK(!is_running()) << ": ShmEventLoop destroyed while running";
}

void ShmEventLoop::SetRuntimeRealtimePriority(int priority) {
  if (is_running()) {
    LOG(FATAL) << "Cannot set realtime priority while running.";
  }
  priority_ = priority;
}

void ShmEventLoop::set_name(const std::string_view name) {
  name_ = std::string(name);
  UpdateTimingReport();
}

absl::Span<char> ShmEventLoop::GetWatcherSharedMemory(const Channel *channel) {
  internal::WatcherState *const watcher_state =
      static_cast<internal::WatcherState *>(GetWatcherState(channel));
  return watcher_state->GetSharedMemory();
}

absl::Span<char> ShmEventLoop::GetShmSenderSharedMemory(
    const aos::RawSender *sender) const {
  return static_cast<const internal::ShmSender *>(sender)->GetSharedMemory();
}

pid_t ShmEventLoop::GetTid() { return syscall(SYS_gettid); }

}  // namespace aos