blob: 9e18544be3b07ea9f0bdbee0e621f19054aee664 [file] [log] [blame]
#include "aos/events/shm_event_loop.h"

#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/timerfd.h>
#include <sys/types.h>
#include <unistd.h>

#include <algorithm>
#include <atomic>
#include <cerrno>
#include <chrono>
#include <functional>
#include <memory>
#include <stdexcept>
#include <string>
#include <string_view>
#include <thread>

#include "glog/logging.h"

#include "aos/events/epoll.h"
#include "aos/ipc_lib/lockless_queue.h"
#include "aos/realtime.h"
#include "aos/util/phased_loop.h"
19
// Root directory for the per-channel queue backing files; each channel maps to
// <shm_base>/<channel name>/<type>.v0.
DEFINE_string(shm_base, "/dev/shm/aos",
              "Directory to place queue backing mmaped files in.");
// Mode bits used for every file and directory created under shm_base.
DEFINE_uint32(permissions, 0770,
              "Permissions to make shared memory files and folders.");
24
25namespace aos {
26
27std::string ShmFolder(const Channel *channel) {
28 CHECK(channel->has_name());
29 CHECK_EQ(channel->name()->string_view()[0], '/');
30 return FLAGS_shm_base + channel->name()->str() + "/";
31}
32std::string ShmPath(const Channel *channel) {
33 CHECK(channel->has_type());
34 return ShmFolder(channel) + channel->type()->str() + ".v0";
35}
36
37class MMapedQueue {
38 public:
39 MMapedQueue(const Channel *channel) {
40 std::string path = ShmPath(channel);
41
42 // TODO(austin): Pull these out into the config if there is a need.
43 config_.num_watchers = 10;
44 config_.num_senders = 10;
45 config_.queue_size = 2 * channel->frequency();
46 config_.message_data_size = channel->max_size();
47
48 size_ = ipc_lib::LocklessQueueMemorySize(config_);
49
50 MkdirP(path);
51
52 // There are 2 cases. Either the file already exists, or it does not
53 // already exist and we need to create it. Start by trying to create it. If
54 // that fails, the file has already been created and we can open it
55 // normally.. Once the file has been created it wil never be deleted.
56 fd_ = open(path.c_str(), O_RDWR | O_CREAT | O_EXCL,
57 O_CLOEXEC | FLAGS_permissions);
58 if (fd_ == -1 && errno == EEXIST) {
59 VLOG(1) << path << " already created.";
60 // File already exists.
61 fd_ = open(path.c_str(), O_RDWR, O_CLOEXEC);
62 PCHECK(fd_ != -1) << ": Failed to open " << path;
63 while (true) {
64 struct stat st;
65 PCHECK(fstat(fd_, &st) == 0);
66 if (st.st_size != 0) {
67 CHECK_EQ(static_cast<size_t>(st.st_size), size_)
68 << ": Size of " << path
69 << " doesn't match expected size of backing queue file. Did the "
70 "queue definition change?";
71 break;
72 } else {
73 // The creating process didn't get around to it yet. Give it a bit.
74 std::this_thread::sleep_for(std::chrono::milliseconds(10));
75 VLOG(1) << path << " is zero size, waiting";
76 }
77 }
78 } else {
79 VLOG(1) << "Created " << path;
80 PCHECK(fd_ != -1) << ": Failed to open " << path;
81 PCHECK(ftruncate(fd_, size_) == 0);
82 }
83
84 data_ = mmap(NULL, size_, PROT_READ | PROT_WRITE, MAP_SHARED, fd_, 0);
85 PCHECK(data_ != MAP_FAILED);
86
87 ipc_lib::InitializeLocklessQueueMemory(memory(), config_);
88 }
89
90 ~MMapedQueue() {
91 PCHECK(munmap(data_, size_) == 0);
92 PCHECK(close(fd_) == 0);
93 }
94
95 ipc_lib::LocklessQueueMemory *memory() const {
96 return reinterpret_cast<ipc_lib::LocklessQueueMemory *>(data_);
97 }
98
99 const ipc_lib::LocklessQueueConfiguration &config() const {
100 return config_;
101 }
102
103 private:
James Kuszmaul3ae42262019-11-08 12:33:41 -0800104 void MkdirP(std::string_view path) {
Alex Perrycb7da4b2019-08-28 19:35:56 -0700105 struct stat st;
106 auto last_slash_pos = path.find_last_of("/");
107
James Kuszmaul3ae42262019-11-08 12:33:41 -0800108 std::string folder(last_slash_pos == std::string_view::npos
109 ? std::string_view("")
Alex Perrycb7da4b2019-08-28 19:35:56 -0700110 : path.substr(0, last_slash_pos));
111 if (stat(folder.c_str(), &st) == -1) {
112 PCHECK(errno == ENOENT);
113 CHECK_NE(folder, "") << ": Base path doesn't exist";
114 MkdirP(folder);
115 VLOG(1) << "Creating " << folder;
116 PCHECK(mkdir(folder.c_str(), FLAGS_permissions) == 0);
117 }
118 }
119
120 ipc_lib::LocklessQueueConfiguration config_;
121
122 int fd_;
123
124 size_t size_;
125 void *data_;
126};
127
// Returns the portion of the path after the last /.  A path without a slash
// is returned unchanged; a path ending in '/' yields an empty view.
std::string_view Filename(std::string_view path) {
  const size_t slash = path.rfind('/');
  if (slash == std::string_view::npos) {
    return path;
  }
  return path.substr(slash + 1);
}
136
// Names the event loop after the running binary: program_invocation_name is
// the GNU libc global holding argv[0], and Filename() strips the directory.
ShmEventLoop::ShmEventLoop(const Configuration *configuration)
    : EventLoop(configuration), name_(Filename(program_invocation_name)) {}
139
140namespace {
141
142namespace chrono = ::std::chrono;
143
// RawFetcher implementation which reads from the shared-memory lockless queue
// backing a channel.  Each fetch copies the message out of shared memory into
// an aligned scratch buffer owned by this fetcher, so the fetched message
// stays valid until the next fetch.
class ShmFetcher : public RawFetcher {
 public:
  explicit ShmFetcher(const Channel *channel)
      : RawFetcher(channel),
        lockless_queue_memory_(channel),
        lockless_queue_(lockless_queue_memory_.memory(),
                        lockless_queue_memory_.config()),
        // NOTE(review): aligned_alloc requires size to be a multiple of the
        // alignment; this assumes channel->max_size() satisfies that for
        // alignof(AlignedChar) == 32 -- TODO confirm.
        data_storage_(static_cast<AlignedChar *>(aligned_alloc(
                          alignof(AlignedChar), channel->max_size())),
                      &free) {
    context_.data = nullptr;
    // Point the queue index at the next index to read starting now.  This
    // makes it such that FetchNext will read the next message sent after
    // the fetcher is created.
    PointAtNextQueueIndex();
  }

  // data_ points into data_storage_, which we own; just clear the pointer.
  ~ShmFetcher() { data_ = nullptr; }

  // Points the next message to fetch at the queue index which will be
  // populated next.
  void PointAtNextQueueIndex() {
    actual_queue_index_ = lockless_queue_.LatestQueueIndex();
    if (!actual_queue_index_.valid()) {
      // Nothing in the queue.  The next element will show up at the 0th
      // index in the queue.
      actual_queue_index_ =
          ipc_lib::QueueIndex::Zero(lockless_queue_.queue_size());
    } else {
      actual_queue_index_ = actual_queue_index_.Increment();
    }
  }

  // Fetches the message at actual_queue_index_ (in-order iteration).
  // Returns true and fills in context_/data_ on success; CHECK-fails if we
  // have fallen so far behind that the message was overwritten or recycled.
  bool FetchNext() override {
    // TODO(austin): Write a test which starts with nothing in the queue,
    // and then calls FetchNext() after something is sent.
    // TODO(austin): Get behind and make sure it dies both here and with
    // Fetch.
    ipc_lib::LocklessQueue::ReadResult read_result = lockless_queue_.Read(
        actual_queue_index_.index(), &context_.monotonic_sent_time,
        &context_.realtime_sent_time, &context_.size,
        reinterpret_cast<char *>(data_storage_.get()));
    if (read_result == ipc_lib::LocklessQueue::ReadResult::GOOD) {
      context_.queue_index = actual_queue_index_.index();
      // The payload sits at the end of the fixed-size slot, so skip the
      // unused space at the front of the buffer.
      data_ = reinterpret_cast<char *>(data_storage_.get()) +
              lockless_queue_.message_data_size() - context_.size;
      context_.data = data_;
      actual_queue_index_ = actual_queue_index_.Increment();
    }

    // Make sure the data wasn't modified while we were reading it. This
    // can only happen if you are reading the last message *while* it is
    // being written to, which means you are pretty far behind.
    CHECK(read_result != ipc_lib::LocklessQueue::ReadResult::OVERWROTE)
        << ": Got behind while reading and the last message was modified "
           "out "
           "from under us while we were reading it. Don't get so far "
           "behind.";

    CHECK(read_result != ipc_lib::LocklessQueue::ReadResult::TOO_OLD)
        << ": The next message is no longer available.";
    return read_result == ipc_lib::LocklessQueue::ReadResult::GOOD;
  }

  // Fetches the latest message in the queue, skipping any in between.
  // Returns false if we already fetched the latest or the queue is empty.
  bool Fetch() override {
    const ipc_lib::QueueIndex queue_index = lockless_queue_.LatestQueueIndex();
    // actual_queue_index_ is only meaningful if it was set by Fetch or
    // FetchNext. This happens when valid_data_ has been set. So, only
    // skip checking if valid_data_ is true.
    //
    // Also, if the latest queue index is invalid, we are empty. So there
    // is nothing to fetch.
    if ((data_ != nullptr &&
         queue_index == actual_queue_index_.DecrementBy(1u)) ||
        !queue_index.valid()) {
      return false;
    }

    ipc_lib::LocklessQueue::ReadResult read_result =
        lockless_queue_.Read(queue_index.index(), &context_.monotonic_sent_time,
                             &context_.realtime_sent_time, &context_.size,
                             reinterpret_cast<char *>(data_storage_.get()));
    if (read_result == ipc_lib::LocklessQueue::ReadResult::GOOD) {
      context_.queue_index = queue_index.index();
      // The payload sits at the end of the fixed-size slot, so skip the
      // unused space at the front of the buffer.
      data_ = reinterpret_cast<char *>(data_storage_.get()) +
              lockless_queue_.message_data_size() - context_.size;
      context_.data = data_;
      actual_queue_index_ = queue_index.Increment();
    }

    // Make sure the data wasn't modified while we were reading it. This
    // can only happen if you are reading the last message *while* it is
    // being written to, which means you are pretty far behind.
    CHECK(read_result != ipc_lib::LocklessQueue::ReadResult::OVERWROTE)
        << ": Got behind while reading and the last message was modified "
           "out "
           "from under us while we were reading it. Don't get so far "
           "behind.";

    CHECK(read_result != ipc_lib::LocklessQueue::ReadResult::NOTHING_NEW)
        << ": Queue index went backwards. This should never happen.";

    // We fell behind between when we read the index and read the value.
    // This isn't worth recovering from since this means we went to sleep
    // for a long time in the middle of this function.
    CHECK(read_result != ipc_lib::LocklessQueue::ReadResult::TOO_OLD)
        << ": The next message is no longer available.";
    return read_result == ipc_lib::LocklessQueue::ReadResult::GOOD;
  }

  // Registers for wakeups when new messages arrive, at the given priority.
  // Forwards to the lockless queue.
  bool RegisterWakeup(int priority) {
    return lockless_queue_.RegisterWakeup(priority);
  }

  void UnregisterWakeup() { lockless_queue_.UnregisterWakeup(); }

 private:
  // Shared-memory mapping for the channel; must outlive lockless_queue_,
  // which holds pointers into it (declared first so it is destroyed last).
  MMapedQueue lockless_queue_memory_;
  ipc_lib::LocklessQueue lockless_queue_;

  // Index of the next message FetchNext() will read.
  ipc_lib::QueueIndex actual_queue_index_ =
      ipc_lib::LocklessQueue::empty_queue_index();

  // Over-aligned byte type so the scratch buffer is 32-byte aligned.
  struct AlignedChar {
    alignas(32) char data;
  };

  // Scratch buffer messages are copied into; released with free() since it
  // came from aligned_alloc().
  std::unique_ptr<AlignedChar, decltype(&free)> data_storage_;
};
273
// RawSender implementation which writes into the shared-memory lockless queue
// backing a channel, then wakes up registered watchers after each send.
class ShmSender : public RawSender {
 public:
  explicit ShmSender(const Channel *channel, const ShmEventLoop *shm_event_loop)
      : RawSender(channel),
        shm_event_loop_(shm_event_loop),
        name_(channel->name()->str()),
        lockless_queue_memory_(channel),
        lockless_queue_(lockless_queue_memory_.memory(),
                        lockless_queue_memory_.config()),
        lockless_queue_sender_(lockless_queue_.MakeSender()) {}

  // Scratch buffer (and its capacity) to build a message in place before
  // calling Send(size) -- avoids an extra copy.
  void *data() override { return lockless_queue_sender_.Data(); }
  size_t size() override { return lockless_queue_sender_.size(); }

  // Publishes the message already built in data(), then signals watchers at
  // the event loop's priority.  Always reports success.
  bool Send(size_t size) override {
    lockless_queue_sender_.Send(size);
    lockless_queue_.Wakeup(shm_event_loop_->priority());
    return true;
  }

  // Copies |length| bytes from |msg| into the queue, publishes it, and
  // signals watchers.  Always reports success.
  bool Send(const void *msg, size_t length) override {
    lockless_queue_sender_.Send(reinterpret_cast<const char *>(msg), length);
    lockless_queue_.Wakeup(shm_event_loop_->priority());
    // TODO(austin): Return an error if we send too fast.
    return true;
  }

  const std::string_view name() const override { return name_; }

 private:
  const ShmEventLoop *shm_event_loop_;
  std::string name_;
  // Shared-memory mapping for the channel; must outlive lockless_queue_ and
  // lockless_queue_sender_, which point into it.
  MMapedQueue lockless_queue_memory_;
  ipc_lib::LocklessQueue lockless_queue_;
  ipc_lib::LocklessQueue::Sender lockless_queue_sender_;
};
309
310} // namespace
311
312namespace internal {
313
314// Class to manage the state for a Watcher.
315class WatcherState {
316 public:
317 WatcherState(
318 const Channel *channel,
319 std::function<void(const Context &context, const void *message)> watcher)
320 : shm_fetcher_(channel), watcher_(watcher) {}
321
322 ~WatcherState() {}
323
324 // Points the next message to fetch at the queue index which will be populated
325 // next.
326 void PointAtNextQueueIndex() { shm_fetcher_.PointAtNextQueueIndex(); }
327
328 // Returns true if there is new data available.
329 bool HasNewData() {
330 if (!has_new_data_) {
331 has_new_data_ = shm_fetcher_.FetchNext();
332 }
333
334 return has_new_data_;
335 }
336
337 // Returns the time of the current data sample.
338 aos::monotonic_clock::time_point event_time() const {
339 return shm_fetcher_.context().monotonic_sent_time;
340 }
341
342 // Consumes the data by calling the callback.
343 void CallCallback() {
344 CHECK(has_new_data_);
345 watcher_(shm_fetcher_.context(), shm_fetcher_.most_recent_data());
346 has_new_data_ = false;
347 }
348
349 // Starts the thread and waits until it is running.
350 bool RegisterWakeup(int priority) {
351 return shm_fetcher_.RegisterWakeup(priority);
352 }
353
354 void UnregisterWakeup() { return shm_fetcher_.UnregisterWakeup(); }
355
356 private:
357 bool has_new_data_ = false;
358
359 ShmFetcher shm_fetcher_;
360
361 std::function<void(const Context &context, const void *message)> watcher_;
362};
363
// Adapter class to adapt a timerfd to a TimerHandler.
class TimerHandlerState : public TimerHandler {
 public:
  TimerHandlerState(ShmEventLoop *shm_event_loop, ::std::function<void()> fn)
      : shm_event_loop_(shm_event_loop), fn_(::std::move(fn)) {
    shm_event_loop_->epoll_.OnReadable(timerfd_.fd(), [this]() {
      // Per timerfd(2) semantics, the read yields the number of expirations
      // since the last read -- presumably what TimerFd::Read() returns; the
      // accounting below depends on it.  TODO confirm against TimerFd.
      const uint64_t elapsed_cycles = timerfd_.Read();

      // Expose the scheduled (not actual) wakeup time to the handler through
      // the event loop's context; there is no real message behind a timer.
      shm_event_loop_->context_.monotonic_sent_time = base_;
      shm_event_loop_->context_.realtime_sent_time = realtime_clock::min_time;
      shm_event_loop_->context_.queue_index = 0;
      shm_event_loop_->context_.size = 0;
      shm_event_loop_->context_.data = nullptr;

      fn_();

      // Advance the scheduled time by however many periods actually fired so
      // a slow handler doesn't skew the reported time of future wakeups.
      base_ += repeat_offset_ * elapsed_cycles;
    });
  }

  ~TimerHandlerState() { shm_event_loop_->epoll_.DeleteFd(timerfd_.fd()); }

  // Arms the timer to fire at |base|, then every |repeat_offset| after that.
  void Setup(monotonic_clock::time_point base,
             monotonic_clock::duration repeat_offset) override {
    timerfd_.SetTime(base, repeat_offset);
    base_ = base;
    repeat_offset_ = repeat_offset;
  }

  void Disable() override {
    // Disable is also threadsafe already.
    timerfd_.Disable();
  }

 private:
  ShmEventLoop *shm_event_loop_;

  TimerFd timerfd_;

  // User callback run on each expiration.
  ::std::function<void()> fn_;

  // Scheduled time of the next expiration and the repeat period; used to
  // populate context_.monotonic_sent_time above.
  monotonic_clock::time_point base_;
  monotonic_clock::duration repeat_offset_;
};
408
// Adapter class to the timerfd and PhasedLoop.
class PhasedLoopHandler : public ::aos::PhasedLoopHandler {
 public:
  // |fn| is called with the number of cycles elapsed since the previous call
  // (more than 1 when the handler fell behind).
  PhasedLoopHandler(ShmEventLoop *shm_event_loop, ::std::function<void(int)> fn,
                    const monotonic_clock::duration interval,
                    const monotonic_clock::duration offset)
      : shm_event_loop_(shm_event_loop),
        phased_loop_(interval, shm_event_loop_->monotonic_now(), offset),
        fn_(::std::move(fn)) {
    shm_event_loop_->epoll_.OnReadable(timerfd_.fd(), [this]() {
      timerfd_.Read();
      // Update the context to hold the desired wakeup time.
      shm_event_loop_->context_.monotonic_sent_time = phased_loop_.sleep_time();
      shm_event_loop_->context_.realtime_sent_time = realtime_clock::min_time;
      shm_event_loop_->context_.queue_index = 0;
      shm_event_loop_->context_.size = 0;
      shm_event_loop_->context_.data = nullptr;

      // Compute how many cycles elapsed and schedule the next wakeup.
      // This must happen before fn_() so the count is current.
      Reschedule();

      // Call the function with the elapsed cycles.
      fn_(cycles_elapsed_);
      cycles_elapsed_ = 0;

      const monotonic_clock::time_point monotonic_end_time =
          monotonic_clock::now();

      // If the handler took too long so we blew by the previous deadline, we
      // want to just try for the next deadline.  Reschedule.
      if (monotonic_end_time > phased_loop_.sleep_time()) {
        Reschedule();
      }
    });
  }

  ~PhasedLoopHandler() { shm_event_loop_->epoll_.DeleteFd(timerfd_.fd()); }

  void set_interval_and_offset(
      const monotonic_clock::duration interval,
      const monotonic_clock::duration offset) override {
    phased_loop_.set_interval_and_offset(interval, offset);
  }

  // Called once the event loop is running to align the phased loop with the
  // current time and arm the first wakeup.
  void Startup() {
    phased_loop_.Reset(shm_event_loop_->monotonic_now());
    Reschedule();
    // The first time, we'll double count.  Reschedule here will count cycles
    // elapsed before now, and then the reschedule before running the handler
    // will count the time that elapsed then.  So clear the count here.
    cycles_elapsed_ = 0;
  }

 private:
  // Reschedules the timer for the next phase boundary, accumulating how many
  // cycles went by into cycles_elapsed_.
  void Reschedule() {
    cycles_elapsed_ += phased_loop_.Iterate(shm_event_loop_->monotonic_now());
    timerfd_.SetTime(phased_loop_.sleep_time(), ::aos::monotonic_clock::zero());
  }

  ShmEventLoop *shm_event_loop_;

  TimerFd timerfd_;
  time::PhasedLoop phased_loop_;

  // Cycles elapsed since the handler last ran; reported to fn_.
  int cycles_elapsed_ = 0;

  // Function to be run
  const ::std::function<void(int)> fn_;
};
479} // namespace internal
480
481::std::unique_ptr<RawFetcher> ShmEventLoop::MakeRawFetcher(
482 const Channel *channel) {
Austin Schuh54cf95f2019-11-29 13:14:18 -0800483 ValidateChannel(channel);
Alex Perrycb7da4b2019-08-28 19:35:56 -0700484 return ::std::unique_ptr<RawFetcher>(new ShmFetcher(channel));
485}
486
487::std::unique_ptr<RawSender> ShmEventLoop::MakeRawSender(
488 const Channel *channel) {
Austin Schuh54cf95f2019-11-29 13:14:18 -0800489 ValidateChannel(channel);
Alex Perrycb7da4b2019-08-28 19:35:56 -0700490 Take(channel);
491 return ::std::unique_ptr<RawSender>(new ShmSender(channel, this));
492}
493
494void ShmEventLoop::MakeRawWatcher(
495 const Channel *channel,
496 std::function<void(const Context &context, const void *message)> watcher) {
Austin Schuh54cf95f2019-11-29 13:14:18 -0800497 ValidateChannel(channel);
Alex Perrycb7da4b2019-08-28 19:35:56 -0700498 Take(channel);
499
500 ::std::unique_ptr<internal::WatcherState> state(
501 new internal::WatcherState(
502 channel, std::move(watcher)));
503 watchers_.push_back(::std::move(state));
504}
505
506TimerHandler *ShmEventLoop::AddTimer(::std::function<void()> callback) {
507 ::std::unique_ptr<internal::TimerHandlerState> timer(
508 new internal::TimerHandlerState(this, ::std::move(callback)));
509
510 timers_.push_back(::std::move(timer));
511
512 return timers_.back().get();
513}
514
515PhasedLoopHandler *ShmEventLoop::AddPhasedLoop(
516 ::std::function<void(int)> callback,
517 const monotonic_clock::duration interval,
518 const monotonic_clock::duration offset) {
519 ::std::unique_ptr<internal::PhasedLoopHandler> phased_loop(
520 new internal::PhasedLoopHandler(this, ::std::move(callback), interval,
521 offset));
522
523 phased_loops_.push_back(::std::move(phased_loop));
524
525 return phased_loops_.back().get();
526}
527
528void ShmEventLoop::OnRun(::std::function<void()> on_run) {
529 on_run_.push_back(::std::move(on_run));
530}
531
// Runs the event loop: wires up watcher wakeups, optionally goes realtime,
// runs the OnRun handlers, then blocks in epoll dispatching timers, phased
// loops, and watchers until Exit() is called.  Tears the realtime state and
// wakeups back down before returning.
void ShmEventLoop::Run() {
  std::unique_ptr<ipc_lib::SignalFd> signalfd;

  if (watchers_.size() > 0) {
    // Watcher wakeups arrive as kWakeupSignal; route it through a signalfd so
    // epoll can dispatch it like any other fd.
    signalfd.reset(new ipc_lib::SignalFd({ipc_lib::kWakeupSignal}));

    epoll_.OnReadable(signalfd->fd(), [signalfd_ptr = signalfd.get(), this]() {
      signalfd_siginfo result = signalfd_ptr->Read();
      CHECK_EQ(result.ssi_signo, ipc_lib::kWakeupSignal);

      // TODO(austin): We should really be checking *everything*, not just
      // watchers, and calling the oldest thing first.  That will improve
      // determinism a lot.

      // Drain all pending watcher messages, oldest first, so handlers run in
      // message-time order even when one signal covers several sends.
      while (true) {
        // Call the handlers in time order of their messages.
        aos::monotonic_clock::time_point min_event_time =
            aos::monotonic_clock::max_time;
        // Only read when min_event_time was updated, so the sentinel -1 is
        // never used to index.
        size_t min_watcher_index = -1;
        size_t watcher_index = 0;
        for (::std::unique_ptr<internal::WatcherState> &watcher : watchers_) {
          if (watcher->HasNewData()) {
            if (watcher->event_time() < min_event_time) {
              min_watcher_index = watcher_index;
              min_event_time = watcher->event_time();
            }
          }
          ++watcher_index;
        }

        // No watcher had data left; wait for the next signal.
        if (min_event_time == aos::monotonic_clock::max_time) {
          break;
        }

        watchers_[min_watcher_index]->CallCallback();
      }
    });
  }

  // Now, all the threads are up.  Lock everything into memory and go RT.
  if (priority_ != 0) {
    ::aos::InitRT();

    LOG(INFO) << "Setting priority to " << priority_;
    ::aos::SetCurrentThreadRealtimePriority(priority_);
  }

  set_is_running(true);

  // Now that we are realtime (but before the OnRun handlers run), snap the
  // queue index.
  for (::std::unique_ptr<internal::WatcherState> &watcher : watchers_) {
    watcher->PointAtNextQueueIndex();
    CHECK(watcher->RegisterWakeup(priority_));
  }

  // Now that we are RT, run all the OnRun handlers.
  for (const auto &run : on_run_) {
    run();
  }

  // Start up all the phased loops.
  for (::std::unique_ptr<internal::PhasedLoopHandler> &phased_loop :
       phased_loops_) {
    phased_loop->Startup();
  }

  // And start our main event loop which runs all the timers and handles Quit.
  epoll_.Run();

  // Once epoll exits, there is no useful nonrt work left to do.
  set_is_running(false);

  // Nothing time or synchronization critical needs to happen after this point.
  // Drop RT priority.
  ::aos::UnsetCurrentThreadRealtimePriority();

  for (::std::unique_ptr<internal::WatcherState> &watcher : watchers_) {
    watcher->UnregisterWakeup();
  }

  if (watchers_.size() > 0) {
    epoll_.DeleteFd(signalfd->fd());
    signalfd.reset();
  }
}
618
// Requests that Run() return by quitting the underlying epoll loop.
void ShmEventLoop::Exit() { epoll_.Quit(); }
620
ShmEventLoop::~ShmEventLoop() {
  // Destroying the loop while Run() is still dispatching handlers is a bug;
  // callers must Exit() and let Run() return first.
  CHECK(!is_running()) << ": ShmEventLoop destroyed while running";
}
624
625void ShmEventLoop::Take(const Channel *channel) {
626 CHECK(!is_running()) << ": Cannot add new objects while running.";
627
628 // Cheat aggresively. Use the shared memory path as a proxy for a unique
629 // identifier for the channel.
630 const std::string path = ShmPath(channel);
631
632 const auto prior = ::std::find(taken_.begin(), taken_.end(), path);
633 CHECK(prior == taken_.end()) << ": " << path << " is already being used.";
634
635 taken_.emplace_back(path);
636}
637
// Sets the realtime priority Run() will switch to during startup.  Must be
// called before Run(); the default of 0 leaves the thread non-realtime.
void ShmEventLoop::SetRuntimeRealtimePriority(int priority) {
  if (is_running()) {
    LOG(FATAL) << "Cannot set realtime priority while running.";
  }
  priority_ = priority;
}
644
645} // namespace aos