blob: b53be2fbef60436e86aee9df6efe6039ed9a7d75 [file] [log] [blame]
#include "aos/events/shm_event_loop.h"

#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

#include <algorithm>
#include <atomic>
#include <cerrno>
#include <chrono>
#include <iterator>
#include <stdexcept>
#include <thread>

#include "aos/events/aos_logging.h"
#include "aos/events/epoll.h"
#include "aos/events/event_loop_generated.h"
#include "aos/events/timing_statistics.h"
#include "aos/ipc_lib/lockless_queue.h"
#include "aos/ipc_lib/signalfd.h"
#include "aos/realtime.h"
#include "aos/stl_mutex/stl_mutex.h"
#include "aos/util/file.h"
#include "aos/util/phased_loop.h"
#include "glog/logging.h"
Alex Perrycb7da4b2019-08-28 19:35:56 -070026
namespace {

// Returns the basename of a null-terminated path: everything after the final
// '/', or the whole string when there is no '/'.
const char *Filename(const char *path) {
  const std::string_view view(path);
  const std::string_view::size_type last_slash = view.rfind('/');
  if (last_slash == std::string_view::npos) {
    return path;
  }
  return path + last_slash + 1;
}

}  // namespace
40
// Directory under which the mmap-ed queue backing files are created, one file
// per channel (see ShmPath below).
DEFINE_string(shm_base, "/dev/shm/aos",
              "Directory to place queue backing mmaped files in.");
// Octal permission bits applied to the backing files and their directories.
DEFINE_uint32(permissions, 0770,
              "Permissions to make shared memory files and folders.");
// Defaults to the basename of the running binary.
DEFINE_string(application_name, Filename(program_invocation_name),
              "The application name");
Alex Perrycb7da4b2019-08-28 19:35:56 -070047
48namespace aos {
49
Austin Schuhcdab6192019-12-29 17:47:46 -080050void SetShmBase(const std::string_view base) {
51 FLAGS_shm_base = std::string(base) + "/dev/shm/aos";
52}
53
Alex Perrycb7da4b2019-08-28 19:35:56 -070054std::string ShmFolder(const Channel *channel) {
55 CHECK(channel->has_name());
56 CHECK_EQ(channel->name()->string_view()[0], '/');
57 return FLAGS_shm_base + channel->name()->str() + "/";
58}
59std::string ShmPath(const Channel *channel) {
60 CHECK(channel->has_type());
Austin Schuh3328d132020-02-28 13:54:57 -080061 return ShmFolder(channel) + channel->type()->str() + ".v2";
Alex Perrycb7da4b2019-08-28 19:35:56 -070062}
63
64class MMapedQueue {
65 public:
Austin Schuhaa79e4e2019-12-29 20:43:32 -080066 MMapedQueue(const Channel *channel,
67 const std::chrono::seconds channel_storage_duration) {
Alex Perrycb7da4b2019-08-28 19:35:56 -070068 std::string path = ShmPath(channel);
69
Austin Schuh80c7fce2019-12-05 20:48:43 -080070 config_.num_watchers = channel->num_watchers();
71 config_.num_senders = channel->num_senders();
Austin Schuhaa79e4e2019-12-29 20:43:32 -080072 config_.queue_size =
73 channel_storage_duration.count() * channel->frequency();
Alex Perrycb7da4b2019-08-28 19:35:56 -070074 config_.message_data_size = channel->max_size();
75
76 size_ = ipc_lib::LocklessQueueMemorySize(config_);
77
Austin Schuhfccb2d02020-01-26 16:11:19 -080078 util::MkdirP(path, FLAGS_permissions);
Alex Perrycb7da4b2019-08-28 19:35:56 -070079
80 // There are 2 cases. Either the file already exists, or it does not
81 // already exist and we need to create it. Start by trying to create it. If
82 // that fails, the file has already been created and we can open it
83 // normally.. Once the file has been created it wil never be deleted.
Brian Silvermanf9f30ea2020-03-04 23:18:54 -080084 int fd = open(path.c_str(), O_RDWR | O_CREAT | O_EXCL,
Alex Perrycb7da4b2019-08-28 19:35:56 -070085 O_CLOEXEC | FLAGS_permissions);
Brian Silvermanf9f30ea2020-03-04 23:18:54 -080086 if (fd == -1 && errno == EEXIST) {
Alex Perrycb7da4b2019-08-28 19:35:56 -070087 VLOG(1) << path << " already created.";
88 // File already exists.
Brian Silvermanf9f30ea2020-03-04 23:18:54 -080089 fd = open(path.c_str(), O_RDWR, O_CLOEXEC);
90 PCHECK(fd != -1) << ": Failed to open " << path;
Alex Perrycb7da4b2019-08-28 19:35:56 -070091 while (true) {
92 struct stat st;
Brian Silvermanf9f30ea2020-03-04 23:18:54 -080093 PCHECK(fstat(fd, &st) == 0);
Alex Perrycb7da4b2019-08-28 19:35:56 -070094 if (st.st_size != 0) {
95 CHECK_EQ(static_cast<size_t>(st.st_size), size_)
96 << ": Size of " << path
97 << " doesn't match expected size of backing queue file. Did the "
98 "queue definition change?";
99 break;
100 } else {
101 // The creating process didn't get around to it yet. Give it a bit.
102 std::this_thread::sleep_for(std::chrono::milliseconds(10));
103 VLOG(1) << path << " is zero size, waiting";
104 }
105 }
106 } else {
107 VLOG(1) << "Created " << path;
Brian Silvermanf9f30ea2020-03-04 23:18:54 -0800108 PCHECK(fd != -1) << ": Failed to open " << path;
109 PCHECK(ftruncate(fd, size_) == 0);
Alex Perrycb7da4b2019-08-28 19:35:56 -0700110 }
111
Brian Silvermanf9f30ea2020-03-04 23:18:54 -0800112 data_ = mmap(NULL, size_, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
Alex Perrycb7da4b2019-08-28 19:35:56 -0700113 PCHECK(data_ != MAP_FAILED);
Brian Silvermanf9f30ea2020-03-04 23:18:54 -0800114 PCHECK(close(fd) == 0);
Alex Perrycb7da4b2019-08-28 19:35:56 -0700115
116 ipc_lib::InitializeLocklessQueueMemory(memory(), config_);
117 }
118
119 ~MMapedQueue() {
120 PCHECK(munmap(data_, size_) == 0);
Alex Perrycb7da4b2019-08-28 19:35:56 -0700121 }
122
123 ipc_lib::LocklessQueueMemory *memory() const {
124 return reinterpret_cast<ipc_lib::LocklessQueueMemory *>(data_);
125 }
126
Austin Schuh39788ff2019-12-01 18:22:57 -0800127 const ipc_lib::LocklessQueueConfiguration &config() const { return config_; }
Alex Perrycb7da4b2019-08-28 19:35:56 -0700128
Brian Silverman5120afb2020-01-31 17:44:35 -0800129 absl::Span<char> GetSharedMemory() const {
130 return absl::Span<char>(static_cast<char *>(data_), size_);
131 }
132
Alex Perrycb7da4b2019-08-28 19:35:56 -0700133 private:
Alex Perrycb7da4b2019-08-28 19:35:56 -0700134 ipc_lib::LocklessQueueConfiguration config_;
135
Alex Perrycb7da4b2019-08-28 19:35:56 -0700136 size_t size_;
137 void *data_;
138};
139
Austin Schuh217a9782019-12-21 23:02:50 -0800140namespace {
141
Austin Schuh217a9782019-12-21 23:02:50 -0800142const Node *MaybeMyNode(const Configuration *configuration) {
143 if (!configuration->has_nodes()) {
144 return nullptr;
145 }
Alex Perrycb7da4b2019-08-28 19:35:56 -0700146
Austin Schuh217a9782019-12-21 23:02:50 -0800147 return configuration::GetMyNode(configuration);
148}
Alex Perrycb7da4b2019-08-28 19:35:56 -0700149
150namespace chrono = ::std::chrono;
151
Austin Schuh39788ff2019-12-01 18:22:57 -0800152} // namespace
153
// Constructs a shared-memory event loop for this process.  The application
// name comes from --application_name (defaulting to the binary's basename),
// and node_ is resolved from the configuration when it is multi-node.
ShmEventLoop::ShmEventLoop(const Configuration *configuration)
    : EventLoop(configuration),
      name_(FLAGS_application_name),
      node_(MaybeMyNode(configuration)) {
  // A multi-node config in which we cannot identify ourselves is fatal.
  if (configuration->has_nodes()) {
    CHECK(node_ != nullptr) << ": Couldn't find node in config.";
  }
}
Austin Schuh217a9782019-12-21 23:02:50 -0800162
namespace internal {

// Shared implementation for reading messages out of a channel's lockless
// queue.  Used by both ShmFetcher and WatcherState below.  When copy_data is
// true, fetched message data is copied into a locally malloc-ed, aligned
// buffer; otherwise only the message metadata is tracked and context().data
// stays nullptr.
class SimpleShmFetcher {
 public:
  explicit SimpleShmFetcher(EventLoop *event_loop, const Channel *channel,
                            bool copy_data)
      : channel_(channel),
        lockless_queue_memory_(
            channel,
            chrono::ceil<chrono::seconds>(chrono::nanoseconds(
                event_loop->configuration()->channel_storage_duration()))),
        lockless_queue_(lockless_queue_memory_.memory(),
                        lockless_queue_memory_.config()) {
    if (copy_data) {
      // Over-allocate so data_storage_start() can round the pointer up to
      // kChannelDataAlignment.
      data_storage_.reset(static_cast<char *>(
          malloc(channel->max_size() + kChannelDataAlignment - 1)));
    }
    context_.data = nullptr;
    // Point the queue index at the next index to read starting now.  This
    // makes it such that FetchNext will read the next message sent after
    // the fetcher is created.
    PointAtNextQueueIndex();
  }

  ~SimpleShmFetcher() {}

  // Points the next message to fetch at the queue index which will be
  // populated next.
  void PointAtNextQueueIndex() {
    actual_queue_index_ = lockless_queue_.LatestQueueIndex();
    if (!actual_queue_index_.valid()) {
      // Nothing in the queue.  The next element will show up at the 0th
      // index in the queue.
      actual_queue_index_ =
          ipc_lib::QueueIndex::Zero(lockless_queue_.queue_size());
    } else {
      actual_queue_index_ = actual_queue_index_.Increment();
    }
  }

  // Reads the message at actual_queue_index_ (the oldest unread message) into
  // context_, advancing the index on success.  Returns true if a message was
  // read.  Dies if we fell so far behind that the message was overwritten
  // while being read or is no longer available.
  bool FetchNext() {
    // TODO(austin): Get behind and make sure it dies both here and with
    // Fetch.
    ipc_lib::LocklessQueue::ReadResult read_result = lockless_queue_.Read(
        actual_queue_index_.index(), &context_.monotonic_event_time,
        &context_.realtime_event_time, &context_.monotonic_remote_time,
        &context_.realtime_remote_time, &context_.remote_queue_index,
        &context_.size, data_storage_start());
    if (read_result == ipc_lib::LocklessQueue::ReadResult::GOOD) {
      context_.queue_index = actual_queue_index_.index();
      // Messages with no remote metadata default it to the local values.
      if (context_.remote_queue_index == 0xffffffffu) {
        context_.remote_queue_index = context_.queue_index;
      }
      if (context_.monotonic_remote_time == aos::monotonic_clock::min_time) {
        context_.monotonic_remote_time = context_.monotonic_event_time;
      }
      if (context_.realtime_remote_time == aos::realtime_clock::min_time) {
        context_.realtime_remote_time = context_.realtime_event_time;
      }
      if (copy_data()) {
        // Data is written at the end of the slot, so point just past the
        // unused prefix.
        context_.data = data_storage_start() +
                        lockless_queue_.message_data_size() - context_.size;
      } else {
        context_.data = nullptr;
      }
      actual_queue_index_ = actual_queue_index_.Increment();
    }

    // Make sure the data wasn't modified while we were reading it.  This
    // can only happen if you are reading the last message *while* it is
    // being written to, which means you are pretty far behind.
    CHECK(read_result != ipc_lib::LocklessQueue::ReadResult::OVERWROTE)
        << ": Got behind while reading and the last message was modified "
           "out from under us while we were reading it. Don't get so far "
           "behind. "
        << configuration::CleanedChannelToString(channel_);

    CHECK(read_result != ipc_lib::LocklessQueue::ReadResult::TOO_OLD)
        << ": The next message is no longer available. "
        << configuration::CleanedChannelToString(channel_);
    return read_result == ipc_lib::LocklessQueue::ReadResult::GOOD;
  }

  // Reads the newest message in the queue into context_, skipping anything
  // older.  Returns true if a message newer than the last one we saw was
  // read.  Dies if we got behind mid-read.
  bool Fetch() {
    const ipc_lib::QueueIndex queue_index = lockless_queue_.LatestQueueIndex();
    // actual_queue_index_ is only meaningful if it was set by Fetch or
    // FetchNext.  This happens when valid_data_ has been set.  So, only
    // skip checking if valid_data_ is true.
    //
    // Also, if the latest queue index is invalid, we are empty.  So there
    // is nothing to fetch.
    if ((context_.data != nullptr &&
         queue_index == actual_queue_index_.DecrementBy(1u)) ||
        !queue_index.valid()) {
      return false;
    }

    ipc_lib::LocklessQueue::ReadResult read_result = lockless_queue_.Read(
        queue_index.index(), &context_.monotonic_event_time,
        &context_.realtime_event_time, &context_.monotonic_remote_time,
        &context_.realtime_remote_time, &context_.remote_queue_index,
        &context_.size, data_storage_start());
    if (read_result == ipc_lib::LocklessQueue::ReadResult::GOOD) {
      context_.queue_index = queue_index.index();
      // Messages with no remote metadata default it to the local values.
      if (context_.remote_queue_index == 0xffffffffu) {
        context_.remote_queue_index = context_.queue_index;
      }
      if (context_.monotonic_remote_time == aos::monotonic_clock::min_time) {
        context_.monotonic_remote_time = context_.monotonic_event_time;
      }
      if (context_.realtime_remote_time == aos::realtime_clock::min_time) {
        context_.realtime_remote_time = context_.realtime_event_time;
      }
      if (copy_data()) {
        // Data is written at the end of the slot, so point just past the
        // unused prefix.
        context_.data = data_storage_start() +
                        lockless_queue_.message_data_size() - context_.size;
      } else {
        context_.data = nullptr;
      }
      actual_queue_index_ = queue_index.Increment();
    }

    // Make sure the data wasn't modified while we were reading it.  This
    // can only happen if you are reading the last message *while* it is
    // being written to, which means you are pretty far behind.
    CHECK(read_result != ipc_lib::LocklessQueue::ReadResult::OVERWROTE)
        << ": Got behind while reading and the last message was modified "
           "out from under us while we were reading it. Don't get so far "
           "behind."
        << configuration::CleanedChannelToString(channel_);

    CHECK(read_result != ipc_lib::LocklessQueue::ReadResult::NOTHING_NEW)
        << ": Queue index went backwards. This should never happen. "
        << configuration::CleanedChannelToString(channel_);

    // We fell behind between when we read the index and read the value.
    // This isn't worth recovering from since this means we went to sleep
    // for a long time in the middle of this function.
    CHECK(read_result != ipc_lib::LocklessQueue::ReadResult::TOO_OLD)
        << ": The next message is no longer available. "
        << configuration::CleanedChannelToString(channel_);
    return read_result == ipc_lib::LocklessQueue::ReadResult::GOOD;
  }

  // Metadata (and, when copying, the data pointer) of the last fetched
  // message.  Returned by value.
  Context context() const { return context_; }

  bool RegisterWakeup(int priority) {
    return lockless_queue_.RegisterWakeup(priority);
  }

  void UnregisterWakeup() { lockless_queue_.UnregisterWakeup(); }

  absl::Span<char> GetSharedMemory() const {
    return lockless_queue_memory_.GetSharedMemory();
  }

 private:
  // Start of the aligned copy buffer, or nullptr when not copying.
  char *data_storage_start() {
    if (!copy_data()) return nullptr;
    return RoundChannelData(data_storage_.get(), channel_->max_size());
  }
  bool copy_data() const { return static_cast<bool>(data_storage_); }

  const Channel *const channel_;
  MMapedQueue lockless_queue_memory_;
  ipc_lib::LocklessQueue lockless_queue_;

  ipc_lib::QueueIndex actual_queue_index_ =
      ipc_lib::LocklessQueue::empty_queue_index();

  // This being empty indicates we're not going to copy data.
  std::unique_ptr<char, decltype(&free)> data_storage_{nullptr, &free};

  Context context_;
};
338
339class ShmFetcher : public RawFetcher {
340 public:
341 explicit ShmFetcher(EventLoop *event_loop, const Channel *channel)
Austin Schuhaa79e4e2019-12-29 20:43:32 -0800342 : RawFetcher(event_loop, channel),
Brian Silverman6b8a3c32020-03-06 11:26:14 -0800343 simple_shm_fetcher_(event_loop, channel, true) {}
Austin Schuh39788ff2019-12-01 18:22:57 -0800344
345 ~ShmFetcher() { context_.data = nullptr; }
346
347 std::pair<bool, monotonic_clock::time_point> DoFetchNext() override {
348 if (simple_shm_fetcher_.FetchNext()) {
349 context_ = simple_shm_fetcher_.context();
350 return std::make_pair(true, monotonic_clock::now());
351 }
352 return std::make_pair(false, monotonic_clock::min_time);
353 }
354
355 std::pair<bool, monotonic_clock::time_point> DoFetch() override {
356 if (simple_shm_fetcher_.Fetch()) {
357 context_ = simple_shm_fetcher_.context();
358 return std::make_pair(true, monotonic_clock::now());
359 }
360 return std::make_pair(false, monotonic_clock::min_time);
361 }
362
363 private:
364 SimpleShmFetcher simple_shm_fetcher_;
Alex Perrycb7da4b2019-08-28 19:35:56 -0700365};
366
// RawSender backed by a lockless queue sender slot in the channel's shared
// memory.  Acquiring the slot can fail if the channel already has the maximum
// number of senders; that is fatal (see VerifySender).
class ShmSender : public RawSender {
 public:
  explicit ShmSender(EventLoop *event_loop, const Channel *channel)
      : RawSender(event_loop, channel),
        lockless_queue_memory_(
            channel,
            chrono::ceil<chrono::seconds>(chrono::nanoseconds(
                event_loop->configuration()->channel_storage_duration()))),
        lockless_queue_(lockless_queue_memory_.memory(),
                        lockless_queue_memory_.config()),
        lockless_queue_sender_(
            VerifySender(lockless_queue_.MakeSender(), channel)) {}

  ~ShmSender() override {}

  // Unwraps the optional sender from MakeSender, dying with a useful channel
  // description if no sender slot was available.  LOG(FATAL) does not return.
  static ipc_lib::LocklessQueue::Sender VerifySender(
      std::optional<ipc_lib::LocklessQueue::Sender> &&sender,
      const Channel *channel) {
    if (sender) {
      return std::move(sender.value());
    }
    LOG(FATAL) << "Failed to create sender on "
               << configuration::CleanedChannelToString(channel)
               << ", too many senders.";
  }

  // Shared-memory scratch buffer to build a message in before sending.
  void *data() override { return lockless_queue_sender_.Data(); }
  size_t size() override { return lockless_queue_sender_.size(); }

  // Sends the message already built in data()/size(), then wakes up any
  // watchers at this event loop's priority.
  bool DoSend(size_t length,
              aos::monotonic_clock::time_point monotonic_remote_time,
              aos::realtime_clock::time_point realtime_remote_time,
              uint32_t remote_queue_index) override {
    CHECK_LE(length, static_cast<size_t>(channel()->max_size()))
        << ": Sent too big a message on "
        << configuration::CleanedChannelToString(channel());
    lockless_queue_sender_.Send(
        length, monotonic_remote_time, realtime_remote_time, remote_queue_index,
        &monotonic_sent_time_, &realtime_sent_time_, &sent_queue_index_);
    lockless_queue_.Wakeup(event_loop()->priority());
    return true;
  }

  // Copies msg into the queue and sends it, then wakes up any watchers.
  bool DoSend(const void *msg, size_t length,
              aos::monotonic_clock::time_point monotonic_remote_time,
              aos::realtime_clock::time_point realtime_remote_time,
              uint32_t remote_queue_index) override {
    CHECK_LE(length, static_cast<size_t>(channel()->max_size()))
        << ": Sent too big a message on "
        << configuration::CleanedChannelToString(channel());
    lockless_queue_sender_.Send(reinterpret_cast<const char *>(msg), length,
                                monotonic_remote_time, realtime_remote_time,
                                remote_queue_index, &monotonic_sent_time_,
                                &realtime_sent_time_, &sent_queue_index_);
    lockless_queue_.Wakeup(event_loop()->priority());
    // TODO(austin): Return an error if we send too fast.
    return true;
  }

  absl::Span<char> GetSharedMemory() const {
    return lockless_queue_memory_.GetSharedMemory();
  }

 private:
  MMapedQueue lockless_queue_memory_;
  ipc_lib::LocklessQueue lockless_queue_;
  ipc_lib::LocklessQueue::Sender lockless_queue_sender_;
};
434
// Class to manage the state for a Watcher.  Pairs a SimpleShmFetcher with the
// event loop's event heap: new messages are fetched, scheduled as events at
// their send time, and the user callback runs from HandleEvent.
class WatcherState : public aos::WatcherState {
 public:
  WatcherState(
      ShmEventLoop *event_loop, const Channel *channel,
      std::function<void(const Context &context, const void *message)> fn,
      bool copy_data)
      : aos::WatcherState(event_loop, channel, std::move(fn)),
        event_loop_(event_loop),
        event_(this),
        simple_shm_fetcher_(event_loop, channel, copy_data) {}

  ~WatcherState() override { event_loop_->RemoveEvent(&event_); }

  // Called when the event loop starts: skip everything already in the queue
  // and start receiving wakeups at the loop's priority.
  void Startup(EventLoop *event_loop) override {
    simple_shm_fetcher_.PointAtNextQueueIndex();
    CHECK(RegisterWakeup(event_loop->priority()));
  }

  // Returns true if there is new data available.  On a successful fetch, the
  // message is queued on the event loop at its monotonic send time.
  bool CheckForNewData() {
    if (!has_new_data_) {
      has_new_data_ = simple_shm_fetcher_.FetchNext();

      if (has_new_data_) {
        event_.set_event_time(
            simple_shm_fetcher_.context().monotonic_event_time);
        event_loop_->AddEvent(&event_);
      }
    }

    return has_new_data_;
  }

  // Consumes the data by calling the callback, then immediately looks for the
  // next message so it gets scheduled too.
  void HandleEvent() {
    CHECK(has_new_data_);
    DoCallCallback(monotonic_clock::now, simple_shm_fetcher_.context());
    has_new_data_ = false;
    CheckForNewData();
  }

  // Registers us to receive a signal on event reception.
  bool RegisterWakeup(int priority) {
    return simple_shm_fetcher_.RegisterWakeup(priority);
  }

  void UnregisterWakeup() { return simple_shm_fetcher_.UnregisterWakeup(); }

  absl::Span<char> GetSharedMemory() const {
    return simple_shm_fetcher_.GetSharedMemory();
  }

 private:
  // True while a fetched-but-unprocessed message is pending.
  bool has_new_data_ = false;

  ShmEventLoop *event_loop_;
  EventHandler<WatcherState> event_;
  SimpleShmFetcher simple_shm_fetcher_;
};
495
// Adapter class to adapt a timerfd to a TimerHandler.  The timerfd provides
// the kernel wakeup; the event loop's event heap decides when the callback
// actually runs.
class TimerHandlerState final : public TimerHandler {
 public:
  TimerHandlerState(ShmEventLoop *shm_event_loop, ::std::function<void()> fn)
      : TimerHandler(shm_event_loop, std::move(fn)),
        shm_event_loop_(shm_event_loop),
        event_(this) {
    shm_event_loop_->epoll_.OnReadable(timerfd_.fd(), [this]() {
      // The timer may fire spurriously.  HandleEvent on the event loop will
      // call the callback if it is needed.  It may also have called it when
      // processing some other event, and the kernel decided to deliver this
      // wakeup anyways.
      timerfd_.Read();
      shm_event_loop_->HandleEvent();
    });
  }

  ~TimerHandlerState() {
    Disable();
    shm_event_loop_->epoll_.DeleteFd(timerfd_.fd());
  }

  // Runs the callback and, for repeating timers, reschedules the next firing.
  void HandleEvent() {
    // The event must have been consumed (popped off the heap) before we run.
    CHECK(!event_.valid());
    const auto monotonic_now = Call(monotonic_clock::now, base_);
    if (event_.valid()) {
      // If someone called Setup inside Call, rescheduling is already taken care
      // of.  Bail.
      return;
    }

    if (repeat_offset_ == chrono::seconds(0)) {
      // One-shot timer: nothing more to schedule.
      timerfd_.Disable();
    } else {
      // Compute how many cycles have elapsed and schedule the next iteration
      // for the next iteration in the future.
      const int elapsed_cycles =
          std::max<int>(0, (monotonic_now - base_ + repeat_offset_ -
                            std::chrono::nanoseconds(1)) /
                               repeat_offset_);
      base_ += repeat_offset_ * elapsed_cycles;

      // Update the heap and schedule the timerfd wakeup.
      event_.set_event_time(base_);
      shm_event_loop_->AddEvent(&event_);
      timerfd_.SetTime(base_, chrono::seconds(0));
    }
  }

  // (Re)schedules the timer: first fire at base, then every repeat_offset
  // (zero repeat_offset means one-shot).
  void Setup(monotonic_clock::time_point base,
             monotonic_clock::duration repeat_offset) override {
    if (event_.valid()) {
      shm_event_loop_->RemoveEvent(&event_);
    }

    timerfd_.SetTime(base, repeat_offset);
    base_ = base;
    repeat_offset_ = repeat_offset;
    event_.set_event_time(base_);
    shm_event_loop_->AddEvent(&event_);
  }

  void Disable() override {
    shm_event_loop_->RemoveEvent(&event_);
    timerfd_.Disable();
  }

 private:
  ShmEventLoop *shm_event_loop_;
  EventHandler<TimerHandlerState> event_;

  TimerFd timerfd_;

  // Time of the next scheduled firing and the repeat interval (0 = one-shot).
  monotonic_clock::time_point base_;
  monotonic_clock::duration repeat_offset_;
};
572
// Adapter class to the timerfd and PhasedLoop.  The base class owns the
// phased-loop math; this class wires its scheduling into the timerfd and the
// event loop's event heap.
class PhasedLoopHandler final : public ::aos::PhasedLoopHandler {
 public:
  PhasedLoopHandler(ShmEventLoop *shm_event_loop, ::std::function<void(int)> fn,
                    const monotonic_clock::duration interval,
                    const monotonic_clock::duration offset)
      : aos::PhasedLoopHandler(shm_event_loop, std::move(fn), interval, offset),
        shm_event_loop_(shm_event_loop),
        event_(this) {
    shm_event_loop_->epoll_.OnReadable(
        timerfd_.fd(), [this]() { shm_event_loop_->HandleEvent(); });
  }

  void HandleEvent() {
    // The return value for read is the number of cycles that have elapsed.
    // Because we check to see when this event *should* have happened, there are
    // cases where Read() will return 0, when 1 cycle has actually happened.
    // This occurs when the timer interrupt hasn't triggered yet.  Therefore,
    // ignore it.  Call handles rescheduling and calculating elapsed cycles
    // without any extra help.
    timerfd_.Read();
    event_.Invalidate();

    Call(monotonic_clock::now, [this](monotonic_clock::time_point sleep_time) {
      Schedule(sleep_time);
    });
  }

  ~PhasedLoopHandler() override {
    shm_event_loop_->epoll_.DeleteFd(timerfd_.fd());
    shm_event_loop_->RemoveEvent(&event_);
  }

 private:
  // Reschedules the timer: arms the timerfd for sleep_time and re-queues the
  // heap event at the same time.
  void Schedule(monotonic_clock::time_point sleep_time) override {
    if (event_.valid()) {
      shm_event_loop_->RemoveEvent(&event_);
    }

    timerfd_.SetTime(sleep_time, ::aos::monotonic_clock::zero());
    event_.set_event_time(sleep_time);
    shm_event_loop_->AddEvent(&event_);
  }

  ShmEventLoop *shm_event_loop_;
  EventHandler<PhasedLoopHandler> event_;

  TimerFd timerfd_;
};
}  // namespace internal
624
625::std::unique_ptr<RawFetcher> ShmEventLoop::MakeRawFetcher(
626 const Channel *channel) {
Austin Schuhca4828c2019-12-28 14:21:35 -0800627 if (!configuration::ChannelIsReadableOnNode(channel, node())) {
628 LOG(FATAL) << "Channel { \"name\": \"" << channel->name()->string_view()
629 << "\", \"type\": \"" << channel->type()->string_view()
630 << "\" } is not able to be fetched on this node. Check your "
631 "configuration.";
Austin Schuh217a9782019-12-21 23:02:50 -0800632 }
633
Austin Schuh39788ff2019-12-01 18:22:57 -0800634 return ::std::unique_ptr<RawFetcher>(new internal::ShmFetcher(this, channel));
Alex Perrycb7da4b2019-08-28 19:35:56 -0700635}
636
637::std::unique_ptr<RawSender> ShmEventLoop::MakeRawSender(
638 const Channel *channel) {
Brian Silverman0fc69932020-01-24 21:54:02 -0800639 TakeSender(channel);
Austin Schuh39788ff2019-12-01 18:22:57 -0800640
641 return ::std::unique_ptr<RawSender>(new internal::ShmSender(this, channel));
Alex Perrycb7da4b2019-08-28 19:35:56 -0700642}
643
644void ShmEventLoop::MakeRawWatcher(
645 const Channel *channel,
646 std::function<void(const Context &context, const void *message)> watcher) {
Brian Silverman0fc69932020-01-24 21:54:02 -0800647 TakeWatcher(channel);
Austin Schuh217a9782019-12-21 23:02:50 -0800648
Austin Schuh39788ff2019-12-01 18:22:57 -0800649 NewWatcher(::std::unique_ptr<WatcherState>(
Brian Silverman6b8a3c32020-03-06 11:26:14 -0800650 new internal::WatcherState(this, channel, std::move(watcher), true)));
651}
652
653void ShmEventLoop::MakeRawNoArgWatcher(
654 const Channel *channel,
655 std::function<void(const Context &context)> watcher) {
656 TakeWatcher(channel);
657
658 NewWatcher(::std::unique_ptr<WatcherState>(new internal::WatcherState(
659 this, channel,
660 [watcher](const Context &context, const void *) { watcher(context); },
661 false)));
Alex Perrycb7da4b2019-08-28 19:35:56 -0700662}
663
664TimerHandler *ShmEventLoop::AddTimer(::std::function<void()> callback) {
Austin Schuh39788ff2019-12-01 18:22:57 -0800665 return NewTimer(::std::unique_ptr<TimerHandler>(
666 new internal::TimerHandlerState(this, ::std::move(callback))));
Alex Perrycb7da4b2019-08-28 19:35:56 -0700667}
668
669PhasedLoopHandler *ShmEventLoop::AddPhasedLoop(
670 ::std::function<void(int)> callback,
671 const monotonic_clock::duration interval,
672 const monotonic_clock::duration offset) {
Austin Schuh39788ff2019-12-01 18:22:57 -0800673 return NewPhasedLoop(
674 ::std::unique_ptr<PhasedLoopHandler>(new internal::PhasedLoopHandler(
675 this, ::std::move(callback), interval, offset)));
Alex Perrycb7da4b2019-08-28 19:35:56 -0700676}
677
678void ShmEventLoop::OnRun(::std::function<void()> on_run) {
679 on_run_.push_back(::std::move(on_run));
680}
681
Austin Schuh7d87b672019-12-01 20:23:49 -0800682void ShmEventLoop::HandleEvent() {
683 // Update all the times for handlers.
684 for (::std::unique_ptr<WatcherState> &base_watcher : watchers_) {
685 internal::WatcherState *watcher =
686 reinterpret_cast<internal::WatcherState *>(base_watcher.get());
687
688 watcher->CheckForNewData();
689 }
690
Austin Schuh39788ff2019-12-01 18:22:57 -0800691 while (true) {
Austin Schuh7d87b672019-12-01 20:23:49 -0800692 if (EventCount() == 0 ||
693 PeekEvent()->event_time() > monotonic_clock::now()) {
Austin Schuh39788ff2019-12-01 18:22:57 -0800694 break;
695 }
696
Austin Schuh7d87b672019-12-01 20:23:49 -0800697 EventLoopEvent *event = PopEvent();
698 event->HandleEvent();
Austin Schuh39788ff2019-12-01 18:22:57 -0800699 }
700}
701
Austin Schuh32fd5a72019-12-01 22:20:26 -0800702// RAII class to mask signals.
703class ScopedSignalMask {
704 public:
705 ScopedSignalMask(std::initializer_list<int> signals) {
706 sigset_t sigset;
707 PCHECK(sigemptyset(&sigset) == 0);
708 for (int signal : signals) {
709 PCHECK(sigaddset(&sigset, signal) == 0);
710 }
711
712 PCHECK(sigprocmask(SIG_BLOCK, &sigset, &old_) == 0);
713 }
714
715 ~ScopedSignalMask() { PCHECK(sigprocmask(SIG_SETMASK, &old_, nullptr) == 0); }
716
717 private:
718 sigset_t old_;
719};
720
// Class to manage the static state associated with killing multiple event
// loops.  A single process-wide instance fans SIGINT/SIGHUP/SIGTERM out to
// every registered ShmEventLoop as an Exit() call.
class SignalHandler {
 public:
  // Gets the singleton.
  static SignalHandler *global() {
    static SignalHandler loop;
    return &loop;
  }

  // Handles the signal with the singleton.  This is what gets installed as
  // the sa_handler below, so it runs in signal context.
  static void HandleSignal(int) { global()->DoHandleSignal(); }

  // Registers an event loop to receive Exit() calls.
  void Register(ShmEventLoop *event_loop) {
    // Block signals while we have the mutex so we never race with the signal
    // handler.
    ScopedSignalMask mask({SIGINT, SIGHUP, SIGTERM});
    std::unique_lock<stl_mutex> locker(mutex_);
    if (event_loops_.size() == 0) {
      // The first caller registers the signal handler.
      struct sigaction new_action;
      sigemptyset(&new_action.sa_mask);
      // This makes it so that 2 control c's to a stuck process will kill it by
      // restoring the original signal handler.
      new_action.sa_flags = SA_RESETHAND;
      new_action.sa_handler = &HandleSignal;

      // Save the previous dispositions so Unregister can put them back.
      PCHECK(sigaction(SIGINT, &new_action, &old_action_int_) == 0);
      PCHECK(sigaction(SIGHUP, &new_action, &old_action_hup_) == 0);
      PCHECK(sigaction(SIGTERM, &new_action, &old_action_term_) == 0);
    }

    event_loops_.push_back(event_loop);
  }

  // Unregisters an event loop to receive Exit() calls.
  void Unregister(ShmEventLoop *event_loop) {
    // Block signals while we have the mutex so we never race with the signal
    // handler.
    ScopedSignalMask mask({SIGINT, SIGHUP, SIGTERM});
    std::unique_lock<stl_mutex> locker(mutex_);

    event_loops_.erase(
        std::find(event_loops_.begin(), event_loops_.end(), event_loop));

    if (event_loops_.size() == 0u) {
      // The last caller restores the original signal handlers.
      PCHECK(sigaction(SIGINT, &old_action_int_, nullptr) == 0);
      PCHECK(sigaction(SIGHUP, &old_action_hup_, nullptr) == 0);
      PCHECK(sigaction(SIGTERM, &old_action_term_, nullptr) == 0);
    }
  }

 private:
  // Runs in signal context: tells every registered loop to exit.  Safe only
  // because Register/Unregister mask these signals while holding mutex_.
  void DoHandleSignal() {
    // We block signals while grabbing the lock, so there should never be a
    // race.  Confirm that this is true using trylock.
    CHECK(mutex_.try_lock()) << ": sigprocmask failed to block signals while "
                                "modifing the event loop list.";
    for (ShmEventLoop *event_loop : event_loops_) {
      event_loop->Exit();
    }
    mutex_.unlock();
  }

  // Mutex to protect all state.
  stl_mutex mutex_;
  // Loops which should have Exit() called on a termination signal.
  std::vector<ShmEventLoop *> event_loops_;
  // Saved dispositions, restored when the last loop unregisters.
  struct sigaction old_action_int_;
  struct sigaction old_action_hup_;
  struct sigaction old_action_term_;
};
794
Alex Perrycb7da4b2019-08-28 19:35:56 -0700795void ShmEventLoop::Run() {
Austin Schuh32fd5a72019-12-01 22:20:26 -0800796 SignalHandler::global()->Register(this);
Austin Schuh39788ff2019-12-01 18:22:57 -0800797
Alex Perrycb7da4b2019-08-28 19:35:56 -0700798 std::unique_ptr<ipc_lib::SignalFd> signalfd;
799
800 if (watchers_.size() > 0) {
801 signalfd.reset(new ipc_lib::SignalFd({ipc_lib::kWakeupSignal}));
802
803 epoll_.OnReadable(signalfd->fd(), [signalfd_ptr = signalfd.get(), this]() {
804 signalfd_siginfo result = signalfd_ptr->Read();
805 CHECK_EQ(result.ssi_signo, ipc_lib::kWakeupSignal);
806
807 // TODO(austin): We should really be checking *everything*, not just
808 // watchers, and calling the oldest thing first. That will improve
809 // determinism a lot.
810
Austin Schuh7d87b672019-12-01 20:23:49 -0800811 HandleEvent();
Alex Perrycb7da4b2019-08-28 19:35:56 -0700812 });
813 }
814
Austin Schuh39788ff2019-12-01 18:22:57 -0800815 MaybeScheduleTimingReports();
816
Austin Schuh7d87b672019-12-01 20:23:49 -0800817 ReserveEvents();
818
Tyler Chatow67ddb032020-01-12 14:30:04 -0800819 {
820 AosLogToFbs aos_logger;
821 if (!skip_logger_) {
822 aos_logger.Initialize(MakeSender<logging::LogMessageFbs>("/aos"));
823 }
Alex Perrycb7da4b2019-08-28 19:35:56 -0700824
Tyler Chatow67ddb032020-01-12 14:30:04 -0800825 aos::SetCurrentThreadName(name_.substr(0, 16));
Brian Silverman6a54ff32020-04-28 16:41:39 -0700826 const cpu_set_t default_affinity = DefaultAffinity();
827 if (!CPU_EQUAL(&affinity_, &default_affinity)) {
828 ::aos::SetCurrentThreadAffinity(affinity_);
829 }
Tyler Chatow67ddb032020-01-12 14:30:04 -0800830 // Now, all the callbacks are setup. Lock everything into memory and go RT.
831 if (priority_ != 0) {
832 ::aos::InitRT();
833
834 LOG(INFO) << "Setting priority to " << priority_;
835 ::aos::SetCurrentThreadRealtimePriority(priority_);
836 }
837
838 set_is_running(true);
839
840 // Now that we are realtime (but before the OnRun handlers run), snap the
841 // queue index.
842 for (::std::unique_ptr<WatcherState> &watcher : watchers_) {
843 watcher->Startup(this);
844 }
845
846 // Now that we are RT, run all the OnRun handlers.
847 for (const auto &run : on_run_) {
848 run();
849 }
850
851 // And start our main event loop which runs all the timers and handles Quit.
852 epoll_.Run();
853
854 // Once epoll exits, there is no useful nonrt work left to do.
855 set_is_running(false);
856
857 // Nothing time or synchronization critical needs to happen after this
858 // point. Drop RT priority.
859 ::aos::UnsetCurrentThreadRealtimePriority();
Alex Perrycb7da4b2019-08-28 19:35:56 -0700860 }
861
Austin Schuh39788ff2019-12-01 18:22:57 -0800862 for (::std::unique_ptr<WatcherState> &base_watcher : watchers_) {
863 internal::WatcherState *watcher =
864 reinterpret_cast<internal::WatcherState *>(base_watcher.get());
Alex Perrycb7da4b2019-08-28 19:35:56 -0700865 watcher->UnregisterWakeup();
866 }
867
868 if (watchers_.size() > 0) {
869 epoll_.DeleteFd(signalfd->fd());
870 signalfd.reset();
871 }
Austin Schuh32fd5a72019-12-01 22:20:26 -0800872
873 SignalHandler::global()->Unregister(this);
Austin Schuhe84c3ed2019-12-14 15:29:48 -0800874
875 // Trigger any remaining senders or fetchers to be cleared before destroying
876 // the event loop so the book keeping matches. Do this in the thread that
877 // created the timing reporter.
878 timing_report_sender_.reset();
Alex Perrycb7da4b2019-08-28 19:35:56 -0700879}
880
881void ShmEventLoop::Exit() { epoll_.Quit(); }
882
// Destructor.  Tears down everything which registered a file descriptor with
// epoll before epoll_ itself is destroyed.
ShmEventLoop::~ShmEventLoop() {
  // Force everything with a registered fd with epoll to be destroyed now.
  timers_.clear();
  phased_loops_.clear();
  watchers_.clear();

  // Destroying a loop which is still running is a programming error.
  CHECK(!is_running()) << ": ShmEventLoop destroyed while running";
}
891
Alex Perrycb7da4b2019-08-28 19:35:56 -0700892void ShmEventLoop::SetRuntimeRealtimePriority(int priority) {
893 if (is_running()) {
894 LOG(FATAL) << "Cannot set realtime priority while running.";
895 }
896 priority_ = priority;
897}
898
Brian Silverman6a54ff32020-04-28 16:41:39 -0700899void ShmEventLoop::SetRuntimeAffinity(const cpu_set_t &cpuset) {
900 if (is_running()) {
901 LOG(FATAL) << "Cannot set affinity while running.";
902 }
903 affinity_ = cpuset;
904}
905
James Kuszmaul57c2baa2020-01-19 14:52:52 -0800906void ShmEventLoop::set_name(const std::string_view name) {
907 name_ = std::string(name);
908 UpdateTimingReport();
909}
910
Brian Silverman5120afb2020-01-31 17:44:35 -0800911absl::Span<char> ShmEventLoop::GetWatcherSharedMemory(const Channel *channel) {
912 internal::WatcherState *const watcher_state =
913 static_cast<internal::WatcherState *>(GetWatcherState(channel));
914 return watcher_state->GetSharedMemory();
915}
916
917absl::Span<char> ShmEventLoop::GetShmSenderSharedMemory(
918 const aos::RawSender *sender) const {
919 return static_cast<const internal::ShmSender *>(sender)->GetSharedMemory();
920}
921
Austin Schuh39788ff2019-12-01 18:22:57 -0800922pid_t ShmEventLoop::GetTid() { return syscall(SYS_gettid); }
923
Alex Perrycb7da4b2019-08-28 19:35:56 -0700924} // namespace aos