#include "aos/events/shm_event_loop.h"

#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/types.h>

#include <algorithm>
#include <atomic>
#include <chrono>
#include <iterator>
#include <stdexcept>

#include "glog/logging.h"

#include "aos/events/aos_logging.h"
#include "aos/events/epoll.h"
#include "aos/events/event_loop_generated.h"
#include "aos/events/timing_statistics.h"
#include "aos/init.h"
#include "aos/ipc_lib/lockless_queue.h"
#include "aos/ipc_lib/memory_mapped_queue.h"
#include "aos/realtime.h"
#include "aos/stl_mutex/stl_mutex.h"
#include "aos/util/file.h"
#include "aos/util/phased_loop.h"

namespace {

// Returns the portion of the path after the last /. This very much assumes
// that the application name is null terminated.
const char *Filename(const char *path) {
  const std::string_view path_string_view = path;
  auto last_slash_pos = path_string_view.find_last_of("/");

  return last_slash_pos == std::string_view::npos ? path
                                                  : path + last_slash_pos + 1;
}

}  // namespace

// This value is affected by the umask of the process that creates the shared
// memory files, which defaults to the user's umask (check yours by running
// `umask` on the command line).
// Any file mode requested is transformed using mode & ~umask, and the default
// umask is 0022 (allow all permissions for the user, don't allow writes for
// the group or others).
// See https://man7.org/linux/man-pages/man2/umask.2.html for more details.
// WITH THE DEFAULT UMASK YOU WON'T ACTUALLY GET THESE PERMISSIONS :)
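// As a worked example (assuming the default umask of 0022): requesting 0770
// below actually produces 0770 & ~0022 == 0750, i.e. rwx for the user, r-x
// for the group, and nothing for others.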
DEFINE_uint32(permissions, 0770,
              "Permissions to make shared memory files and folders, "
              "affected by the process's umask. "
              "See shm_event_loop.cc for more details.");
DEFINE_string(application_name, Filename(program_invocation_name),
              "The application name");

namespace aos {

using namespace shm_event_loop_internal;

namespace {

const Node *MaybeMyNode(const Configuration *configuration) {
  if (!configuration->has_nodes()) {
    return nullptr;
  }

  return configuration::GetMyNode(configuration);
}

void IgnoreWakeupSignal() {
  struct sigaction action;
  action.sa_handler = SIG_IGN;
  PCHECK(sigemptyset(&action.sa_mask) == 0);
  action.sa_flags = 0;
  PCHECK(sigaction(ipc_lib::kWakeupSignal, &action, nullptr) == 0);
}

}  // namespace

ShmEventLoop::ShmEventLoop(const Configuration *configuration)
    : EventLoop(configuration),
      boot_uuid_(UUID::BootUUID()),
      shm_base_(FLAGS_shm_base),
      name_(FLAGS_application_name),
      node_(MaybeMyNode(configuration)) {
  // Ignore the wakeup signal by default. Otherwise, we have race conditions on
  // shutdown where a wakeup signal will uncleanly terminate the process.
  // See LocklessQueueWakeUpper::Wakeup() for some more information.
  IgnoreWakeupSignal();

  CHECK(IsInitialized()) << ": Need to initialize AOS first.";
  ClearContext();
  if (configuration->has_nodes()) {
    CHECK(node_ != nullptr) << ": Couldn't find node in config.";
  }
}

namespace shm_event_loop_internal {

class SimpleShmFetcher {
 public:
  explicit SimpleShmFetcher(std::string_view shm_base, ShmEventLoop *event_loop,
                            const Channel *channel)
      : event_loop_(event_loop),
        channel_(channel),
        lockless_queue_memory_(shm_base, FLAGS_permissions,
                               event_loop->configuration(), channel),
        reader_(lockless_queue_memory_.queue()) {
    context_.data = nullptr;
    // Point the queue index at the next index to read starting now. This
    // makes it such that FetchNext will read the next message sent after
    // the fetcher is created.
    PointAtNextQueueIndex();
  }

  ~SimpleShmFetcher() {}

  // Sets this object to pin or copy data, as configured in the channel.
  void RetrieveData() {
    if (channel_->read_method() == ReadMethod::PIN) {
      PinDataOnFetch();
    } else {
      CopyDataOnFetch();
    }
  }

  // Sets this object to copy data out of the shared memory into a private
  // buffer when fetching.
  void CopyDataOnFetch() {
    CHECK(!pin_data());
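    // Allocate extra space so the start of the usable region can be adjusted
    // to satisfy kChannelDataAlignment (see data_storage_start()).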
    data_storage_.reset(static_cast<char *>(
        malloc(channel_->max_size() + kChannelDataAlignment - 1)));
  }

  // Sets this object to pin data in shared memory when fetching.
  void PinDataOnFetch() {
    CHECK(!copy_data());
    auto maybe_pinner =
        ipc_lib::LocklessQueuePinner::Make(lockless_queue_memory_.queue());
    if (!maybe_pinner) {
      LOG(FATAL) << "Failed to create reader on "
                 << configuration::CleanedChannelToString(channel_)
                 << ", too many readers.";
    }
    pinner_ = std::move(maybe_pinner.value());
  }

  // Points the next message to fetch at the queue index which will be
  // populated next.
  void PointAtNextQueueIndex() {
    actual_queue_index_ = reader_.LatestIndex();
    if (!actual_queue_index_.valid()) {
      // Nothing in the queue. The next element will show up at the 0th
      // index in the queue.
      actual_queue_index_ = ipc_lib::QueueIndex::Zero(
          LocklessQueueSize(lockless_queue_memory_.memory()));
    } else {
      actual_queue_index_ = actual_queue_index_.Increment();
    }
  }

  bool FetchNext() { return FetchNextIf(should_fetch_); }

  bool FetchNextIf(std::function<bool(const Context &)> fn) {
    const ipc_lib::LocklessQueueReader::Result read_result =
        DoFetch(actual_queue_index_, std::move(fn));

    return read_result == ipc_lib::LocklessQueueReader::Result::GOOD;
  }

  bool FetchIf(std::function<bool(const Context &)> fn) {
    const ipc_lib::QueueIndex queue_index = reader_.LatestIndex();
    // actual_queue_index_ is only meaningful if it was set by Fetch or
    // FetchNext, which is the case when context_.data has been set. So, only
    // skip the fetch when context_.data is set and we are already caught up.
    //
    // Also, if the latest queue index is invalid, we are empty. So there
    // is nothing to fetch.
    if ((context_.data != nullptr &&
         queue_index == actual_queue_index_.DecrementBy(1u)) ||
        !queue_index.valid()) {
      return false;
    }

    const ipc_lib::LocklessQueueReader::Result read_result =
        DoFetch(queue_index, std::move(fn));

    CHECK(read_result != ipc_lib::LocklessQueueReader::Result::NOTHING_NEW)
        << ": Queue index went backwards. This should never happen. "
        << configuration::CleanedChannelToString(channel_);

    return read_result == ipc_lib::LocklessQueueReader::Result::GOOD;
  }

  bool Fetch() { return FetchIf(should_fetch_); }

  Context context() const { return context_; }

  bool RegisterWakeup(int priority) {
    CHECK(!watcher_);
    watcher_ = ipc_lib::LocklessQueueWatcher::Make(
        lockless_queue_memory_.queue(), priority);
    return static_cast<bool>(watcher_);
  }

  void UnregisterWakeup() {
    CHECK(watcher_);
    watcher_ = std::nullopt;
  }

  absl::Span<char> GetMutableSharedMemory() {
    return lockless_queue_memory_.GetMutableSharedMemory();
  }

  absl::Span<const char> GetConstSharedMemory() const {
    return lockless_queue_memory_.GetConstSharedMemory();
  }

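  // Returns the region the most recently fetched message's data lives in: the
  // shared memory itself when pinning, otherwise the private copy buffer.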
  absl::Span<const char> GetPrivateMemory() const {
    if (pin_data()) {
      return lockless_queue_memory_.GetConstSharedMemory();
    }
    return absl::Span<char>(
        const_cast<SimpleShmFetcher *>(this)->data_storage_start(),
        LocklessQueueMessageDataSize(lockless_queue_memory_.memory()));
  }

 private:
  ipc_lib::LocklessQueueReader::Result DoFetch(
      ipc_lib::QueueIndex queue_index,
      std::function<bool(const Context &context)> fn) {
    // TODO(austin): Get behind and make sure it dies.
    char *copy_buffer = nullptr;
    if (copy_data()) {
      copy_buffer = data_storage_start();
    }
    ipc_lib::LocklessQueueReader::Result read_result = reader_.Read(
        queue_index.index(), &context_.monotonic_event_time,
        &context_.realtime_event_time, &context_.monotonic_remote_time,
        &context_.monotonic_remote_transmit_time,
        &context_.realtime_remote_time, &context_.remote_queue_index,
        &context_.source_boot_uuid, &context_.size, copy_buffer, std::move(fn));

    if (read_result == ipc_lib::LocklessQueueReader::Result::GOOD) {
      if (pin_data()) {
        const int pin_result = pinner_->PinIndex(queue_index.index());
        CHECK(pin_result >= 0)
            << ": Got behind while reading and the last message was modified "
               "out from under us while we tried to pin it. Don't get so far "
               "behind on: "
            << configuration::CleanedChannelToString(channel_);
        context_.buffer_index = pin_result;
      } else {
        context_.buffer_index = -1;
      }

      context_.queue_index = queue_index.index();
      if (context_.remote_queue_index == 0xffffffffu) {
        context_.remote_queue_index = context_.queue_index;
      }
      if (context_.monotonic_remote_time == aos::monotonic_clock::min_time) {
        context_.monotonic_remote_time = context_.monotonic_event_time;
      }
      if (context_.realtime_remote_time == aos::realtime_clock::min_time) {
        context_.realtime_remote_time = context_.realtime_event_time;
      }
      const char *const data = DataBuffer();
      if (data) {
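        // The valid payload occupies the last context_.size bytes of the
        // message data region, so offset forward from the start of the buffer.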
        context_.data =
            data +
            LocklessQueueMessageDataSize(lockless_queue_memory_.memory()) -
            context_.size;
      } else {
        context_.data = nullptr;
      }
      actual_queue_index_ = queue_index.Increment();
    }

    // Make sure the data wasn't modified while we were reading it. This
    // can only happen if you are reading the last message *while* it is
    // being written to, which means you are pretty far behind.
    CHECK(read_result != ipc_lib::LocklessQueueReader::Result::OVERWROTE)
        << ": Got behind while reading and the last message was modified "
           "out from under us while we were reading it. Don't get so far "
           "behind on: "
        << configuration::CleanedChannelToString(channel_);

    // We fell behind between when we read the index and read the value.
    // This isn't worth recovering from since this means we went to sleep
    // for a long time in the middle of this function.
    if (read_result == ipc_lib::LocklessQueueReader::Result::TOO_OLD) {
      event_loop_->SendTimingReport();
      LOG(FATAL) << "The next message is no longer available. "
                 << configuration::CleanedChannelToString(channel_);
    }

    return read_result;
  }

  char *data_storage_start() const {
    CHECK(copy_data());
    return RoundChannelData(data_storage_.get(), channel_->max_size());
  }

  // Note that for some modes the return value will change as new messages are
  // read.
  const char *DataBuffer() const {
    if (copy_data()) {
      return data_storage_start();
    }
    if (pin_data()) {
      return static_cast<const char *>(pinner_->Data());
    }
    return nullptr;
  }

  bool copy_data() const { return static_cast<bool>(data_storage_); }
  bool pin_data() const { return static_cast<bool>(pinner_); }

  aos::ShmEventLoop *event_loop_;
  const Channel *const channel_;
  ipc_lib::MemoryMappedQueue lockless_queue_memory_;
  ipc_lib::LocklessQueueReader reader_;
  // This being nullopt indicates we're not looking for wakeups right now.
  std::optional<ipc_lib::LocklessQueueWatcher> watcher_;

  ipc_lib::QueueIndex actual_queue_index_ = ipc_lib::QueueIndex::Invalid();

  // This being empty indicates we're not going to copy data.
  std::unique_ptr<char, decltype(&free)> data_storage_{nullptr, &free};

  // This being nullopt indicates we're not going to pin messages.
  std::optional<ipc_lib::LocklessQueuePinner> pinner_;

  Context context_;

  // Pre-allocated should_fetch function so we don't allocate. Defaults to
  // always fetching, which is what the unconditional Fetch()/FetchNext() use.
  const std::function<bool(const Context &)> should_fetch_ =
      [](const Context &) { return true; };
};

class ShmFetcher : public RawFetcher {
 public:
  explicit ShmFetcher(std::string_view shm_base, ShmEventLoop *event_loop,
                      const Channel *channel)
      : RawFetcher(event_loop, channel),
        simple_shm_fetcher_(shm_base, event_loop, channel) {
    simple_shm_fetcher_.RetrieveData();
  }

  ~ShmFetcher() override {
    shm_event_loop()->CheckCurrentThread();
    context_.data = nullptr;
  }

  std::pair<bool, monotonic_clock::time_point> DoFetchNext() override {
    shm_event_loop()->CheckCurrentThread();
    if (simple_shm_fetcher_.FetchNext()) {
      context_ = simple_shm_fetcher_.context();
      return std::make_pair(true, monotonic_clock::now());
    }
    return std::make_pair(false, monotonic_clock::min_time);
  }

  std::pair<bool, monotonic_clock::time_point> DoFetchNextIf(
      std::function<bool(const Context &context)> fn) override {
    shm_event_loop()->CheckCurrentThread();
    if (simple_shm_fetcher_.FetchNextIf(std::move(fn))) {
      context_ = simple_shm_fetcher_.context();
      return std::make_pair(true, monotonic_clock::now());
    }
    return std::make_pair(false, monotonic_clock::min_time);
  }

  std::pair<bool, monotonic_clock::time_point> DoFetch() override {
    shm_event_loop()->CheckCurrentThread();
    if (simple_shm_fetcher_.Fetch()) {
      context_ = simple_shm_fetcher_.context();
      return std::make_pair(true, monotonic_clock::now());
    }
    return std::make_pair(false, monotonic_clock::min_time);
  }

  std::pair<bool, monotonic_clock::time_point> DoFetchIf(
      std::function<bool(const Context &context)> fn) override {
    shm_event_loop()->CheckCurrentThread();
    if (simple_shm_fetcher_.FetchIf(std::move(fn))) {
      context_ = simple_shm_fetcher_.context();
      return std::make_pair(true, monotonic_clock::now());
    }
    return std::make_pair(false, monotonic_clock::min_time);
  }

  absl::Span<const char> GetPrivateMemory() const {
    return simple_shm_fetcher_.GetPrivateMemory();
  }

 private:
  const ShmEventLoop *shm_event_loop() const {
    return static_cast<const ShmEventLoop *>(event_loop());
  }

  SimpleShmFetcher simple_shm_fetcher_;
};

class ShmExitHandle : public ExitHandle {
 public:
  ShmExitHandle(ShmEventLoop *event_loop) : event_loop_(event_loop) {
    ++event_loop_->exit_handle_count_;
  }
  ~ShmExitHandle() override {
    CHECK_GT(event_loop_->exit_handle_count_, 0);
    --event_loop_->exit_handle_count_;
  }
  // Because of how we handle reference counting, we either need to implement
  // reference counting in the copy/move constructors or just not support them.
  // If we ever develop a need for this object to be movable/copyable,
  // supporting it should be straightforward.
  DISALLOW_COPY_AND_ASSIGN(ShmExitHandle);

  void Exit(Result<void> status) override {
    event_loop_->ExitWithStatus(status);
  }

 private:
  ShmEventLoop *const event_loop_;
};

class ShmSender : public RawSender {
 public:
  explicit ShmSender(std::string_view shm_base, EventLoop *event_loop,
                     const Channel *channel)
      : RawSender(event_loop, channel),
        lockless_queue_memory_(shm_base, FLAGS_permissions,
                               event_loop->configuration(), channel),
        lockless_queue_sender_(
            VerifySender(ipc_lib::LocklessQueueSender::Make(
                             lockless_queue_memory_.queue(),
                             configuration::ChannelStorageDuration(
                                 event_loop->configuration(), channel)),
                         channel)),
        wake_upper_(lockless_queue_memory_.queue()) {}

  ~ShmSender() override { shm_event_loop()->CheckCurrentThread(); }

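  // Returns the sender if one could be created, and dies if the channel
  // already has the maximum number of senders.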
  static ipc_lib::LocklessQueueSender VerifySender(
      std::optional<ipc_lib::LocklessQueueSender> sender,
      const Channel *channel) {
    if (sender) {
      return std::move(sender.value());
    }
    LOG(FATAL) << "Failed to create sender on "
               << configuration::CleanedChannelToString(channel)
               << ", too many senders.";
  }

  void *data() override {
    shm_event_loop()->CheckCurrentThread();
    return lockless_queue_sender_.Data();
  }
  size_t size() override {
    shm_event_loop()->CheckCurrentThread();
    return lockless_queue_sender_.size();
  }

  Error DoSend(size_t length,
               aos::monotonic_clock::time_point monotonic_remote_time,
               aos::realtime_clock::time_point realtime_remote_time,
               aos::monotonic_clock::time_point monotonic_remote_transmit_time,
               uint32_t remote_queue_index,
               const UUID &source_boot_uuid) override {
    shm_event_loop()->CheckCurrentThread();
    CHECK_LE(length, static_cast<size_t>(channel()->max_size()))
        << ": Sent too big a message on "
        << configuration::CleanedChannelToString(channel());
    const auto result = lockless_queue_sender_.Send(
        length, monotonic_remote_time, realtime_remote_time,
        monotonic_remote_transmit_time, remote_queue_index, source_boot_uuid,
        &monotonic_sent_time_, &realtime_sent_time_, &sent_queue_index_);
    CHECK_NE(result, ipc_lib::LocklessQueueSender::Result::INVALID_REDZONE)
        << ": Somebody wrote outside the buffer of their message on channel "
        << configuration::CleanedChannelToString(channel());

    wake_upper_.Wakeup(event_loop()->is_running()
                           ? event_loop()->runtime_realtime_priority()
                           : 0);
    return CheckLocklessQueueResult(result);
  }

  Error DoSend(const void *msg, size_t length,
               aos::monotonic_clock::time_point monotonic_remote_time,
               aos::realtime_clock::time_point realtime_remote_time,
               aos::monotonic_clock::time_point monotonic_remote_transmit_time,
               uint32_t remote_queue_index,
               const UUID &source_boot_uuid) override {
    shm_event_loop()->CheckCurrentThread();
    CHECK_LE(length, static_cast<size_t>(channel()->max_size()))
        << ": Sent too big a message on "
        << configuration::CleanedChannelToString(channel());
    const auto result = lockless_queue_sender_.Send(
        reinterpret_cast<const char *>(msg), length, monotonic_remote_time,
        realtime_remote_time, monotonic_remote_transmit_time,
        remote_queue_index, source_boot_uuid, &monotonic_sent_time_,
        &realtime_sent_time_, &sent_queue_index_);

    CHECK_NE(result, ipc_lib::LocklessQueueSender::Result::INVALID_REDZONE)
        << ": Somebody wrote outside the buffer of their message on "
           "channel "
        << configuration::CleanedChannelToString(channel());
    wake_upper_.Wakeup(event_loop()->is_running()
                           ? event_loop()->runtime_realtime_priority()
                           : 0);

    return CheckLocklessQueueResult(result);
  }

  absl::Span<char> GetSharedMemory() const {
    return lockless_queue_memory_.GetMutableSharedMemory();
  }

  int buffer_index() override {
    shm_event_loop()->CheckCurrentThread();
    return lockless_queue_sender_.buffer_index();
  }

 private:
  const ShmEventLoop *shm_event_loop() const {
    return static_cast<const ShmEventLoop *>(event_loop());
  }

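  // Maps a lockless queue send result onto the corresponding RawSender::Error.
  // Any result we don't recognize is fatal.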
  RawSender::Error CheckLocklessQueueResult(
      const ipc_lib::LocklessQueueSender::Result &result) {
    switch (result) {
      case ipc_lib::LocklessQueueSender::Result::GOOD:
        return Error::kOk;
      case ipc_lib::LocklessQueueSender::Result::MESSAGES_SENT_TOO_FAST:
        return Error::kMessagesSentTooFast;
      case ipc_lib::LocklessQueueSender::Result::INVALID_REDZONE:
        return Error::kInvalidRedzone;
    }
    LOG(FATAL) << "Unknown lockless queue sender result "
               << static_cast<int>(result);
  }
543
Austin Schuh4d275fc2022-09-16 15:42:45 -0700544 ipc_lib::MemoryMappedQueue lockless_queue_memory_;
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700545 ipc_lib::LocklessQueueSender lockless_queue_sender_;
546 ipc_lib::LocklessQueueWakeUpper wake_upper_;
Alex Perrycb7da4b2019-08-28 19:35:56 -0700547};
548
Alex Perrycb7da4b2019-08-28 19:35:56 -0700549// Class to manage the state for a Watcher.
Brian Silverman148d43d2020-06-07 18:19:22 -0500550class ShmWatcherState : public WatcherState {
Alex Perrycb7da4b2019-08-28 19:35:56 -0700551 public:
Brian Silverman148d43d2020-06-07 18:19:22 -0500552 ShmWatcherState(
Austin Schuhef323c02020-09-01 14:55:28 -0700553 std::string_view shm_base, ShmEventLoop *event_loop,
554 const Channel *channel,
Brian Silverman6b8a3c32020-03-06 11:26:14 -0800555 std::function<void(const Context &context, const void *message)> fn,
556 bool copy_data)
Brian Silverman148d43d2020-06-07 18:19:22 -0500557 : WatcherState(event_loop, channel, std::move(fn)),
Austin Schuh7d87b672019-12-01 20:23:49 -0800558 event_loop_(event_loop),
559 event_(this),
Austin Schuhef323c02020-09-01 14:55:28 -0700560 simple_shm_fetcher_(shm_base, event_loop, channel) {
Brian Silverman3bca5322020-08-12 19:35:29 -0700561 if (copy_data) {
Brian Silverman77162972020-08-12 19:52:40 -0700562 simple_shm_fetcher_.RetrieveData();
Brian Silverman3bca5322020-08-12 19:35:29 -0700563 }
564 }
Alex Perrycb7da4b2019-08-28 19:35:56 -0700565
Austin Schuh3054f5f2021-07-21 15:38:01 -0700566 ~ShmWatcherState() override {
567 event_loop_->CheckCurrentThread();
568 event_loop_->RemoveEvent(&event_);
569 }
Austin Schuh39788ff2019-12-01 18:22:57 -0800570
Philipp Schrader81fa3fb2023-09-17 18:58:35 -0700571 void Construct() override {
572 event_loop_->CheckCurrentThread();
573 CHECK(RegisterWakeup(event_loop_->runtime_realtime_priority()));
574 }
575
576 void Startup() override {
Austin Schuh3054f5f2021-07-21 15:38:01 -0700577 event_loop_->CheckCurrentThread();
Austin Schuh7d87b672019-12-01 20:23:49 -0800578 simple_shm_fetcher_.PointAtNextQueueIndex();
Austin Schuh39788ff2019-12-01 18:22:57 -0800579 }
Alex Perrycb7da4b2019-08-28 19:35:56 -0700580
Alex Perrycb7da4b2019-08-28 19:35:56 -0700581 // Returns true if there is new data available.
Austin Schuh7d87b672019-12-01 20:23:49 -0800582 bool CheckForNewData() {
Alex Perrycb7da4b2019-08-28 19:35:56 -0700583 if (!has_new_data_) {
Austin Schuh39788ff2019-12-01 18:22:57 -0800584 has_new_data_ = simple_shm_fetcher_.FetchNext();
Austin Schuh7d87b672019-12-01 20:23:49 -0800585
586 if (has_new_data_) {
587 event_.set_event_time(
Austin Schuhad154822019-12-27 15:45:13 -0800588 simple_shm_fetcher_.context().monotonic_event_time);
Austin Schuh7d87b672019-12-01 20:23:49 -0800589 event_loop_->AddEvent(&event_);
590 }
Alex Perrycb7da4b2019-08-28 19:35:56 -0700591 }
592
593 return has_new_data_;
594 }
595
Alex Perrycb7da4b2019-08-28 19:35:56 -0700596 // Consumes the data by calling the callback.
Austin Schuh7d87b672019-12-01 20:23:49 -0800597 void HandleEvent() {
Alex Perrycb7da4b2019-08-28 19:35:56 -0700598 CHECK(has_new_data_);
Austin Schuh39788ff2019-12-01 18:22:57 -0800599 DoCallCallback(monotonic_clock::now, simple_shm_fetcher_.context());
Alex Perrycb7da4b2019-08-28 19:35:56 -0700600 has_new_data_ = false;
Austin Schuh7d87b672019-12-01 20:23:49 -0800601 CheckForNewData();
Alex Perrycb7da4b2019-08-28 19:35:56 -0700602 }
603
Austin Schuh39788ff2019-12-01 18:22:57 -0800604 // Registers us to receive a signal on event reception.
Alex Perrycb7da4b2019-08-28 19:35:56 -0700605 bool RegisterWakeup(int priority) {
Austin Schuh39788ff2019-12-01 18:22:57 -0800606 return simple_shm_fetcher_.RegisterWakeup(priority);
Alex Perrycb7da4b2019-08-28 19:35:56 -0700607 }
608
Austin Schuh39788ff2019-12-01 18:22:57 -0800609 void UnregisterWakeup() { return simple_shm_fetcher_.UnregisterWakeup(); }
Alex Perrycb7da4b2019-08-28 19:35:56 -0700610
Brian Silvermana5450a92020-08-12 19:59:57 -0700611 absl::Span<const char> GetSharedMemory() const {
612 return simple_shm_fetcher_.GetConstSharedMemory();
Brian Silverman5120afb2020-01-31 17:44:35 -0800613 }
614
Alex Perrycb7da4b2019-08-28 19:35:56 -0700615 private:
616 bool has_new_data_ = false;
617
Austin Schuh7d87b672019-12-01 20:23:49 -0800618 ShmEventLoop *event_loop_;
Brian Silverman148d43d2020-06-07 18:19:22 -0500619 EventHandler<ShmWatcherState> event_;
Austin Schuh39788ff2019-12-01 18:22:57 -0800620 SimpleShmFetcher simple_shm_fetcher_;
Alex Perrycb7da4b2019-08-28 19:35:56 -0700621};
622
623// Adapter class to adapt a timerfd to a TimerHandler.
Brian Silverman148d43d2020-06-07 18:19:22 -0500624class ShmTimerHandler final : public TimerHandler {
Alex Perrycb7da4b2019-08-28 19:35:56 -0700625 public:
Brian Silverman148d43d2020-06-07 18:19:22 -0500626 ShmTimerHandler(ShmEventLoop *shm_event_loop, ::std::function<void()> fn)
Austin Schuh39788ff2019-12-01 18:22:57 -0800627 : TimerHandler(shm_event_loop, std::move(fn)),
Austin Schuh7d87b672019-12-01 20:23:49 -0800628 shm_event_loop_(shm_event_loop),
629 event_(this) {
Austin Schuhcde39fd2020-02-22 20:58:24 -0800630 shm_event_loop_->epoll_.OnReadable(timerfd_.fd(), [this]() {
      // The timer may fire spuriously. HandleEvent on the event loop will
      // call the callback if it is needed. It may also have called it when
      // processing some other event, and the kernel decided to deliver this
      // wakeup anyway.
      timerfd_.Read();
      shm_event_loop_->HandleEvent();
    });
  }

  ~ShmTimerHandler() {
    shm_event_loop_->CheckCurrentThread();
    Disable();
    shm_event_loop_->epoll_.DeleteFd(timerfd_.fd());
  }

  void HandleEvent() {
    CHECK(!event_.valid());
    disabled_ = false;
    const auto monotonic_now = Call(monotonic_clock::now, base_);
    if (event_.valid()) {
      // If someone called Schedule inside Call, rescheduling is already taken
      // care of. Bail.
      return;
    }
    if (disabled_) {
      // Somebody called Disable inside Call, so we don't want to reschedule.
      // Bail.
      return;
    }

    if (repeat_offset_ == std::chrono::seconds(0)) {
      timerfd_.Disable();
      disabled_ = true;
    } else {
      // Compute how many cycles have elapsed and schedule the next iteration
      // in the future.
      const int elapsed_cycles =
          std::max<int>(0, (monotonic_now - base_ + repeat_offset_ -
                            std::chrono::nanoseconds(1)) /
                               repeat_offset_);
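      // elapsed_cycles is ceil((monotonic_now - base_) / repeat_offset_), so
      // base_ advances to the first multiple of repeat_offset_ at or after
      // now.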
      base_ += repeat_offset_ * elapsed_cycles;

      // Update the heap and schedule the timerfd wakeup.
      event_.set_event_time(base_);
      shm_event_loop_->AddEvent(&event_);
      timerfd_.SetTime(base_, std::chrono::seconds(0));
      disabled_ = false;
    }
  }

  void Schedule(monotonic_clock::time_point base,
                monotonic_clock::duration repeat_offset) override {
    shm_event_loop_->CheckCurrentThread();
    if (event_.valid()) {
      shm_event_loop_->RemoveEvent(&event_);
    }

    timerfd_.SetTime(base, repeat_offset);
    base_ = base;
    repeat_offset_ = repeat_offset;
    event_.set_event_time(base_);
    shm_event_loop_->AddEvent(&event_);
    disabled_ = false;
  }

  void Disable() override {
    shm_event_loop_->CheckCurrentThread();
    shm_event_loop_->RemoveEvent(&event_);
    timerfd_.Disable();
    disabled_ = true;
  }

  bool IsDisabled() override { return disabled_; }

 private:
  ShmEventLoop *shm_event_loop_;
  EventHandler<ShmTimerHandler> event_;

  internal::TimerFd timerfd_;

  monotonic_clock::time_point base_;
  monotonic_clock::duration repeat_offset_;

  // Used to track if Disable() was called during the callback, so we know not
  // to reschedule.
  bool disabled_ = true;
};

// Adapter class to the timerfd and PhasedLoop.
class ShmPhasedLoopHandler final : public PhasedLoopHandler {
 public:
  ShmPhasedLoopHandler(ShmEventLoop *shm_event_loop,
                       ::std::function<void(int)> fn,
                       const monotonic_clock::duration interval,
                       const monotonic_clock::duration offset)
      : PhasedLoopHandler(shm_event_loop, std::move(fn), interval, offset),
        shm_event_loop_(shm_event_loop),
        event_(this) {
    shm_event_loop_->epoll_.OnReadable(
        timerfd_.fd(), [this]() { shm_event_loop_->HandleEvent(); });
  }

  void HandleEvent() {
    // The return value for read is the number of cycles that have elapsed.
    // Because we check to see when this event *should* have happened, there are
    // cases where Read() will return 0, when 1 cycle has actually happened.
    // This occurs when the timer interrupt hasn't triggered yet. Therefore,
    // ignore it. Call handles rescheduling and calculating elapsed cycles
    // without any extra help.
    timerfd_.Read();
    event_.Invalidate();

    Call(monotonic_clock::now);
  }

  ~ShmPhasedLoopHandler() override {
    shm_event_loop_->CheckCurrentThread();
    shm_event_loop_->epoll_.DeleteFd(timerfd_.fd());
    shm_event_loop_->RemoveEvent(&event_);
  }

 private:
  // Reschedules the timer.
  void Schedule(monotonic_clock::time_point sleep_time) override {
    shm_event_loop_->CheckCurrentThread();
    if (event_.valid()) {
      shm_event_loop_->RemoveEvent(&event_);
    }

    timerfd_.SetTime(sleep_time, ::aos::monotonic_clock::zero());
    event_.set_event_time(sleep_time);
    shm_event_loop_->AddEvent(&event_);
  }

  ShmEventLoop *shm_event_loop_;
  EventHandler<ShmPhasedLoopHandler> event_;

  internal::TimerFd timerfd_;
};

}  // namespace shm_event_loop_internal

::std::unique_ptr<RawFetcher> ShmEventLoop::MakeRawFetcher(
    const Channel *channel) {
  CheckCurrentThread();
  if (!configuration::ChannelIsReadableOnNode(channel, node())) {
    LOG(FATAL) << "Channel { \"name\": \"" << channel->name()->string_view()
               << "\", \"type\": \"" << channel->type()->string_view()
               << "\" } is not able to be fetched on this node. Check your "
                  "configuration.";
  }

  return ::std::unique_ptr<RawFetcher>(
      new ShmFetcher(shm_base_, this, channel));
}

::std::unique_ptr<RawSender> ShmEventLoop::MakeRawSender(
    const Channel *channel) {
  CheckCurrentThread();
  TakeSender(channel);

  return ::std::unique_ptr<RawSender>(new ShmSender(shm_base_, this, channel));
}

void ShmEventLoop::MakeRawWatcher(
    const Channel *channel,
    std::function<void(const Context &context, const void *message)> watcher) {
  CheckCurrentThread();
  TakeWatcher(channel);

  NewWatcher(::std::unique_ptr<WatcherState>(
      new ShmWatcherState(shm_base_, this, channel, std::move(watcher), true)));
}

void ShmEventLoop::MakeRawNoArgWatcher(
    const Channel *channel,
    std::function<void(const Context &context)> watcher) {
  CheckCurrentThread();
  TakeWatcher(channel);

  NewWatcher(::std::unique_ptr<WatcherState>(new ShmWatcherState(
      shm_base_, this, channel,
      [watcher](const Context &context, const void *) { watcher(context); },
      false)));
}

TimerHandler *ShmEventLoop::AddTimer(::std::function<void()> callback) {
  CheckCurrentThread();
  return NewTimer(::std::unique_ptr<TimerHandler>(
      new ShmTimerHandler(this, ::std::move(callback))));
}

PhasedLoopHandler *ShmEventLoop::AddPhasedLoop(
    ::std::function<void(int)> callback,
    const monotonic_clock::duration interval,
    const monotonic_clock::duration offset) {
  CheckCurrentThread();
  return NewPhasedLoop(::std::unique_ptr<PhasedLoopHandler>(
      new ShmPhasedLoopHandler(this, ::std::move(callback), interval, offset)));
}

void ShmEventLoop::OnRun(::std::function<void()> on_run) {
  CheckCurrentThread();
  on_run_.push_back(::std::move(on_run));
}

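// Checks, when configured via check_mutex_ / check_tid_, that ShmEventLoop
// methods are only called with the designated mutex held and from the
// designated thread.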
void ShmEventLoop::CheckCurrentThread() const {
  if (__builtin_expect(check_mutex_ != nullptr, false)) {
    CHECK(check_mutex_->is_locked())
        << ": The configured mutex is not locked while calling a "
           "ShmEventLoop function";
  }
  if (__builtin_expect(!!check_tid_, false)) {
    CHECK_EQ(syscall(SYS_gettid), *check_tid_)
        << ": Being called from the wrong thread";
  }
}

// This is a bit tricky because watchers can generate new events at any time (as
// long as it's in the past). We want to check the watchers at least once before
// declaring there are no events to handle, and we want to check them again if
// event processing takes long enough that we find an event after that point in
// time to handle.
void ShmEventLoop::HandleEvent() {
  // Time through which we've checked for new events in watchers.
  monotonic_clock::time_point checked_until = monotonic_clock::min_time;
  if (!signalfd_) {
    // Nothing to check, so we can bail out immediately once we're out of
    // events.
    CHECK(watchers_.empty());
    checked_until = monotonic_clock::max_time;
  }

  // Loop until we run out of events to check.
  while (true) {
    // Time of the next event we know about. If this is before checked_until, we
    // know there aren't any new events before the next one that we already know
    // about, so no need to check the watchers.
    monotonic_clock::time_point next_time = monotonic_clock::max_time;

    if (EventCount() == 0) {
      if (checked_until != monotonic_clock::min_time) {
        // No events, and we've already checked the watchers at least once, so
        // we're all done.
        //
        // There's a small chance that a watcher has gotten another event in
        // between checked_until and now. If so, then the signalfd will be
        // triggered now and we'll re-enter HandleEvent immediately. This is
        // unlikely though, so we don't want to spend time checking all the
        // watchers unnecessarily.
        break;
      }
    } else {
      next_time = PeekEvent()->event_time();
    }
    monotonic_clock::time_point now;
    bool new_data = false;

    if (next_time > checked_until) {
      // Read all of the signals, because there's no point in waking up again
      // immediately to handle each one if we've fallen behind.
      //
      // This is safe before checking for new data on the watchers. If a signal
      // is cleared here, the corresponding CheckForNewData() call below will
      // pick it up.
      while (true) {
        const signalfd_siginfo result = signalfd_->Read();
        if (result.ssi_signo == 0) {
          break;
        }
        CHECK_EQ(result.ssi_signo, ipc_lib::kWakeupSignal);
      }
      // This is the last time we can guarantee that if a message is published
      // before, we will notice it.
      now = monotonic_clock::now();

      // Check all the watchers for new events.
      for (std::unique_ptr<WatcherState> &base_watcher : watchers_) {
        ShmWatcherState *const watcher =
            reinterpret_cast<ShmWatcherState *>(base_watcher.get());

        // Track if we got a message.
        if (watcher->CheckForNewData()) {
          new_data = true;
        }
      }
      if (EventCount() == 0) {
        // Still no events, all done now.
        break;
      }

      checked_until = now;
      // Check for any new events we found.
      next_time = PeekEvent()->event_time();
    } else {
      now = monotonic_clock::now();
    }

    if (next_time > now) {
      // Ok, we got a message with a timestamp *after* we wrote down time. We
      // need to process it (otherwise we will go to sleep without processing
      // it), but we also need to make sure no other messages have come in
      // before it that we would process out of order. Just go around again to
      // redo the checks.
      if (new_data) {
        continue;
      }
      break;
    }

    EventLoopEvent *const event = PopEvent();
    event->HandleEvent();
  }
}

// RAII class to mask signals.
class ScopedSignalMask {
 public:
  ScopedSignalMask(std::initializer_list<int> signals) {
    sigset_t sigset;
    PCHECK(sigemptyset(&sigset) == 0);
    for (int signal : signals) {
      PCHECK(sigaddset(&sigset, signal) == 0);
    }

    PCHECK(sigprocmask(SIG_BLOCK, &sigset, &old_) == 0);
  }

  ~ScopedSignalMask() { PCHECK(sigprocmask(SIG_SETMASK, &old_, nullptr) == 0); }

 private:
  sigset_t old_;
};

// Class to manage the static state associated with killing multiple event
// loops.
class SignalHandler {
 public:
  // Gets the singleton.
  static SignalHandler *global() {
    static SignalHandler loop;
    return &loop;
  }

  // Handles the signal with the singleton.
  static void HandleSignal(int) { global()->DoHandleSignal(); }

  // Registers an event loop to receive Exit() calls.
  void Register(ShmEventLoop *event_loop) {
    // Block signals while we have the mutex so we never race with the signal
    // handler.
    ScopedSignalMask mask({SIGINT, SIGHUP, SIGTERM});
    std::unique_lock<stl_mutex> locker(mutex_);
    if (event_loops_.size() == 0) {
      // The first caller registers the signal handler.
      struct sigaction new_action;
      sigemptyset(&new_action.sa_mask);
      // This makes it so that 2 control c's to a stuck process will kill it by
      // restoring the original signal handler.
      new_action.sa_flags = SA_RESETHAND;
      new_action.sa_handler = &HandleSignal;

      PCHECK(sigaction(SIGINT, &new_action, &old_action_int_) == 0);
      PCHECK(sigaction(SIGHUP, &new_action, &old_action_hup_) == 0);
      PCHECK(sigaction(SIGTERM, &new_action, &old_action_term_) == 0);
    }

    event_loops_.push_back(event_loop);
  }

  // Unregisters an event loop to receive Exit() calls.
  void Unregister(ShmEventLoop *event_loop) {
    // Block signals while we have the mutex so we never race with the signal
    // handler.
    ScopedSignalMask mask({SIGINT, SIGHUP, SIGTERM});
    std::unique_lock<stl_mutex> locker(mutex_);

    event_loops_.erase(
        std::find(event_loops_.begin(), event_loops_.end(), event_loop));

    if (event_loops_.size() == 0u) {
      // The last caller restores the original signal handlers.
      PCHECK(sigaction(SIGINT, &old_action_int_, nullptr) == 0);
      PCHECK(sigaction(SIGHUP, &old_action_hup_, nullptr) == 0);
      PCHECK(sigaction(SIGTERM, &old_action_term_, nullptr) == 0);
    }
  }

 private:
  void DoHandleSignal() {
    // We block signals while grabbing the lock, so there should never be a
    // race. Confirm that this is true using trylock.
    CHECK(mutex_.try_lock()) << ": sigprocmask failed to block signals while "
                                "modifying the event loop list.";
    for (ShmEventLoop *event_loop : event_loops_) {
      event_loop->Exit();
    }
    mutex_.unlock();
  }

  // Mutex to protect all state.
  stl_mutex mutex_;
  std::vector<ShmEventLoop *> event_loops_;
  struct sigaction old_action_int_;
  struct sigaction old_action_hup_;
  struct sigaction old_action_term_;
};

Result<void> ShmEventLoop::Run() {
  CheckCurrentThread();
  SignalHandler::global()->Register(this);

  if (watchers_.size() > 0) {
    signalfd_.reset(new ipc_lib::SignalFd({ipc_lib::kWakeupSignal}));
    signalfd_->LeaveSignalBlocked(ipc_lib::kWakeupSignal);

    epoll_.OnReadable(signalfd_->fd(), [this]() { HandleEvent(); });
  }

  MaybeScheduleTimingReports();

  ReserveEvents();

  {
    logging::ScopedLogRestorer prev_logger;
    AosLogToFbs aos_logger;
    if (!skip_logger_) {
      aos_logger.Initialize(&name_, MakeSender<logging::LogMessageFbs>("/aos"));
      prev_logger.Swap(aos_logger.implementation());
    }

    aos::SetCurrentThreadName(name_.substr(0, 16));
    const cpu_set_t default_affinity = DefaultAffinity();
    if (!CPU_EQUAL(&affinity_, &default_affinity)) {
      ::aos::SetCurrentThreadAffinity(affinity_);
    }

    // Construct the watchers, but don't update the next pointer. This also
    // cleans up any watchers that previously died, and puts the nonrt work
    // before going realtime. After this happens, we will start queueing
    // signals (which may be a bit of extra work to process, but won't cause any
    // messages to be lost).
    for (::std::unique_ptr<WatcherState> &watcher : watchers_) {
      watcher->Construct();
    }

    // Now, all the callbacks are setup. Lock everything into memory and go RT.
    if (priority_ != 0) {
      ::aos::InitRT();

      LOG(INFO) << "Setting priority to " << priority_;
      ::aos::SetCurrentThreadRealtimePriority(priority_);
    }

    set_is_running(true);

    // Now that we are realtime (but before the OnRun handlers run), snap the
    // queue index pointer to the newest message. This happens in RT so that we
    // minimize the risk of losing messages.
    for (::std::unique_ptr<WatcherState> &watcher : watchers_) {
      watcher->Startup();
    }

    // Now that we are RT, run all the OnRun handlers.
    SetTimerContext(monotonic_clock::now());
    for (const auto &run : on_run_) {
      run();
    }

    // And start our main event loop which runs all the timers and handles Quit.
    epoll_.Run();

    // Once epoll exits, there is no useful nonrt work left to do.
    set_is_running(false);

    // Nothing time or synchronization critical needs to happen after this
    // point. Drop RT priority.
    ::aos::UnsetCurrentThreadRealtimePriority();
  }

  for (::std::unique_ptr<WatcherState> &base_watcher : watchers_) {
    ShmWatcherState *watcher =
        reinterpret_cast<ShmWatcherState *>(base_watcher.get());
    watcher->UnregisterWakeup();
  }

  if (watchers_.size() > 0) {
    epoll_.DeleteFd(signalfd_->fd());
    signalfd_.reset();
  }

  SignalHandler::global()->Unregister(this);

  // Trigger any remaining senders or fetchers to be cleared before destroying
  // the event loop so the bookkeeping matches. Do this in the thread that
  // created the timing reporter.
  timing_report_sender_.reset();
  ClearContext();
  std::unique_lock<aos::stl_mutex> locker(exit_status_mutex_);
  std::optional<Result<void>> exit_status;
  // Clear the stored exit_status_ and extract it to be returned.
  exit_status_.swap(exit_status);
  return exit_status.value_or(Result<void>{});
}

void ShmEventLoop::Exit() {
  observed_exit_.test_and_set();
  // Implicitly defaults exit_status_ to success by not setting it.

  epoll_.Quit();
}

void ShmEventLoop::ExitWithStatus(Result<void> status) {
  // Only set the exit status if no other Exit*() call got here first.
  if (!observed_exit_.test_and_set()) {
    std::unique_lock<aos::stl_mutex> locker(exit_status_mutex_);
    exit_status_ = std::move(status);
  } else {
    VLOG(1) << "Exit status is already set; not setting it again.";
  }
  Exit();
}

std::unique_ptr<ExitHandle> ShmEventLoop::MakeExitHandle() {
  return std::make_unique<ShmExitHandle>(this);
}

ShmEventLoop::~ShmEventLoop() {
  CheckCurrentThread();
  // Force everything with a registered fd with epoll to be destroyed now.
  timers_.clear();
  phased_loops_.clear();
  watchers_.clear();

  CHECK(!is_running()) << ": ShmEventLoop destroyed while running";
  CHECK_EQ(0, exit_handle_count_)
      << ": All ExitHandles must be destroyed before the ShmEventLoop";
}

void ShmEventLoop::SetRuntimeRealtimePriority(int priority) {
  CheckCurrentThread();
  if (is_running()) {
    LOG(FATAL) << "Cannot set realtime priority while running.";
  }
  priority_ = priority;
}

void ShmEventLoop::SetRuntimeAffinity(const cpu_set_t &cpuset) {
  CheckCurrentThread();
  if (is_running()) {
    LOG(FATAL) << "Cannot set affinity while running.";
  }
  affinity_ = cpuset;
}

void ShmEventLoop::set_name(const std::string_view name) {
  CheckCurrentThread();
  name_ = std::string(name);
  UpdateTimingReport();
}

absl::Span<const char> ShmEventLoop::GetWatcherSharedMemory(
    const Channel *channel) {
  CheckCurrentThread();
  ShmWatcherState *const watcher_state =
      static_cast<ShmWatcherState *>(GetWatcherState(channel));
  return watcher_state->GetSharedMemory();
}

int ShmEventLoop::NumberBuffers(const Channel *channel) {
  CheckCurrentThread();
  return ipc_lib::MakeQueueConfiguration(configuration(), channel)
      .num_messages();
}

absl::Span<char> ShmEventLoop::GetShmSenderSharedMemory(
    const aos::RawSender *sender) const {
  CheckCurrentThread();
  return static_cast<const ShmSender *>(sender)->GetSharedMemory();
}

absl::Span<const char> ShmEventLoop::GetShmFetcherPrivateMemory(
    const aos::RawFetcher *fetcher) const {
  CheckCurrentThread();
  return static_cast<const ShmFetcher *>(fetcher)->GetPrivateMemory();
}

pid_t ShmEventLoop::GetTid() {
  CheckCurrentThread();
  return syscall(SYS_gettid);
}

}  // namespace aos