blob: e03849813567d0e7936f888f1ba3864e9f1dae24 [file] [log] [blame]
Alex Perrycb7da4b2019-08-28 19:35:56 -07001#include "aos/events/shm_event_loop.h"
2
Alex Perrycb7da4b2019-08-28 19:35:56 -07003#include <sys/stat.h>
Austin Schuh39788ff2019-12-01 18:22:57 -08004#include <sys/syscall.h>
Alex Perrycb7da4b2019-08-28 19:35:56 -07005#include <sys/types.h>
Tyler Chatow67ddb032020-01-12 14:30:04 -08006
Alex Perrycb7da4b2019-08-28 19:35:56 -07007#include <algorithm>
8#include <atomic>
9#include <chrono>
Austin Schuh39788ff2019-12-01 18:22:57 -080010#include <iterator>
Alex Perrycb7da4b2019-08-28 19:35:56 -070011#include <stdexcept>
12
Austin Schuh99f7c6a2024-06-25 22:07:44 -070013#include "absl/flags/flag.h"
14#include "absl/log/check.h"
15#include "absl/log/log.h"
Philipp Schrader790cb542023-07-05 21:06:52 -070016
Tyler Chatow67ddb032020-01-12 14:30:04 -080017#include "aos/events/aos_logging.h"
Alex Perrycb7da4b2019-08-28 19:35:56 -070018#include "aos/events/epoll.h"
Austin Schuh39788ff2019-12-01 18:22:57 -080019#include "aos/events/event_loop_generated.h"
20#include "aos/events/timing_statistics.h"
Austin Schuh094d09b2020-11-20 23:26:52 -080021#include "aos/init.h"
Alex Perrycb7da4b2019-08-28 19:35:56 -070022#include "aos/ipc_lib/lockless_queue.h"
Austin Schuh4d275fc2022-09-16 15:42:45 -070023#include "aos/ipc_lib/memory_mapped_queue.h"
Alex Perrycb7da4b2019-08-28 19:35:56 -070024#include "aos/realtime.h"
Austin Schuh32fd5a72019-12-01 22:20:26 -080025#include "aos/stl_mutex/stl_mutex.h"
Austin Schuhfccb2d02020-01-26 16:11:19 -080026#include "aos/util/file.h"
Alex Perrycb7da4b2019-08-28 19:35:56 -070027#include "aos/util/phased_loop.h"
28
namespace {

// Returns the basename of `path`: the portion after the last '/'.  If there
// is no '/', the whole string is the basename.  This very much assumes that
// the application name is null terminated.
const char *Filename(const char *path) {
  const std::string_view full_path(path);
  const size_t last_separator = full_path.rfind('/');
  if (last_separator == std::string_view::npos) {
    return path;
  }
  return path + last_separator + 1;
}

}  // namespace
42
Brennan Coslett6fd3c002023-07-11 17:41:09 -050043// This value is affected by the umask of the process which is calling it
44// and is set to the user's value by default (check yours running `umask` on
45// the command line).
46// Any file mode requested is transformed using: mode & ~umask and the default
47// umask is 0022 (allow any permissions for the user, dont allow writes for
48// groups or others).
49// See https://man7.org/linux/man-pages/man2/umask.2.html for more details.
50// WITH THE DEFAULT UMASK YOU WONT ACTUALLY GET THESE PERMISSIONS :)
Austin Schuh99f7c6a2024-06-25 22:07:44 -070051ABSL_FLAG(uint32_t, permissions, 0770,
52 "Permissions to make shared memory files and folders, "
53 "affected by the process's umask. "
54 "See shm_event_loop.cc for more details.");
55ABSL_FLAG(std::string, application_name, Filename(program_invocation_name),
56 "The application name");
Alex Perrycb7da4b2019-08-28 19:35:56 -070057
58namespace aos {
59
Brian Silverman148d43d2020-06-07 18:19:22 -050060using namespace shm_event_loop_internal;
61
Brian Silverman4f4e0612020-08-12 19:54:41 -070062namespace {
63
Austin Schuh217a9782019-12-21 23:02:50 -080064const Node *MaybeMyNode(const Configuration *configuration) {
65 if (!configuration->has_nodes()) {
66 return nullptr;
67 }
Alex Perrycb7da4b2019-08-28 19:35:56 -070068
Austin Schuh217a9782019-12-21 23:02:50 -080069 return configuration::GetMyNode(configuration);
70}
Alex Perrycb7da4b2019-08-28 19:35:56 -070071
Philipp Schradera8734662023-08-06 14:49:39 -070072void IgnoreWakeupSignal() {
73 struct sigaction action;
74 action.sa_handler = SIG_IGN;
75 PCHECK(sigemptyset(&action.sa_mask) == 0);
76 action.sa_flags = 0;
77 PCHECK(sigaction(ipc_lib::kWakeupSignal, &action, nullptr) == 0);
78}
79
Austin Schuh39788ff2019-12-01 18:22:57 -080080} // namespace
81
Austin Schuh217a9782019-12-21 23:02:50 -080082ShmEventLoop::ShmEventLoop(const Configuration *configuration)
Austin Schuh83c7f702021-01-19 22:36:29 -080083 : EventLoop(configuration),
84 boot_uuid_(UUID::BootUUID()),
Austin Schuh99f7c6a2024-06-25 22:07:44 -070085 shm_base_(absl::GetFlag(FLAGS_shm_base)),
86 name_(absl::GetFlag(FLAGS_application_name)),
Austin Schuh15649d62019-12-28 16:36:38 -080087 node_(MaybeMyNode(configuration)) {
Philipp Schradera8734662023-08-06 14:49:39 -070088 // Ignore the wakeup signal by default. Otherwise, we have race conditions on
89 // shutdown where a wakeup signal will uncleanly terminate the process.
90 // See LocklessQueueWakeUpper::Wakeup() for some more information.
91 IgnoreWakeupSignal();
92
Austin Schuh094d09b2020-11-20 23:26:52 -080093 CHECK(IsInitialized()) << ": Need to initialize AOS first.";
Austin Schuh0debde12022-08-17 16:25:17 -070094 ClearContext();
Austin Schuh15649d62019-12-28 16:36:38 -080095 if (configuration->has_nodes()) {
96 CHECK(node_ != nullptr) << ": Couldn't find node in config.";
97 }
98}
Austin Schuh217a9782019-12-21 23:02:50 -080099
Brian Silverman148d43d2020-06-07 18:19:22 -0500100namespace shm_event_loop_internal {
Austin Schuh39788ff2019-12-01 18:22:57 -0800101
102class SimpleShmFetcher {
Alex Perrycb7da4b2019-08-28 19:35:56 -0700103 public:
Austin Schuhef323c02020-09-01 14:55:28 -0700104 explicit SimpleShmFetcher(std::string_view shm_base, ShmEventLoop *event_loop,
105 const Channel *channel)
Austin Schuh432784f2020-06-23 17:27:35 -0700106 : event_loop_(event_loop),
107 channel_(channel),
Austin Schuh99f7c6a2024-06-25 22:07:44 -0700108 lockless_queue_memory_(shm_base, absl::GetFlag(FLAGS_permissions),
Austin Schuh4d275fc2022-09-16 15:42:45 -0700109 event_loop->configuration(), channel),
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700110 reader_(lockless_queue_memory_.queue()) {
Alex Perrycb7da4b2019-08-28 19:35:56 -0700111 context_.data = nullptr;
112 // Point the queue index at the next index to read starting now. This
113 // makes it such that FetchNext will read the next message sent after
114 // the fetcher is created.
115 PointAtNextQueueIndex();
116 }
117
Austin Schuh39788ff2019-12-01 18:22:57 -0800118 ~SimpleShmFetcher() {}
Alex Perrycb7da4b2019-08-28 19:35:56 -0700119
Brian Silverman77162972020-08-12 19:52:40 -0700120 // Sets this object to pin or copy data, as configured in the channel.
121 void RetrieveData() {
122 if (channel_->read_method() == ReadMethod::PIN) {
123 PinDataOnFetch();
124 } else {
125 CopyDataOnFetch();
126 }
127 }
128
Brian Silverman3bca5322020-08-12 19:35:29 -0700129 // Sets this object to copy data out of the shared memory into a private
130 // buffer when fetching.
131 void CopyDataOnFetch() {
Brian Silverman77162972020-08-12 19:52:40 -0700132 CHECK(!pin_data());
Brian Silverman3bca5322020-08-12 19:35:29 -0700133 data_storage_.reset(static_cast<char *>(
134 malloc(channel_->max_size() + kChannelDataAlignment - 1)));
135 }
136
Brian Silverman77162972020-08-12 19:52:40 -0700137 // Sets this object to pin data in shared memory when fetching.
138 void PinDataOnFetch() {
139 CHECK(!copy_data());
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700140 auto maybe_pinner =
141 ipc_lib::LocklessQueuePinner::Make(lockless_queue_memory_.queue());
Brian Silverman77162972020-08-12 19:52:40 -0700142 if (!maybe_pinner) {
143 LOG(FATAL) << "Failed to create reader on "
144 << configuration::CleanedChannelToString(channel_)
145 << ", too many readers.";
146 }
147 pinner_ = std::move(maybe_pinner.value());
148 }
149
Alex Perrycb7da4b2019-08-28 19:35:56 -0700150 // Points the next message to fetch at the queue index which will be
151 // populated next.
152 void PointAtNextQueueIndex() {
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700153 actual_queue_index_ = reader_.LatestIndex();
Alex Perrycb7da4b2019-08-28 19:35:56 -0700154 if (!actual_queue_index_.valid()) {
155 // Nothing in the queue. The next element will show up at the 0th
156 // index in the queue.
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700157 actual_queue_index_ = ipc_lib::QueueIndex::Zero(
158 LocklessQueueSize(lockless_queue_memory_.memory()));
Alex Perrycb7da4b2019-08-28 19:35:56 -0700159 } else {
160 actual_queue_index_ = actual_queue_index_.Increment();
161 }
162 }
163
Austin Schuh2b4661a2023-09-20 21:37:33 -0700164 bool FetchNext() { return FetchNextIf(should_fetch_); }
Austin Schuh98ed26f2023-07-19 14:12:28 -0700165
166 bool FetchNextIf(std::function<bool(const Context &)> fn) {
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700167 const ipc_lib::LocklessQueueReader::Result read_result =
Austin Schuh98ed26f2023-07-19 14:12:28 -0700168 DoFetch(actual_queue_index_, std::move(fn));
Austin Schuh432784f2020-06-23 17:27:35 -0700169
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700170 return read_result == ipc_lib::LocklessQueueReader::Result::GOOD;
Alex Perrycb7da4b2019-08-28 19:35:56 -0700171 }
172
Austin Schuh98ed26f2023-07-19 14:12:28 -0700173 bool FetchIf(std::function<bool(const Context &)> fn) {
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700174 const ipc_lib::QueueIndex queue_index = reader_.LatestIndex();
Alex Perrycb7da4b2019-08-28 19:35:56 -0700175 // actual_queue_index_ is only meaningful if it was set by Fetch or
176 // FetchNext. This happens when valid_data_ has been set. So, only
177 // skip checking if valid_data_ is true.
178 //
179 // Also, if the latest queue index is invalid, we are empty. So there
180 // is nothing to fetch.
Austin Schuh39788ff2019-12-01 18:22:57 -0800181 if ((context_.data != nullptr &&
Alex Perrycb7da4b2019-08-28 19:35:56 -0700182 queue_index == actual_queue_index_.DecrementBy(1u)) ||
183 !queue_index.valid()) {
184 return false;
185 }
186
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700187 const ipc_lib::LocklessQueueReader::Result read_result =
Austin Schuh98ed26f2023-07-19 14:12:28 -0700188 DoFetch(queue_index, std::move(fn));
Alex Perrycb7da4b2019-08-28 19:35:56 -0700189
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700190 CHECK(read_result != ipc_lib::LocklessQueueReader::Result::NOTHING_NEW)
Austin Schuhf5652592019-12-29 16:26:15 -0800191 << ": Queue index went backwards. This should never happen. "
192 << configuration::CleanedChannelToString(channel_);
Alex Perrycb7da4b2019-08-28 19:35:56 -0700193
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700194 return read_result == ipc_lib::LocklessQueueReader::Result::GOOD;
Alex Perrycb7da4b2019-08-28 19:35:56 -0700195 }
196
Austin Schuh2b4661a2023-09-20 21:37:33 -0700197 bool Fetch() { return FetchIf(should_fetch_); }
Austin Schuh98ed26f2023-07-19 14:12:28 -0700198
Austin Schuh39788ff2019-12-01 18:22:57 -0800199 Context context() const { return context_; }
200
Alex Perrycb7da4b2019-08-28 19:35:56 -0700201 bool RegisterWakeup(int priority) {
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700202 CHECK(!watcher_);
203 watcher_ = ipc_lib::LocklessQueueWatcher::Make(
204 lockless_queue_memory_.queue(), priority);
205 return static_cast<bool>(watcher_);
Alex Perrycb7da4b2019-08-28 19:35:56 -0700206 }
207
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700208 void UnregisterWakeup() {
209 CHECK(watcher_);
210 watcher_ = std::nullopt;
211 }
Alex Perrycb7da4b2019-08-28 19:35:56 -0700212
Brian Silvermana5450a92020-08-12 19:59:57 -0700213 absl::Span<char> GetMutableSharedMemory() {
214 return lockless_queue_memory_.GetMutableSharedMemory();
Brian Silverman5120afb2020-01-31 17:44:35 -0800215 }
216
Brian Silvermana5450a92020-08-12 19:59:57 -0700217 absl::Span<const char> GetConstSharedMemory() const {
218 return lockless_queue_memory_.GetConstSharedMemory();
219 }
220
221 absl::Span<const char> GetPrivateMemory() const {
222 if (pin_data()) {
223 return lockless_queue_memory_.GetConstSharedMemory();
224 }
Brian Silverman6d2b3592020-06-18 14:40:15 -0700225 return absl::Span<char>(
226 const_cast<SimpleShmFetcher *>(this)->data_storage_start(),
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700227 LocklessQueueMessageDataSize(lockless_queue_memory_.memory()));
Brian Silverman6d2b3592020-06-18 14:40:15 -0700228 }
229
Alex Perrycb7da4b2019-08-28 19:35:56 -0700230 private:
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700231 ipc_lib::LocklessQueueReader::Result DoFetch(
Austin Schuh98ed26f2023-07-19 14:12:28 -0700232 ipc_lib::QueueIndex queue_index,
233 std::function<bool(const Context &context)> fn) {
Brian Silverman3bca5322020-08-12 19:35:29 -0700234 // TODO(austin): Get behind and make sure it dies.
235 char *copy_buffer = nullptr;
236 if (copy_data()) {
237 copy_buffer = data_storage_start();
238 }
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700239 ipc_lib::LocklessQueueReader::Result read_result = reader_.Read(
Brian Silverman3bca5322020-08-12 19:35:29 -0700240 queue_index.index(), &context_.monotonic_event_time,
241 &context_.realtime_event_time, &context_.monotonic_remote_time,
Austin Schuhac6d89e2024-03-27 14:56:09 -0700242 &context_.monotonic_remote_transmit_time,
Brian Silverman3bca5322020-08-12 19:35:29 -0700243 &context_.realtime_remote_time, &context_.remote_queue_index,
Austin Schuh98ed26f2023-07-19 14:12:28 -0700244 &context_.source_boot_uuid, &context_.size, copy_buffer, std::move(fn));
Brian Silverman3bca5322020-08-12 19:35:29 -0700245
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700246 if (read_result == ipc_lib::LocklessQueueReader::Result::GOOD) {
Brian Silverman77162972020-08-12 19:52:40 -0700247 if (pin_data()) {
Brian Silverman4f4e0612020-08-12 19:54:41 -0700248 const int pin_result = pinner_->PinIndex(queue_index.index());
249 CHECK(pin_result >= 0)
Brian Silverman77162972020-08-12 19:52:40 -0700250 << ": Got behind while reading and the last message was modified "
251 "out from under us while we tried to pin it. Don't get so far "
252 "behind on: "
253 << configuration::CleanedChannelToString(channel_);
Brian Silverman4f4e0612020-08-12 19:54:41 -0700254 context_.buffer_index = pin_result;
255 } else {
256 context_.buffer_index = -1;
Brian Silverman77162972020-08-12 19:52:40 -0700257 }
258
Brian Silverman3bca5322020-08-12 19:35:29 -0700259 context_.queue_index = queue_index.index();
260 if (context_.remote_queue_index == 0xffffffffu) {
261 context_.remote_queue_index = context_.queue_index;
262 }
263 if (context_.monotonic_remote_time == aos::monotonic_clock::min_time) {
264 context_.monotonic_remote_time = context_.monotonic_event_time;
265 }
266 if (context_.realtime_remote_time == aos::realtime_clock::min_time) {
267 context_.realtime_remote_time = context_.realtime_event_time;
268 }
269 const char *const data = DataBuffer();
270 if (data) {
271 context_.data =
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700272 data +
273 LocklessQueueMessageDataSize(lockless_queue_memory_.memory()) -
274 context_.size;
Brian Silverman3bca5322020-08-12 19:35:29 -0700275 } else {
276 context_.data = nullptr;
277 }
278 actual_queue_index_ = queue_index.Increment();
279 }
280
281 // Make sure the data wasn't modified while we were reading it. This
282 // can only happen if you are reading the last message *while* it is
283 // being written to, which means you are pretty far behind.
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700284 CHECK(read_result != ipc_lib::LocklessQueueReader::Result::OVERWROTE)
Brian Silverman3bca5322020-08-12 19:35:29 -0700285 << ": Got behind while reading and the last message was modified "
286 "out from under us while we were reading it. Don't get so far "
287 "behind on: "
288 << configuration::CleanedChannelToString(channel_);
289
290 // We fell behind between when we read the index and read the value.
291 // This isn't worth recovering from since this means we went to sleep
292 // for a long time in the middle of this function.
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700293 if (read_result == ipc_lib::LocklessQueueReader::Result::TOO_OLD) {
Brian Silverman3bca5322020-08-12 19:35:29 -0700294 event_loop_->SendTimingReport();
295 LOG(FATAL) << "The next message is no longer available. "
296 << configuration::CleanedChannelToString(channel_);
297 }
298
299 return read_result;
300 }
301
302 char *data_storage_start() const {
303 CHECK(copy_data());
Brian Silvermana1652f32020-01-29 20:41:44 -0800304 return RoundChannelData(data_storage_.get(), channel_->max_size());
305 }
Brian Silverman3bca5322020-08-12 19:35:29 -0700306
307 // Note that for some modes the return value will change as new messages are
308 // read.
309 const char *DataBuffer() const {
310 if (copy_data()) {
311 return data_storage_start();
312 }
Brian Silverman77162972020-08-12 19:52:40 -0700313 if (pin_data()) {
314 return static_cast<const char *>(pinner_->Data());
315 }
Brian Silverman3bca5322020-08-12 19:35:29 -0700316 return nullptr;
317 }
318
Brian Silverman6b8a3c32020-03-06 11:26:14 -0800319 bool copy_data() const { return static_cast<bool>(data_storage_); }
Brian Silverman77162972020-08-12 19:52:40 -0700320 bool pin_data() const { return static_cast<bool>(pinner_); }
Brian Silvermana1652f32020-01-29 20:41:44 -0800321
Austin Schuh432784f2020-06-23 17:27:35 -0700322 aos::ShmEventLoop *event_loop_;
Austin Schuhf5652592019-12-29 16:26:15 -0800323 const Channel *const channel_;
Austin Schuh4d275fc2022-09-16 15:42:45 -0700324 ipc_lib::MemoryMappedQueue lockless_queue_memory_;
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700325 ipc_lib::LocklessQueueReader reader_;
326 // This being nullopt indicates we're not looking for wakeups right now.
327 std::optional<ipc_lib::LocklessQueueWatcher> watcher_;
Alex Perrycb7da4b2019-08-28 19:35:56 -0700328
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700329 ipc_lib::QueueIndex actual_queue_index_ = ipc_lib::QueueIndex::Invalid();
Alex Perrycb7da4b2019-08-28 19:35:56 -0700330
Brian Silverman6b8a3c32020-03-06 11:26:14 -0800331 // This being empty indicates we're not going to copy data.
332 std::unique_ptr<char, decltype(&free)> data_storage_{nullptr, &free};
Austin Schuh39788ff2019-12-01 18:22:57 -0800333
Brian Silverman77162972020-08-12 19:52:40 -0700334 // This being nullopt indicates we're not going to pin messages.
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700335 std::optional<ipc_lib::LocklessQueuePinner> pinner_;
Brian Silverman77162972020-08-12 19:52:40 -0700336
Austin Schuh39788ff2019-12-01 18:22:57 -0800337 Context context_;
Austin Schuh82ea7382023-07-14 15:17:34 -0700338
339 // Pre-allocated should_fetch function so we don't allocate.
Austin Schuh98ed26f2023-07-19 14:12:28 -0700340 const std::function<bool(const Context &)> should_fetch_;
Austin Schuh39788ff2019-12-01 18:22:57 -0800341};
342
343class ShmFetcher : public RawFetcher {
344 public:
Austin Schuhef323c02020-09-01 14:55:28 -0700345 explicit ShmFetcher(std::string_view shm_base, ShmEventLoop *event_loop,
346 const Channel *channel)
Austin Schuhaa79e4e2019-12-29 20:43:32 -0800347 : RawFetcher(event_loop, channel),
Austin Schuhef323c02020-09-01 14:55:28 -0700348 simple_shm_fetcher_(shm_base, event_loop, channel) {
Brian Silverman77162972020-08-12 19:52:40 -0700349 simple_shm_fetcher_.RetrieveData();
Brian Silverman3bca5322020-08-12 19:35:29 -0700350 }
Austin Schuh39788ff2019-12-01 18:22:57 -0800351
Austin Schuh3054f5f2021-07-21 15:38:01 -0700352 ~ShmFetcher() override {
353 shm_event_loop()->CheckCurrentThread();
354 context_.data = nullptr;
355 }
Austin Schuh39788ff2019-12-01 18:22:57 -0800356
357 std::pair<bool, monotonic_clock::time_point> DoFetchNext() override {
Austin Schuh3054f5f2021-07-21 15:38:01 -0700358 shm_event_loop()->CheckCurrentThread();
Austin Schuh39788ff2019-12-01 18:22:57 -0800359 if (simple_shm_fetcher_.FetchNext()) {
360 context_ = simple_shm_fetcher_.context();
361 return std::make_pair(true, monotonic_clock::now());
362 }
363 return std::make_pair(false, monotonic_clock::min_time);
364 }
365
Austin Schuh98ed26f2023-07-19 14:12:28 -0700366 std::pair<bool, monotonic_clock::time_point> DoFetchNextIf(
367 std::function<bool(const Context &context)> fn) override {
368 shm_event_loop()->CheckCurrentThread();
369 if (simple_shm_fetcher_.FetchNextIf(std::move(fn))) {
370 context_ = simple_shm_fetcher_.context();
371 return std::make_pair(true, monotonic_clock::now());
372 }
373 return std::make_pair(false, monotonic_clock::min_time);
374 }
375
Austin Schuh39788ff2019-12-01 18:22:57 -0800376 std::pair<bool, monotonic_clock::time_point> DoFetch() override {
Austin Schuh3054f5f2021-07-21 15:38:01 -0700377 shm_event_loop()->CheckCurrentThread();
Austin Schuh39788ff2019-12-01 18:22:57 -0800378 if (simple_shm_fetcher_.Fetch()) {
379 context_ = simple_shm_fetcher_.context();
380 return std::make_pair(true, monotonic_clock::now());
381 }
382 return std::make_pair(false, monotonic_clock::min_time);
383 }
384
Austin Schuh98ed26f2023-07-19 14:12:28 -0700385 std::pair<bool, monotonic_clock::time_point> DoFetchIf(
386 std::function<bool(const Context &context)> fn) override {
387 shm_event_loop()->CheckCurrentThread();
388 if (simple_shm_fetcher_.FetchIf(std::move(fn))) {
389 context_ = simple_shm_fetcher_.context();
390 return std::make_pair(true, monotonic_clock::now());
391 }
392 return std::make_pair(false, monotonic_clock::min_time);
393 }
394
Brian Silvermana5450a92020-08-12 19:59:57 -0700395 absl::Span<const char> GetPrivateMemory() const {
Brian Silverman6d2b3592020-06-18 14:40:15 -0700396 return simple_shm_fetcher_.GetPrivateMemory();
397 }
398
Austin Schuh39788ff2019-12-01 18:22:57 -0800399 private:
Austin Schuh3054f5f2021-07-21 15:38:01 -0700400 const ShmEventLoop *shm_event_loop() const {
401 return static_cast<const ShmEventLoop *>(event_loop());
402 }
403
Austin Schuh39788ff2019-12-01 18:22:57 -0800404 SimpleShmFetcher simple_shm_fetcher_;
Alex Perrycb7da4b2019-08-28 19:35:56 -0700405};
406
Brian Silvermane1fe2512022-08-14 23:18:50 -0700407class ShmExitHandle : public ExitHandle {
408 public:
409 ShmExitHandle(ShmEventLoop *event_loop) : event_loop_(event_loop) {
410 ++event_loop_->exit_handle_count_;
411 }
412 ~ShmExitHandle() override {
413 CHECK_GT(event_loop_->exit_handle_count_, 0);
414 --event_loop_->exit_handle_count_;
415 }
James Kuszmaul9f998082024-05-23 15:37:35 -0700416 // Because of how we handle reference counting, we either need to implement
417 // reference counting in the copy/move constructors or just not support them.
418 // If we ever develop a need for this object to be movable/copyable,
419 // supporting it should be straightforwards.
420 DISALLOW_COPY_AND_ASSIGN(ShmExitHandle);
Brian Silvermane1fe2512022-08-14 23:18:50 -0700421
James Kuszmaul9f998082024-05-23 15:37:35 -0700422 void Exit(Result<void> status) override {
423 event_loop_->ExitWithStatus(status);
424 }
Brian Silvermane1fe2512022-08-14 23:18:50 -0700425
426 private:
427 ShmEventLoop *const event_loop_;
428};
429
Alex Perrycb7da4b2019-08-28 19:35:56 -0700430class ShmSender : public RawSender {
431 public:
Austin Schuhef323c02020-09-01 14:55:28 -0700432 explicit ShmSender(std::string_view shm_base, EventLoop *event_loop,
433 const Channel *channel)
Austin Schuh39788ff2019-12-01 18:22:57 -0800434 : RawSender(event_loop, channel),
Austin Schuh99f7c6a2024-06-25 22:07:44 -0700435 lockless_queue_memory_(shm_base, absl::GetFlag(FLAGS_permissions),
Austin Schuh4d275fc2022-09-16 15:42:45 -0700436 event_loop->configuration(), channel),
Austin Schuhfff9c3a2023-06-16 18:48:23 -0700437 lockless_queue_sender_(
438 VerifySender(ipc_lib::LocklessQueueSender::Make(
439 lockless_queue_memory_.queue(),
440 configuration::ChannelStorageDuration(
441 event_loop->configuration(), channel)),
442 channel)),
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700443 wake_upper_(lockless_queue_memory_.queue()) {}
Alex Perrycb7da4b2019-08-28 19:35:56 -0700444
Austin Schuh3054f5f2021-07-21 15:38:01 -0700445 ~ShmSender() override { shm_event_loop()->CheckCurrentThread(); }
Austin Schuh39788ff2019-12-01 18:22:57 -0800446
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700447 static ipc_lib::LocklessQueueSender VerifySender(
448 std::optional<ipc_lib::LocklessQueueSender> sender,
Austin Schuhe516ab02020-05-06 21:37:04 -0700449 const Channel *channel) {
450 if (sender) {
451 return std::move(sender.value());
452 }
453 LOG(FATAL) << "Failed to create sender on "
454 << configuration::CleanedChannelToString(channel)
455 << ", too many senders.";
456 }
457
Austin Schuh3054f5f2021-07-21 15:38:01 -0700458 void *data() override {
459 shm_event_loop()->CheckCurrentThread();
460 return lockless_queue_sender_.Data();
461 }
462 size_t size() override {
463 shm_event_loop()->CheckCurrentThread();
464 return lockless_queue_sender_.size();
465 }
milind1f1dca32021-07-03 13:50:07 -0700466
467 Error DoSend(size_t length,
468 aos::monotonic_clock::time_point monotonic_remote_time,
469 aos::realtime_clock::time_point realtime_remote_time,
Austin Schuhac6d89e2024-03-27 14:56:09 -0700470 aos::monotonic_clock::time_point monotonic_remote_transmit_time,
milind1f1dca32021-07-03 13:50:07 -0700471 uint32_t remote_queue_index,
472 const UUID &source_boot_uuid) override {
Austin Schuh3054f5f2021-07-21 15:38:01 -0700473 shm_event_loop()->CheckCurrentThread();
Austin Schuh0f7ed462020-03-28 20:38:34 -0700474 CHECK_LE(length, static_cast<size_t>(channel()->max_size()))
475 << ": Sent too big a message on "
476 << configuration::CleanedChannelToString(channel());
Eric Schmiedebergef44b8a2022-02-28 17:30:38 -0700477 const auto result = lockless_queue_sender_.Send(
Austin Schuhac6d89e2024-03-27 14:56:09 -0700478 length, monotonic_remote_time, realtime_remote_time,
479 monotonic_remote_transmit_time, remote_queue_index, source_boot_uuid,
480 &monotonic_sent_time_, &realtime_sent_time_, &sent_queue_index_);
Eric Schmiedebergef44b8a2022-02-28 17:30:38 -0700481 CHECK_NE(result, ipc_lib::LocklessQueueSender::Result::INVALID_REDZONE)
Austin Schuh91ba6392020-10-03 13:27:47 -0700482 << ": Somebody wrote outside the buffer of their message on channel "
483 << configuration::CleanedChannelToString(channel());
484
Austin Schuh65493d62022-08-17 15:10:37 -0700485 wake_upper_.Wakeup(event_loop()->is_running()
486 ? event_loop()->runtime_realtime_priority()
487 : 0);
Eric Schmiedebergef44b8a2022-02-28 17:30:38 -0700488 return CheckLocklessQueueResult(result);
Alex Perrycb7da4b2019-08-28 19:35:56 -0700489 }
490
milind1f1dca32021-07-03 13:50:07 -0700491 Error DoSend(const void *msg, size_t length,
492 aos::monotonic_clock::time_point monotonic_remote_time,
493 aos::realtime_clock::time_point realtime_remote_time,
Austin Schuhac6d89e2024-03-27 14:56:09 -0700494 aos::monotonic_clock::time_point monotonic_remote_transmit_time,
milind1f1dca32021-07-03 13:50:07 -0700495 uint32_t remote_queue_index,
496 const UUID &source_boot_uuid) override {
Austin Schuh3054f5f2021-07-21 15:38:01 -0700497 shm_event_loop()->CheckCurrentThread();
Austin Schuh0f7ed462020-03-28 20:38:34 -0700498 CHECK_LE(length, static_cast<size_t>(channel()->max_size()))
499 << ": Sent too big a message on "
500 << configuration::CleanedChannelToString(channel());
Eric Schmiedebergef44b8a2022-02-28 17:30:38 -0700501 const auto result = lockless_queue_sender_.Send(
Brian Silvermanaf9a4d82020-10-06 15:10:58 -0700502 reinterpret_cast<const char *>(msg), length, monotonic_remote_time,
Austin Schuhac6d89e2024-03-27 14:56:09 -0700503 realtime_remote_time, monotonic_remote_transmit_time,
504 remote_queue_index, source_boot_uuid, &monotonic_sent_time_,
505 &realtime_sent_time_, &sent_queue_index_);
Eric Schmiedebergef44b8a2022-02-28 17:30:38 -0700506
507 CHECK_NE(result, ipc_lib::LocklessQueueSender::Result::INVALID_REDZONE)
508 << ": Somebody wrote outside the buffer of their message on "
509 "channel "
Austin Schuh91ba6392020-10-03 13:27:47 -0700510 << configuration::CleanedChannelToString(channel());
Austin Schuh65493d62022-08-17 15:10:37 -0700511 wake_upper_.Wakeup(event_loop()->is_running()
512 ? event_loop()->runtime_realtime_priority()
513 : 0);
Eric Schmiedebergef44b8a2022-02-28 17:30:38 -0700514
515 return CheckLocklessQueueResult(result);
Alex Perrycb7da4b2019-08-28 19:35:56 -0700516 }
517
Brian Silverman5120afb2020-01-31 17:44:35 -0800518 absl::Span<char> GetSharedMemory() const {
Brian Silvermana5450a92020-08-12 19:59:57 -0700519 return lockless_queue_memory_.GetMutableSharedMemory();
Brian Silverman5120afb2020-01-31 17:44:35 -0800520 }
521
Austin Schuh3054f5f2021-07-21 15:38:01 -0700522 int buffer_index() override {
523 shm_event_loop()->CheckCurrentThread();
524 return lockless_queue_sender_.buffer_index();
525 }
Brian Silverman4f4e0612020-08-12 19:54:41 -0700526
Alex Perrycb7da4b2019-08-28 19:35:56 -0700527 private:
Austin Schuh3054f5f2021-07-21 15:38:01 -0700528 const ShmEventLoop *shm_event_loop() const {
529 return static_cast<const ShmEventLoop *>(event_loop());
530 }
531
Eric Schmiedebergef44b8a2022-02-28 17:30:38 -0700532 RawSender::Error CheckLocklessQueueResult(
533 const ipc_lib::LocklessQueueSender::Result &result) {
534 switch (result) {
535 case ipc_lib::LocklessQueueSender::Result::GOOD:
536 return Error::kOk;
537 case ipc_lib::LocklessQueueSender::Result::MESSAGES_SENT_TOO_FAST:
538 return Error::kMessagesSentTooFast;
539 case ipc_lib::LocklessQueueSender::Result::INVALID_REDZONE:
540 return Error::kInvalidRedzone;
541 }
542 LOG(FATAL) << "Unknown lockless queue sender result"
543 << static_cast<int>(result);
544 }
545
Austin Schuh4d275fc2022-09-16 15:42:45 -0700546 ipc_lib::MemoryMappedQueue lockless_queue_memory_;
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700547 ipc_lib::LocklessQueueSender lockless_queue_sender_;
548 ipc_lib::LocklessQueueWakeUpper wake_upper_;
Alex Perrycb7da4b2019-08-28 19:35:56 -0700549};
550
Alex Perrycb7da4b2019-08-28 19:35:56 -0700551// Class to manage the state for a Watcher.
Brian Silverman148d43d2020-06-07 18:19:22 -0500552class ShmWatcherState : public WatcherState {
Alex Perrycb7da4b2019-08-28 19:35:56 -0700553 public:
Brian Silverman148d43d2020-06-07 18:19:22 -0500554 ShmWatcherState(
Austin Schuhef323c02020-09-01 14:55:28 -0700555 std::string_view shm_base, ShmEventLoop *event_loop,
556 const Channel *channel,
Brian Silverman6b8a3c32020-03-06 11:26:14 -0800557 std::function<void(const Context &context, const void *message)> fn,
558 bool copy_data)
Brian Silverman148d43d2020-06-07 18:19:22 -0500559 : WatcherState(event_loop, channel, std::move(fn)),
Austin Schuh7d87b672019-12-01 20:23:49 -0800560 event_loop_(event_loop),
561 event_(this),
Austin Schuhef323c02020-09-01 14:55:28 -0700562 simple_shm_fetcher_(shm_base, event_loop, channel) {
Brian Silverman3bca5322020-08-12 19:35:29 -0700563 if (copy_data) {
Brian Silverman77162972020-08-12 19:52:40 -0700564 simple_shm_fetcher_.RetrieveData();
Brian Silverman3bca5322020-08-12 19:35:29 -0700565 }
566 }
Alex Perrycb7da4b2019-08-28 19:35:56 -0700567
Austin Schuh3054f5f2021-07-21 15:38:01 -0700568 ~ShmWatcherState() override {
569 event_loop_->CheckCurrentThread();
570 event_loop_->RemoveEvent(&event_);
571 }
Austin Schuh39788ff2019-12-01 18:22:57 -0800572
Philipp Schrader81fa3fb2023-09-17 18:58:35 -0700573 void Construct() override {
574 event_loop_->CheckCurrentThread();
575 CHECK(RegisterWakeup(event_loop_->runtime_realtime_priority()));
576 }
577
578 void Startup() override {
Austin Schuh3054f5f2021-07-21 15:38:01 -0700579 event_loop_->CheckCurrentThread();
Austin Schuh7d87b672019-12-01 20:23:49 -0800580 simple_shm_fetcher_.PointAtNextQueueIndex();
Austin Schuh39788ff2019-12-01 18:22:57 -0800581 }
Alex Perrycb7da4b2019-08-28 19:35:56 -0700582
Alex Perrycb7da4b2019-08-28 19:35:56 -0700583 // Returns true if there is new data available.
Austin Schuh7d87b672019-12-01 20:23:49 -0800584 bool CheckForNewData() {
Alex Perrycb7da4b2019-08-28 19:35:56 -0700585 if (!has_new_data_) {
Austin Schuh39788ff2019-12-01 18:22:57 -0800586 has_new_data_ = simple_shm_fetcher_.FetchNext();
Austin Schuh7d87b672019-12-01 20:23:49 -0800587
588 if (has_new_data_) {
589 event_.set_event_time(
Austin Schuhad154822019-12-27 15:45:13 -0800590 simple_shm_fetcher_.context().monotonic_event_time);
Austin Schuh7d87b672019-12-01 20:23:49 -0800591 event_loop_->AddEvent(&event_);
592 }
Alex Perrycb7da4b2019-08-28 19:35:56 -0700593 }
594
595 return has_new_data_;
596 }
597
Alex Perrycb7da4b2019-08-28 19:35:56 -0700598 // Consumes the data by calling the callback.
Austin Schuh7d87b672019-12-01 20:23:49 -0800599 void HandleEvent() {
Alex Perrycb7da4b2019-08-28 19:35:56 -0700600 CHECK(has_new_data_);
Austin Schuh39788ff2019-12-01 18:22:57 -0800601 DoCallCallback(monotonic_clock::now, simple_shm_fetcher_.context());
Alex Perrycb7da4b2019-08-28 19:35:56 -0700602 has_new_data_ = false;
Austin Schuh7d87b672019-12-01 20:23:49 -0800603 CheckForNewData();
Alex Perrycb7da4b2019-08-28 19:35:56 -0700604 }
605
Austin Schuh39788ff2019-12-01 18:22:57 -0800606 // Registers us to receive a signal on event reception.
Alex Perrycb7da4b2019-08-28 19:35:56 -0700607 bool RegisterWakeup(int priority) {
Austin Schuh39788ff2019-12-01 18:22:57 -0800608 return simple_shm_fetcher_.RegisterWakeup(priority);
Alex Perrycb7da4b2019-08-28 19:35:56 -0700609 }
610
Austin Schuh39788ff2019-12-01 18:22:57 -0800611 void UnregisterWakeup() { return simple_shm_fetcher_.UnregisterWakeup(); }
Alex Perrycb7da4b2019-08-28 19:35:56 -0700612
Brian Silvermana5450a92020-08-12 19:59:57 -0700613 absl::Span<const char> GetSharedMemory() const {
614 return simple_shm_fetcher_.GetConstSharedMemory();
Brian Silverman5120afb2020-01-31 17:44:35 -0800615 }
616
Alex Perrycb7da4b2019-08-28 19:35:56 -0700617 private:
618 bool has_new_data_ = false;
619
Austin Schuh7d87b672019-12-01 20:23:49 -0800620 ShmEventLoop *event_loop_;
Brian Silverman148d43d2020-06-07 18:19:22 -0500621 EventHandler<ShmWatcherState> event_;
Austin Schuh39788ff2019-12-01 18:22:57 -0800622 SimpleShmFetcher simple_shm_fetcher_;
Alex Perrycb7da4b2019-08-28 19:35:56 -0700623};
624
// Adapter class to adapt a timerfd to a TimerHandler.
class ShmTimerHandler final : public TimerHandler {
 public:
  ShmTimerHandler(ShmEventLoop *shm_event_loop, ::std::function<void()> fn)
      : TimerHandler(shm_event_loop, std::move(fn)),
        shm_event_loop_(shm_event_loop),
        event_(this) {
    shm_event_loop_->epoll_.OnReadable(timerfd_.fd(), [this]() {
      // The timer may fire spuriously.  HandleEvent on the event loop will
      // call the callback if it is needed.  It may also have called it when
      // processing some other event, and the kernel decided to deliver this
      // wakeup anyways.
      timerfd_.Read();
      shm_event_loop_->HandleEvent();
    });
  }

  ~ShmTimerHandler() {
    shm_event_loop_->CheckCurrentThread();
    Disable();
    shm_event_loop_->epoll_.DeleteFd(timerfd_.fd());
  }

  // Runs the callback if it is due and reschedules the next repeat (if any).
  // Must only be called when our event has been popped off the event heap.
  void HandleEvent() {
    CHECK(!event_.valid());
    disabled_ = false;
    const auto monotonic_now = Call(monotonic_clock::now, base_);
    if (event_.valid()) {
      // If someone called Schedule inside Call, rescheduling is already taken
      // care of.  Bail.
      return;
    }
    if (disabled_) {
      // Somebody called Disable inside Call, so we don't want to reschedule.
      // Bail.
      return;
    }

    if (repeat_offset_ == std::chrono::seconds(0)) {
      // One-shot timer: nothing more to schedule.
      timerfd_.Disable();
      disabled_ = true;
    } else {
      // Compute how many cycles have elapsed and schedule the next iteration
      // for the next iteration in the future.  The rounding keeps us from
      // scheduling into the past if we fell behind.
      const int elapsed_cycles =
          std::max<int>(0, (monotonic_now - base_ + repeat_offset_ -
                            std::chrono::nanoseconds(1)) /
                               repeat_offset_);
      base_ += repeat_offset_ * elapsed_cycles;

      // Update the heap and schedule the timerfd wakeup.
      event_.set_event_time(base_);
      shm_event_loop_->AddEvent(&event_);
      timerfd_.SetTime(base_, std::chrono::seconds(0));
      disabled_ = false;
    }
  }

  // Schedules the callback at base, then repeating every repeat_offset after
  // that.  A zero repeat_offset means one-shot.
  void Schedule(monotonic_clock::time_point base,
                monotonic_clock::duration repeat_offset) override {
    shm_event_loop_->CheckCurrentThread();
    if (event_.valid()) {
      shm_event_loop_->RemoveEvent(&event_);
    }

    timerfd_.SetTime(base, repeat_offset);
    base_ = base;
    repeat_offset_ = repeat_offset;
    event_.set_event_time(base_);
    shm_event_loop_->AddEvent(&event_);
    disabled_ = false;
  }

  // Cancels any pending callback.
  void Disable() override {
    shm_event_loop_->CheckCurrentThread();
    shm_event_loop_->RemoveEvent(&event_);
    timerfd_.Disable();
    disabled_ = true;
  }

  bool IsDisabled() override { return disabled_; }

 private:
  ShmEventLoop *shm_event_loop_;
  EventHandler<ShmTimerHandler> event_;

  internal::TimerFd timerfd_;

  // Time of the most recently scheduled callback, and the repeat interval
  // (zero for one-shot timers).
  monotonic_clock::time_point base_;
  monotonic_clock::duration repeat_offset_;

  // Used to track if Disable() was called during the callback, so we know not
  // to reschedule.
  bool disabled_ = true;
};
720
// Adapter class to the timerfd and PhasedLoop.
class ShmPhasedLoopHandler final : public PhasedLoopHandler {
 public:
  ShmPhasedLoopHandler(ShmEventLoop *shm_event_loop,
                       ::std::function<void(int)> fn,
                       const monotonic_clock::duration interval,
                       const monotonic_clock::duration offset)
      : PhasedLoopHandler(shm_event_loop, std::move(fn), interval, offset),
        shm_event_loop_(shm_event_loop),
        event_(this) {
    shm_event_loop_->epoll_.OnReadable(
        timerfd_.fd(), [this]() { shm_event_loop_->HandleEvent(); });
  }

  void HandleEvent() {
    // The return value for read is the number of cycles that have elapsed.
    // Because we check to see when this event *should* have happened, there
    // are cases where Read() will return 0, when 1 cycle has actually
    // happened.  This occurs when the timer interrupt hasn't triggered yet.
    // Therefore, ignore it.  Call handles rescheduling and calculating elapsed
    // cycles without any extra help.
    timerfd_.Read();
    event_.Invalidate();

    Call(monotonic_clock::now);
  }

  ~ShmPhasedLoopHandler() override {
    shm_event_loop_->CheckCurrentThread();
    shm_event_loop_->epoll_.DeleteFd(timerfd_.fd());
    shm_event_loop_->RemoveEvent(&event_);
  }

 private:
  // Reschedules the timer.  One-shot: Call() drives the next reschedule.
  void Schedule(monotonic_clock::time_point sleep_time) override {
    shm_event_loop_->CheckCurrentThread();
    if (event_.valid()) {
      shm_event_loop_->RemoveEvent(&event_);
    }

    timerfd_.SetTime(sleep_time, ::aos::monotonic_clock::zero());
    event_.set_event_time(sleep_time);
    shm_event_loop_->AddEvent(&event_);
  }

  ShmEventLoop *shm_event_loop_;
  EventHandler<ShmPhasedLoopHandler> event_;

  internal::TimerFd timerfd_;
};
Brian Silverman148d43d2020-06-07 18:19:22 -0500772
773} // namespace shm_event_loop_internal
Alex Perrycb7da4b2019-08-28 19:35:56 -0700774
775::std::unique_ptr<RawFetcher> ShmEventLoop::MakeRawFetcher(
776 const Channel *channel) {
Austin Schuh3054f5f2021-07-21 15:38:01 -0700777 CheckCurrentThread();
Austin Schuhca4828c2019-12-28 14:21:35 -0800778 if (!configuration::ChannelIsReadableOnNode(channel, node())) {
779 LOG(FATAL) << "Channel { \"name\": \"" << channel->name()->string_view()
780 << "\", \"type\": \"" << channel->type()->string_view()
781 << "\" } is not able to be fetched on this node. Check your "
782 "configuration.";
Austin Schuh217a9782019-12-21 23:02:50 -0800783 }
784
Austin Schuhef323c02020-09-01 14:55:28 -0700785 return ::std::unique_ptr<RawFetcher>(
786 new ShmFetcher(shm_base_, this, channel));
Alex Perrycb7da4b2019-08-28 19:35:56 -0700787}
788
789::std::unique_ptr<RawSender> ShmEventLoop::MakeRawSender(
790 const Channel *channel) {
Austin Schuh3054f5f2021-07-21 15:38:01 -0700791 CheckCurrentThread();
Brian Silverman0fc69932020-01-24 21:54:02 -0800792 TakeSender(channel);
Austin Schuh39788ff2019-12-01 18:22:57 -0800793
Austin Schuhef323c02020-09-01 14:55:28 -0700794 return ::std::unique_ptr<RawSender>(new ShmSender(shm_base_, this, channel));
Alex Perrycb7da4b2019-08-28 19:35:56 -0700795}
796
797void ShmEventLoop::MakeRawWatcher(
798 const Channel *channel,
799 std::function<void(const Context &context, const void *message)> watcher) {
Austin Schuh3054f5f2021-07-21 15:38:01 -0700800 CheckCurrentThread();
Brian Silverman0fc69932020-01-24 21:54:02 -0800801 TakeWatcher(channel);
Austin Schuh217a9782019-12-21 23:02:50 -0800802
Austin Schuh39788ff2019-12-01 18:22:57 -0800803 NewWatcher(::std::unique_ptr<WatcherState>(
Austin Schuhef323c02020-09-01 14:55:28 -0700804 new ShmWatcherState(shm_base_, this, channel, std::move(watcher), true)));
Brian Silverman6b8a3c32020-03-06 11:26:14 -0800805}
806
807void ShmEventLoop::MakeRawNoArgWatcher(
808 const Channel *channel,
809 std::function<void(const Context &context)> watcher) {
Austin Schuh3054f5f2021-07-21 15:38:01 -0700810 CheckCurrentThread();
Brian Silverman6b8a3c32020-03-06 11:26:14 -0800811 TakeWatcher(channel);
812
Brian Silverman148d43d2020-06-07 18:19:22 -0500813 NewWatcher(::std::unique_ptr<WatcherState>(new ShmWatcherState(
Austin Schuhef323c02020-09-01 14:55:28 -0700814 shm_base_, this, channel,
Brian Silverman6b8a3c32020-03-06 11:26:14 -0800815 [watcher](const Context &context, const void *) { watcher(context); },
816 false)));
Alex Perrycb7da4b2019-08-28 19:35:56 -0700817}
818
819TimerHandler *ShmEventLoop::AddTimer(::std::function<void()> callback) {
Austin Schuh3054f5f2021-07-21 15:38:01 -0700820 CheckCurrentThread();
Austin Schuh39788ff2019-12-01 18:22:57 -0800821 return NewTimer(::std::unique_ptr<TimerHandler>(
Brian Silverman148d43d2020-06-07 18:19:22 -0500822 new ShmTimerHandler(this, ::std::move(callback))));
Alex Perrycb7da4b2019-08-28 19:35:56 -0700823}
824
825PhasedLoopHandler *ShmEventLoop::AddPhasedLoop(
826 ::std::function<void(int)> callback,
827 const monotonic_clock::duration interval,
828 const monotonic_clock::duration offset) {
Austin Schuh3054f5f2021-07-21 15:38:01 -0700829 CheckCurrentThread();
Brian Silverman148d43d2020-06-07 18:19:22 -0500830 return NewPhasedLoop(::std::unique_ptr<PhasedLoopHandler>(
831 new ShmPhasedLoopHandler(this, ::std::move(callback), interval, offset)));
Alex Perrycb7da4b2019-08-28 19:35:56 -0700832}
833
// Queues a callback to be run once the event loop goes realtime and starts
// running (see Run()).
void ShmEventLoop::OnRun(::std::function<void()> on_run) {
  CheckCurrentThread();
  on_run_.push_back(::std::move(on_run));
}
838
Austin Schuh3054f5f2021-07-21 15:38:01 -0700839void ShmEventLoop::CheckCurrentThread() const {
840 if (__builtin_expect(check_mutex_ != nullptr, false)) {
841 CHECK(check_mutex_->is_locked())
842 << ": The configured mutex is not locked while calling a "
843 "ShmEventLoop function";
844 }
845 if (__builtin_expect(!!check_tid_, false)) {
846 CHECK_EQ(syscall(SYS_gettid), *check_tid_)
847 << ": Being called from the wrong thread";
848 }
849}
850
Austin Schuh5ca13112021-02-07 22:06:53 -0800851// This is a bit tricky because watchers can generate new events at any time (as
852// long as it's in the past). We want to check the watchers at least once before
853// declaring there are no events to handle, and we want to check them again if
854// event processing takes long enough that we find an event after that point in
855// time to handle.
Austin Schuh7d87b672019-12-01 20:23:49 -0800856void ShmEventLoop::HandleEvent() {
Austin Schuh5ca13112021-02-07 22:06:53 -0800857 // Time through which we've checked for new events in watchers.
858 monotonic_clock::time_point checked_until = monotonic_clock::min_time;
859 if (!signalfd_) {
860 // Nothing to check, so we can bail out immediately once we're out of
861 // events.
862 CHECK(watchers_.empty());
863 checked_until = monotonic_clock::max_time;
Austin Schuh7d87b672019-12-01 20:23:49 -0800864 }
865
Austin Schuh5ca13112021-02-07 22:06:53 -0800866 // Loop until we run out of events to check.
Austin Schuh39788ff2019-12-01 18:22:57 -0800867 while (true) {
Austin Schuh5ca13112021-02-07 22:06:53 -0800868 // Time of the next event we know about. If this is before checked_until, we
869 // know there aren't any new events before the next one that we already know
870 // about, so no need to check the watchers.
871 monotonic_clock::time_point next_time = monotonic_clock::max_time;
872
873 if (EventCount() == 0) {
874 if (checked_until != monotonic_clock::min_time) {
875 // No events, and we've already checked the watchers at least once, so
876 // we're all done.
877 //
878 // There's a small chance that a watcher has gotten another event in
879 // between checked_until and now. If so, then the signalfd will be
880 // triggered now and we'll re-enter HandleEvent immediately. This is
881 // unlikely though, so we don't want to spend time checking all the
882 // watchers unnecessarily.
883 break;
884 }
885 } else {
886 next_time = PeekEvent()->event_time();
887 }
Austin Schuh00cad2e2022-12-02 20:11:04 -0800888 monotonic_clock::time_point now;
889 bool new_data = false;
Austin Schuh5ca13112021-02-07 22:06:53 -0800890
891 if (next_time > checked_until) {
892 // Read all of the signals, because there's no point in waking up again
893 // immediately to handle each one if we've fallen behind.
894 //
895 // This is safe before checking for new data on the watchers. If a signal
896 // is cleared here, the corresponding CheckForNewData() call below will
897 // pick it up.
898 while (true) {
899 const signalfd_siginfo result = signalfd_->Read();
900 if (result.ssi_signo == 0) {
901 break;
902 }
903 CHECK_EQ(result.ssi_signo, ipc_lib::kWakeupSignal);
904 }
Austin Schuh00cad2e2022-12-02 20:11:04 -0800905 // This is the last time we can guarantee that if a message is published
906 // before, we will notice it.
907 now = monotonic_clock::now();
Austin Schuh5ca13112021-02-07 22:06:53 -0800908
909 // Check all the watchers for new events.
910 for (std::unique_ptr<WatcherState> &base_watcher : watchers_) {
911 ShmWatcherState *const watcher =
912 reinterpret_cast<ShmWatcherState *>(base_watcher.get());
913
Austin Schuh00cad2e2022-12-02 20:11:04 -0800914 // Track if we got a message.
915 if (watcher->CheckForNewData()) {
916 new_data = true;
917 }
Austin Schuh5ca13112021-02-07 22:06:53 -0800918 }
919 if (EventCount() == 0) {
920 // Still no events, all done now.
921 break;
922 }
923
924 checked_until = now;
925 // Check for any new events we found.
926 next_time = PeekEvent()->event_time();
Austin Schuh00cad2e2022-12-02 20:11:04 -0800927 } else {
928 now = monotonic_clock::now();
Austin Schuh5ca13112021-02-07 22:06:53 -0800929 }
930
931 if (next_time > now) {
Austin Schuh00cad2e2022-12-02 20:11:04 -0800932 // Ok, we got a message with a timestamp *after* we wrote down time. We
933 // need to process it (otherwise we will go to sleep without processing
934 // it), but we also need to make sure no other messages have come in
935 // before it that we would process out of order. Just go around again to
936 // redo the checks.
937 if (new_data) {
938 continue;
939 }
Austin Schuh39788ff2019-12-01 18:22:57 -0800940 break;
941 }
942
Austin Schuh5ca13112021-02-07 22:06:53 -0800943 EventLoopEvent *const event = PopEvent();
Austin Schuh7d87b672019-12-01 20:23:49 -0800944 event->HandleEvent();
Austin Schuh39788ff2019-12-01 18:22:57 -0800945 }
946}
947
Austin Schuh32fd5a72019-12-01 22:20:26 -0800948// RAII class to mask signals.
949class ScopedSignalMask {
950 public:
951 ScopedSignalMask(std::initializer_list<int> signals) {
952 sigset_t sigset;
953 PCHECK(sigemptyset(&sigset) == 0);
954 for (int signal : signals) {
955 PCHECK(sigaddset(&sigset, signal) == 0);
956 }
957
958 PCHECK(sigprocmask(SIG_BLOCK, &sigset, &old_) == 0);
959 }
960
961 ~ScopedSignalMask() { PCHECK(sigprocmask(SIG_SETMASK, &old_, nullptr) == 0); }
962
963 private:
964 sigset_t old_;
965};
966
// Class to manage the static state associated with killing multiple event
// loops.  SIGINT/SIGHUP/SIGTERM are fanned out as Exit() calls to every
// registered ShmEventLoop.
class SignalHandler {
 public:
  // Gets the singleton.
  static SignalHandler *global() {
    static SignalHandler loop;
    return &loop;
  }

  // Handles the signal with the singleton.
  static void HandleSignal(int) { global()->DoHandleSignal(); }

  // Registers an event loop to receive Exit() calls.
  void Register(ShmEventLoop *event_loop) {
    // Block signals while we have the mutex so we never race with the signal
    // handler.
    ScopedSignalMask mask({SIGINT, SIGHUP, SIGTERM});
    std::unique_lock<stl_mutex> locker(mutex_);
    if (event_loops_.size() == 0) {
      // The first caller registers the signal handler.
      struct sigaction new_action;
      sigemptyset(&new_action.sa_mask);
      // This makes it so that 2 control c's to a stuck process will kill it by
      // restoring the original signal handler.
      new_action.sa_flags = SA_RESETHAND;
      new_action.sa_handler = &HandleSignal;

      PCHECK(sigaction(SIGINT, &new_action, &old_action_int_) == 0);
      PCHECK(sigaction(SIGHUP, &new_action, &old_action_hup_) == 0);
      PCHECK(sigaction(SIGTERM, &new_action, &old_action_term_) == 0);
    }

    event_loops_.push_back(event_loop);
  }

  // Unregisters an event loop to receive Exit() calls.
  void Unregister(ShmEventLoop *event_loop) {
    // Block signals while we have the mutex so we never race with the signal
    // handler.
    ScopedSignalMask mask({SIGINT, SIGHUP, SIGTERM});
    std::unique_lock<stl_mutex> locker(mutex_);

    event_loops_.erase(
        std::find(event_loops_.begin(), event_loops_.end(), event_loop));

    if (event_loops_.size() == 0u) {
      // The last caller restores the original signal handlers.
      PCHECK(sigaction(SIGINT, &old_action_int_, nullptr) == 0);
      PCHECK(sigaction(SIGHUP, &old_action_hup_, nullptr) == 0);
      PCHECK(sigaction(SIGTERM, &old_action_term_, nullptr) == 0);
    }
  }

 private:
  // Runs in signal handler context: asks every registered loop to exit.
  void DoHandleSignal() {
    // We block signals while grabbing the lock, so there should never be a
    // race.  Confirm that this is true using trylock.
    CHECK(mutex_.try_lock()) << ": sigprocmask failed to block signals while "
                                "modifing the event loop list.";
    for (ShmEventLoop *event_loop : event_loops_) {
      event_loop->Exit();
    }
    mutex_.unlock();
  }

  // Mutex to protect all state.
  stl_mutex mutex_;
  std::vector<ShmEventLoop *> event_loops_;
  // Saved handlers, restored when the last event loop unregisters.
  struct sigaction old_action_int_;
  struct sigaction old_action_hup_;
  struct sigaction old_action_term_;
};
1040
James Kuszmaul9f998082024-05-23 15:37:35 -07001041Result<void> ShmEventLoop::Run() {
Austin Schuh3054f5f2021-07-21 15:38:01 -07001042 CheckCurrentThread();
Austin Schuh32fd5a72019-12-01 22:20:26 -08001043 SignalHandler::global()->Register(this);
Austin Schuh39788ff2019-12-01 18:22:57 -08001044
Alex Perrycb7da4b2019-08-28 19:35:56 -07001045 if (watchers_.size() > 0) {
Austin Schuh5ca13112021-02-07 22:06:53 -08001046 signalfd_.reset(new ipc_lib::SignalFd({ipc_lib::kWakeupSignal}));
Brian Silverman36975282021-07-29 12:06:55 -07001047 signalfd_->LeaveSignalBlocked(ipc_lib::kWakeupSignal);
Alex Perrycb7da4b2019-08-28 19:35:56 -07001048
Austin Schuh5ca13112021-02-07 22:06:53 -08001049 epoll_.OnReadable(signalfd_->fd(), [this]() { HandleEvent(); });
Alex Perrycb7da4b2019-08-28 19:35:56 -07001050 }
1051
Austin Schuh39788ff2019-12-01 18:22:57 -08001052 MaybeScheduleTimingReports();
1053
Austin Schuh7d87b672019-12-01 20:23:49 -08001054 ReserveEvents();
1055
Tyler Chatow67ddb032020-01-12 14:30:04 -08001056 {
Austin Schuha0c41ba2020-09-10 22:59:14 -07001057 logging::ScopedLogRestorer prev_logger;
Tyler Chatow67ddb032020-01-12 14:30:04 -08001058 AosLogToFbs aos_logger;
1059 if (!skip_logger_) {
Austin Schuhad9e5eb2021-11-19 20:33:55 -08001060 aos_logger.Initialize(&name_, MakeSender<logging::LogMessageFbs>("/aos"));
Austin Schuha0c41ba2020-09-10 22:59:14 -07001061 prev_logger.Swap(aos_logger.implementation());
Tyler Chatow67ddb032020-01-12 14:30:04 -08001062 }
Alex Perrycb7da4b2019-08-28 19:35:56 -07001063
Tyler Chatow67ddb032020-01-12 14:30:04 -08001064 aos::SetCurrentThreadName(name_.substr(0, 16));
Brian Silverman6a54ff32020-04-28 16:41:39 -07001065 const cpu_set_t default_affinity = DefaultAffinity();
1066 if (!CPU_EQUAL(&affinity_, &default_affinity)) {
1067 ::aos::SetCurrentThreadAffinity(affinity_);
1068 }
Philipp Schrader81fa3fb2023-09-17 18:58:35 -07001069
1070 // Construct the watchers, but don't update the next pointer. This also
1071 // cleans up any watchers that previously died, and puts the nonrt work
1072 // before going realtime. After this happens, we will start queueing
1073 // signals (which may be a bit of extra work to process, but won't cause any
1074 // messages to be lost).
1075 for (::std::unique_ptr<WatcherState> &watcher : watchers_) {
1076 watcher->Construct();
1077 }
1078
Tyler Chatow67ddb032020-01-12 14:30:04 -08001079 // Now, all the callbacks are setup. Lock everything into memory and go RT.
1080 if (priority_ != 0) {
1081 ::aos::InitRT();
1082
1083 LOG(INFO) << "Setting priority to " << priority_;
1084 ::aos::SetCurrentThreadRealtimePriority(priority_);
1085 }
1086
1087 set_is_running(true);
1088
1089 // Now that we are realtime (but before the OnRun handlers run), snap the
Philipp Schrader81fa3fb2023-09-17 18:58:35 -07001090 // queue index pointer to the newest message. This happens in RT so that we
1091 // minimize the risk of losing messages.
Tyler Chatow67ddb032020-01-12 14:30:04 -08001092 for (::std::unique_ptr<WatcherState> &watcher : watchers_) {
Philipp Schrader81fa3fb2023-09-17 18:58:35 -07001093 watcher->Startup();
Tyler Chatow67ddb032020-01-12 14:30:04 -08001094 }
1095
1096 // Now that we are RT, run all the OnRun handlers.
Austin Schuha9012be2021-07-21 15:19:11 -07001097 SetTimerContext(monotonic_clock::now());
Tyler Chatow67ddb032020-01-12 14:30:04 -08001098 for (const auto &run : on_run_) {
1099 run();
1100 }
1101
1102 // And start our main event loop which runs all the timers and handles Quit.
1103 epoll_.Run();
1104
1105 // Once epoll exits, there is no useful nonrt work left to do.
1106 set_is_running(false);
1107
1108 // Nothing time or synchronization critical needs to happen after this
1109 // point. Drop RT priority.
1110 ::aos::UnsetCurrentThreadRealtimePriority();
Alex Perrycb7da4b2019-08-28 19:35:56 -07001111 }
1112
Austin Schuh39788ff2019-12-01 18:22:57 -08001113 for (::std::unique_ptr<WatcherState> &base_watcher : watchers_) {
Brian Silverman148d43d2020-06-07 18:19:22 -05001114 ShmWatcherState *watcher =
1115 reinterpret_cast<ShmWatcherState *>(base_watcher.get());
Alex Perrycb7da4b2019-08-28 19:35:56 -07001116 watcher->UnregisterWakeup();
1117 }
1118
1119 if (watchers_.size() > 0) {
Austin Schuh5ca13112021-02-07 22:06:53 -08001120 epoll_.DeleteFd(signalfd_->fd());
1121 signalfd_.reset();
Alex Perrycb7da4b2019-08-28 19:35:56 -07001122 }
Austin Schuh32fd5a72019-12-01 22:20:26 -08001123
1124 SignalHandler::global()->Unregister(this);
Austin Schuhe84c3ed2019-12-14 15:29:48 -08001125
1126 // Trigger any remaining senders or fetchers to be cleared before destroying
1127 // the event loop so the book keeping matches. Do this in the thread that
1128 // created the timing reporter.
1129 timing_report_sender_.reset();
Austin Schuh0debde12022-08-17 16:25:17 -07001130 ClearContext();
James Kuszmaul9f998082024-05-23 15:37:35 -07001131 std::unique_lock<aos::stl_mutex> locker(exit_status_mutex_);
1132 std::optional<Result<void>> exit_status;
1133 // Clear the stored exit_status_ and extract it to be returned.
1134 exit_status_.swap(exit_status);
1135 return exit_status.value_or(Result<void>{});
Alex Perrycb7da4b2019-08-28 19:35:56 -07001136}
1137
James Kuszmaul9f998082024-05-23 15:37:35 -07001138void ShmEventLoop::Exit() {
1139 observed_exit_.test_and_set();
1140 // Implicitly defaults exit_status_ to success by not setting it.
1141
1142 epoll_.Quit();
1143}
1144
// Requests that Run() return with the provided status.  The first Exit*()
// call wins; subsequent statuses are dropped with a VLOG.
void ShmEventLoop::ExitWithStatus(Result<void> status) {
  // Only set the exit status if no other Exit*() call got here first.
  if (!observed_exit_.test_and_set()) {
    std::unique_lock<aos::stl_mutex> locker(exit_status_mutex_);
    exit_status_ = std::move(status);
  } else {
    VLOG(1) << "Exit status is already set; not setting it again.";
  }
  Exit();
}
Alex Perrycb7da4b2019-08-28 19:35:56 -07001155
Brian Silvermane1fe2512022-08-14 23:18:50 -07001156std::unique_ptr<ExitHandle> ShmEventLoop::MakeExitHandle() {
1157 return std::make_unique<ShmExitHandle>(this);
1158}
1159
Alex Perrycb7da4b2019-08-28 19:35:56 -07001160ShmEventLoop::~ShmEventLoop() {
Austin Schuh3054f5f2021-07-21 15:38:01 -07001161 CheckCurrentThread();
Austin Schuh39788ff2019-12-01 18:22:57 -08001162 // Force everything with a registered fd with epoll to be destroyed now.
1163 timers_.clear();
1164 phased_loops_.clear();
1165 watchers_.clear();
1166
Alex Perrycb7da4b2019-08-28 19:35:56 -07001167 CHECK(!is_running()) << ": ShmEventLoop destroyed while running";
Brian Silvermane1fe2512022-08-14 23:18:50 -07001168 CHECK_EQ(0, exit_handle_count_)
1169 << ": All ExitHandles must be destroyed before the ShmEventLoop";
Alex Perrycb7da4b2019-08-28 19:35:56 -07001170}
1171
Alex Perrycb7da4b2019-08-28 19:35:56 -07001172void ShmEventLoop::SetRuntimeRealtimePriority(int priority) {
Austin Schuh3054f5f2021-07-21 15:38:01 -07001173 CheckCurrentThread();
Alex Perrycb7da4b2019-08-28 19:35:56 -07001174 if (is_running()) {
1175 LOG(FATAL) << "Cannot set realtime priority while running.";
1176 }
1177 priority_ = priority;
1178}
1179
Brian Silverman6a54ff32020-04-28 16:41:39 -07001180void ShmEventLoop::SetRuntimeAffinity(const cpu_set_t &cpuset) {
Austin Schuh3054f5f2021-07-21 15:38:01 -07001181 CheckCurrentThread();
Brian Silverman6a54ff32020-04-28 16:41:39 -07001182 if (is_running()) {
1183 LOG(FATAL) << "Cannot set affinity while running.";
1184 }
1185 affinity_ = cpuset;
1186}
1187
James Kuszmaul57c2baa2020-01-19 14:52:52 -08001188void ShmEventLoop::set_name(const std::string_view name) {
Austin Schuh3054f5f2021-07-21 15:38:01 -07001189 CheckCurrentThread();
James Kuszmaul57c2baa2020-01-19 14:52:52 -08001190 name_ = std::string(name);
1191 UpdateTimingReport();
1192}
1193
Brian Silvermana5450a92020-08-12 19:59:57 -07001194absl::Span<const char> ShmEventLoop::GetWatcherSharedMemory(
1195 const Channel *channel) {
Austin Schuh3054f5f2021-07-21 15:38:01 -07001196 CheckCurrentThread();
Brian Silverman148d43d2020-06-07 18:19:22 -05001197 ShmWatcherState *const watcher_state =
1198 static_cast<ShmWatcherState *>(GetWatcherState(channel));
Brian Silverman5120afb2020-01-31 17:44:35 -08001199 return watcher_state->GetSharedMemory();
1200}
1201
Brian Silverman4f4e0612020-08-12 19:54:41 -07001202int ShmEventLoop::NumberBuffers(const Channel *channel) {
Austin Schuh3054f5f2021-07-21 15:38:01 -07001203 CheckCurrentThread();
Austin Schuh4d275fc2022-09-16 15:42:45 -07001204 return ipc_lib::MakeQueueConfiguration(configuration(), channel)
1205 .num_messages();
Brian Silverman4f4e0612020-08-12 19:54:41 -07001206}
1207
Brian Silverman5120afb2020-01-31 17:44:35 -08001208absl::Span<char> ShmEventLoop::GetShmSenderSharedMemory(
1209 const aos::RawSender *sender) const {
Austin Schuh3054f5f2021-07-21 15:38:01 -07001210 CheckCurrentThread();
Brian Silverman148d43d2020-06-07 18:19:22 -05001211 return static_cast<const ShmSender *>(sender)->GetSharedMemory();
Brian Silverman5120afb2020-01-31 17:44:35 -08001212}
1213
Brian Silvermana5450a92020-08-12 19:59:57 -07001214absl::Span<const char> ShmEventLoop::GetShmFetcherPrivateMemory(
Brian Silverman6d2b3592020-06-18 14:40:15 -07001215 const aos::RawFetcher *fetcher) const {
Austin Schuh3054f5f2021-07-21 15:38:01 -07001216 CheckCurrentThread();
Brian Silverman6d2b3592020-06-18 14:40:15 -07001217 return static_cast<const ShmFetcher *>(fetcher)->GetPrivateMemory();
1218}
1219
Austin Schuh3054f5f2021-07-21 15:38:01 -07001220pid_t ShmEventLoop::GetTid() {
1221 CheckCurrentThread();
1222 return syscall(SYS_gettid);
1223}
Austin Schuh39788ff2019-12-01 18:22:57 -08001224
Alex Perrycb7da4b2019-08-28 19:35:56 -07001225} // namespace aos