#include "aos/events/shm_event_loop.h"

#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/types.h>

#include <algorithm>
#include <atomic>
#include <chrono>
#include <iterator>
#include <stdexcept>

#include "glog/logging.h"

#include "aos/events/aos_logging.h"
#include "aos/events/epoll.h"
#include "aos/events/event_loop_generated.h"
#include "aos/events/timing_statistics.h"
#include "aos/init.h"
#include "aos/ipc_lib/lockless_queue.h"
#include "aos/ipc_lib/memory_mapped_queue.h"
#include "aos/realtime.h"
#include "aos/stl_mutex/stl_mutex.h"
#include "aos/util/file.h"
#include "aos/util/phased_loop.h"

namespace {

// Returns the portion of the path after the last /. This very much assumes
// that the application name is null terminated.
const char *Filename(const char *path) {
  const std::string_view path_string_view = path;
  auto last_slash_pos = path_string_view.find_last_of("/");

  return last_slash_pos == std::string_view::npos ? path
                                                  : path + last_slash_pos + 1;
}

}  // namespace

DEFINE_string(shm_base, "/dev/shm/aos",
              "Directory to place queue backing mmapped files in.");
// The resulting permissions are affected by the umask of the calling process,
// which defaults to the user's value (check yours by running `umask` on the
// command line).
// Any requested file mode is transformed using mode & ~umask, and the default
// umask is 0022 (allow all permissions for the user, don't allow writes for
// group or others).
// See https://man7.org/linux/man-pages/man2/umask.2.html for more details.
// WITH THE DEFAULT UMASK YOU WON'T ACTUALLY GET THESE PERMISSIONS :)
DEFINE_uint32(permissions, 0770,
              "Permissions to make shared memory files and folders, "
              "affected by the process's umask. "
              "See shm_event_loop.cc for more details.");
DEFINE_string(application_name, Filename(program_invocation_name),
              "The application name");

namespace aos {

using namespace shm_event_loop_internal;

void SetShmBase(const std::string_view base) {
  FLAGS_shm_base = std::string(base) + "/aos";
}

namespace {

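// Returns the node this process is running as, or nullptr when the
// configuration has no nodes list (single-node mode).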
const Node *MaybeMyNode(const Configuration *configuration) {
  if (!configuration->has_nodes()) {
    return nullptr;
  }

  return configuration::GetMyNode(configuration);
}

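// Installs SIG_IGN for the lockless queue wakeup signal so stray wakeups
// don't terminate the process (see the constructor below for the rationale).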
void IgnoreWakeupSignal() {
  struct sigaction action;
  action.sa_handler = SIG_IGN;
  PCHECK(sigemptyset(&action.sa_mask) == 0);
  action.sa_flags = 0;
  PCHECK(sigaction(ipc_lib::kWakeupSignal, &action, nullptr) == 0);
}

}  // namespace

ShmEventLoop::ShmEventLoop(const Configuration *configuration)
    : EventLoop(configuration),
      boot_uuid_(UUID::BootUUID()),
      shm_base_(FLAGS_shm_base),
      name_(FLAGS_application_name),
      node_(MaybeMyNode(configuration)) {
  // Ignore the wakeup signal by default. Otherwise, we have race conditions on
  // shutdown where a wakeup signal will uncleanly terminate the process.
  // See LocklessQueueWakeUpper::Wakeup() for some more information.
  IgnoreWakeupSignal();

  CHECK(IsInitialized()) << ": Need to initialize AOS first.";
  ClearContext();
  if (configuration->has_nodes()) {
    CHECK(node_ != nullptr) << ": Couldn't find node in config.";
  }
}

namespace shm_event_loop_internal {

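// Reads messages out of a channel's shared-memory lockless queue. This is the
// common implementation shared by ShmFetcher and ShmWatcherState below: it
// maps the queue, tracks the next queue index to read, and either copies or
// pins message data depending on the channel's read method.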
class SimpleShmFetcher {
 public:
  explicit SimpleShmFetcher(std::string_view shm_base, ShmEventLoop *event_loop,
                            const Channel *channel)
      : event_loop_(event_loop),
        channel_(channel),
        lockless_queue_memory_(shm_base, FLAGS_permissions,
                               event_loop->configuration(), channel),
        reader_(lockless_queue_memory_.queue()) {
    context_.data = nullptr;
    // Point the queue index at the next index to read starting now. This
    // makes it such that FetchNext will read the next message sent after
    // the fetcher is created.
    PointAtNextQueueIndex();
  }

  ~SimpleShmFetcher() {}

  // Sets this object to pin or copy data, as configured in the channel.
  void RetrieveData() {
    if (channel_->read_method() == ReadMethod::PIN) {
      PinDataOnFetch();
    } else {
      CopyDataOnFetch();
    }
  }

  // Sets this object to copy data out of the shared memory into a private
  // buffer when fetching.
  void CopyDataOnFetch() {
    CHECK(!pin_data());
    data_storage_.reset(static_cast<char *>(
        malloc(channel_->max_size() + kChannelDataAlignment - 1)));
  }

  // Sets this object to pin data in shared memory when fetching.
  void PinDataOnFetch() {
    CHECK(!copy_data());
    auto maybe_pinner =
        ipc_lib::LocklessQueuePinner::Make(lockless_queue_memory_.queue());
    if (!maybe_pinner) {
      LOG(FATAL) << "Failed to create reader on "
                 << configuration::CleanedChannelToString(channel_)
                 << ", too many readers.";
    }
    pinner_ = std::move(maybe_pinner.value());
  }

  // Points the next message to fetch at the queue index which will be
  // populated next.
  void PointAtNextQueueIndex() {
    actual_queue_index_ = reader_.LatestIndex();
    if (!actual_queue_index_.valid()) {
      // Nothing in the queue. The next element will show up at the 0th
      // index in the queue.
      actual_queue_index_ = ipc_lib::QueueIndex::Zero(
          LocklessQueueSize(lockless_queue_memory_.memory()));
    } else {
      actual_queue_index_ = actual_queue_index_.Increment();
    }
  }

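  // Fetches the message at the next queue index in order. Returns true if a
  // new message was fetched.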
  bool FetchNext() {
    const ipc_lib::LocklessQueueReader::Result read_result =
        DoFetch(actual_queue_index_);

    return read_result == ipc_lib::LocklessQueueReader::Result::GOOD;
  }

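  // Fetches the latest message in the queue, skipping any intermediate ones.
  // Returns true if a new message was fetched.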
  bool Fetch() {
    const ipc_lib::QueueIndex queue_index = reader_.LatestIndex();
    // actual_queue_index_ is only meaningful if it was set by Fetch or
    // FetchNext, which is when context_.data has been set. So, only skip the
    // check when context_.data is set.
    //
    // Also, if the latest queue index is invalid, we are empty. So there
    // is nothing to fetch.
    if ((context_.data != nullptr &&
         queue_index == actual_queue_index_.DecrementBy(1u)) ||
        !queue_index.valid()) {
      return false;
    }

    const ipc_lib::LocklessQueueReader::Result read_result =
        DoFetch(queue_index);

    CHECK(read_result != ipc_lib::LocklessQueueReader::Result::NOTHING_NEW)
        << ": Queue index went backwards. This should never happen. "
        << configuration::CleanedChannelToString(channel_);

    return read_result == ipc_lib::LocklessQueueReader::Result::GOOD;
  }

  Context context() const { return context_; }

  bool RegisterWakeup(int priority) {
    CHECK(!watcher_);
    watcher_ = ipc_lib::LocklessQueueWatcher::Make(
        lockless_queue_memory_.queue(), priority);
    return static_cast<bool>(watcher_);
  }

  void UnregisterWakeup() {
    CHECK(watcher_);
    watcher_ = std::nullopt;
  }

  absl::Span<char> GetMutableSharedMemory() {
    return lockless_queue_memory_.GetMutableSharedMemory();
  }

  absl::Span<const char> GetConstSharedMemory() const {
    return lockless_queue_memory_.GetConstSharedMemory();
  }

  absl::Span<const char> GetPrivateMemory() const {
    if (pin_data()) {
      return lockless_queue_memory_.GetConstSharedMemory();
    }
    return absl::Span<char>(
        const_cast<SimpleShmFetcher *>(this)->data_storage_start(),
        LocklessQueueMessageDataSize(lockless_queue_memory_.memory()));
  }

 private:
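  // Reads the message at queue_index into context_, copying or pinning the
  // data as configured. Dies if we have fallen so far behind that the message
  // was modified out from under us.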
  ipc_lib::LocklessQueueReader::Result DoFetch(
      ipc_lib::QueueIndex queue_index) {
    // TODO(austin): Get behind and make sure it dies.
    char *copy_buffer = nullptr;
    if (copy_data()) {
      copy_buffer = data_storage_start();
    }
    ipc_lib::LocklessQueueReader::Result read_result = reader_.Read(
        queue_index.index(), &context_.monotonic_event_time,
        &context_.realtime_event_time, &context_.monotonic_remote_time,
        &context_.realtime_remote_time, &context_.remote_queue_index,
        &context_.source_boot_uuid, &context_.size, copy_buffer,
        std::ref(should_fetch_));

    if (read_result == ipc_lib::LocklessQueueReader::Result::GOOD) {
      if (pin_data()) {
        const int pin_result = pinner_->PinIndex(queue_index.index());
        CHECK(pin_result >= 0)
            << ": Got behind while reading and the last message was modified "
               "out from under us while we tried to pin it. Don't get so far "
               "behind on: "
            << configuration::CleanedChannelToString(channel_);
        context_.buffer_index = pin_result;
      } else {
        context_.buffer_index = -1;
      }

      context_.queue_index = queue_index.index();
      if (context_.remote_queue_index == 0xffffffffu) {
        context_.remote_queue_index = context_.queue_index;
      }
      if (context_.monotonic_remote_time == aos::monotonic_clock::min_time) {
        context_.monotonic_remote_time = context_.monotonic_event_time;
      }
      if (context_.realtime_remote_time == aos::realtime_clock::min_time) {
        context_.realtime_remote_time = context_.realtime_event_time;
      }
      const char *const data = DataBuffer();
      if (data) {
        context_.data =
            data +
            LocklessQueueMessageDataSize(lockless_queue_memory_.memory()) -
            context_.size;
      } else {
        context_.data = nullptr;
      }
      actual_queue_index_ = queue_index.Increment();
    }

    // Make sure the data wasn't modified while we were reading it. This
    // can only happen if you are reading the last message *while* it is
    // being written to, which means you are pretty far behind.
    CHECK(read_result != ipc_lib::LocklessQueueReader::Result::OVERWROTE)
        << ": Got behind while reading and the last message was modified "
           "out from under us while we were reading it. Don't get so far "
           "behind on: "
        << configuration::CleanedChannelToString(channel_);

    // We fell behind between when we read the index and read the value.
    // This isn't worth recovering from since this means we went to sleep
    // for a long time in the middle of this function.
    if (read_result == ipc_lib::LocklessQueueReader::Result::TOO_OLD) {
      event_loop_->SendTimingReport();
      LOG(FATAL) << "The next message is no longer available. "
                 << configuration::CleanedChannelToString(channel_);
    }

    return read_result;
  }

  char *data_storage_start() const {
    CHECK(copy_data());
    return RoundChannelData(data_storage_.get(), channel_->max_size());
  }

  // Note that for some modes the return value will change as new messages are
  // read.
  const char *DataBuffer() const {
    if (copy_data()) {
      return data_storage_start();
    }
    if (pin_data()) {
      return static_cast<const char *>(pinner_->Data());
    }
    return nullptr;
  }

  bool copy_data() const { return static_cast<bool>(data_storage_); }
  bool pin_data() const { return static_cast<bool>(pinner_); }

  aos::ShmEventLoop *event_loop_;
  const Channel *const channel_;
  ipc_lib::MemoryMappedQueue lockless_queue_memory_;
  ipc_lib::LocklessQueueReader reader_;
  // This being nullopt indicates we're not looking for wakeups right now.
  std::optional<ipc_lib::LocklessQueueWatcher> watcher_;

  ipc_lib::QueueIndex actual_queue_index_ = ipc_lib::QueueIndex::Invalid();

  // This being empty indicates we're not going to copy data.
  std::unique_ptr<char, decltype(&free)> data_storage_{nullptr, &free};

  // This being nullopt indicates we're not going to pin messages.
  std::optional<ipc_lib::LocklessQueuePinner> pinner_;

  Context context_;

  // Pre-allocated should_fetch function so we don't allocate.
  std::function<bool(const Context &)> should_fetch_ = [](const Context &) {
    return true;
  };
};

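// RawFetcher implementation which fetches messages out of shared memory via
// SimpleShmFetcher.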
class ShmFetcher : public RawFetcher {
 public:
  explicit ShmFetcher(std::string_view shm_base, ShmEventLoop *event_loop,
                      const Channel *channel)
      : RawFetcher(event_loop, channel),
        simple_shm_fetcher_(shm_base, event_loop, channel) {
    simple_shm_fetcher_.RetrieveData();
  }

  ~ShmFetcher() override {
    shm_event_loop()->CheckCurrentThread();
    context_.data = nullptr;
  }

  std::pair<bool, monotonic_clock::time_point> DoFetchNext() override {
    shm_event_loop()->CheckCurrentThread();
    if (simple_shm_fetcher_.FetchNext()) {
      context_ = simple_shm_fetcher_.context();
      return std::make_pair(true, monotonic_clock::now());
    }
    return std::make_pair(false, monotonic_clock::min_time);
  }

  std::pair<bool, monotonic_clock::time_point> DoFetch() override {
    shm_event_loop()->CheckCurrentThread();
    if (simple_shm_fetcher_.Fetch()) {
      context_ = simple_shm_fetcher_.context();
      return std::make_pair(true, monotonic_clock::now());
    }
    return std::make_pair(false, monotonic_clock::min_time);
  }

  absl::Span<const char> GetPrivateMemory() const {
    return simple_shm_fetcher_.GetPrivateMemory();
  }

 private:
  const ShmEventLoop *shm_event_loop() const {
    return static_cast<const ShmEventLoop *>(event_loop());
  }

  SimpleShmFetcher simple_shm_fetcher_;
};

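// ExitHandle implementation which exits the owning ShmEventLoop and tracks the
// number of outstanding handles so the event loop can verify they are all
// destroyed before it is.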
class ShmExitHandle : public ExitHandle {
 public:
  ShmExitHandle(ShmEventLoop *event_loop) : event_loop_(event_loop) {
    ++event_loop_->exit_handle_count_;
  }
  ~ShmExitHandle() override {
    CHECK_GT(event_loop_->exit_handle_count_, 0);
    --event_loop_->exit_handle_count_;
  }

  void Exit() override { event_loop_->Exit(); }

 private:
  ShmEventLoop *const event_loop_;
};

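// RawSender implementation which writes messages directly into the channel's
// shared-memory lockless queue and wakes up any registered watchers.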
class ShmSender : public RawSender {
 public:
  explicit ShmSender(std::string_view shm_base, EventLoop *event_loop,
                     const Channel *channel)
      : RawSender(event_loop, channel),
        lockless_queue_memory_(shm_base, FLAGS_permissions,
                               event_loop->configuration(), channel),
        lockless_queue_sender_(
            VerifySender(ipc_lib::LocklessQueueSender::Make(
                             lockless_queue_memory_.queue(),
                             configuration::ChannelStorageDuration(
                                 event_loop->configuration(), channel)),
                         channel)),
        wake_upper_(lockless_queue_memory_.queue()) {}

  ~ShmSender() override { shm_event_loop()->CheckCurrentThread(); }

  static ipc_lib::LocklessQueueSender VerifySender(
      std::optional<ipc_lib::LocklessQueueSender> sender,
      const Channel *channel) {
    if (sender) {
      return std::move(sender.value());
    }
    LOG(FATAL) << "Failed to create sender on "
               << configuration::CleanedChannelToString(channel)
               << ", too many senders.";
  }

  void *data() override {
    shm_event_loop()->CheckCurrentThread();
    return lockless_queue_sender_.Data();
  }
  size_t size() override {
    shm_event_loop()->CheckCurrentThread();
    return lockless_queue_sender_.size();
  }

  Error DoSend(size_t length,
               aos::monotonic_clock::time_point monotonic_remote_time,
               aos::realtime_clock::time_point realtime_remote_time,
               uint32_t remote_queue_index,
               const UUID &source_boot_uuid) override {
    shm_event_loop()->CheckCurrentThread();
    CHECK_LE(length, static_cast<size_t>(channel()->max_size()))
        << ": Sent too big a message on "
        << configuration::CleanedChannelToString(channel());
    const auto result = lockless_queue_sender_.Send(
        length, monotonic_remote_time, realtime_remote_time,
        remote_queue_index, source_boot_uuid, &monotonic_sent_time_,
        &realtime_sent_time_, &sent_queue_index_);
    CHECK_NE(result, ipc_lib::LocklessQueueSender::Result::INVALID_REDZONE)
        << ": Somebody wrote outside the buffer of their message on channel "
        << configuration::CleanedChannelToString(channel());

    wake_upper_.Wakeup(event_loop()->is_running()
                           ? event_loop()->runtime_realtime_priority()
                           : 0);
    return CheckLocklessQueueResult(result);
  }

  Error DoSend(const void *msg, size_t length,
               aos::monotonic_clock::time_point monotonic_remote_time,
               aos::realtime_clock::time_point realtime_remote_time,
               uint32_t remote_queue_index,
               const UUID &source_boot_uuid) override {
    shm_event_loop()->CheckCurrentThread();
    CHECK_LE(length, static_cast<size_t>(channel()->max_size()))
        << ": Sent too big a message on "
        << configuration::CleanedChannelToString(channel());
    const auto result = lockless_queue_sender_.Send(
        reinterpret_cast<const char *>(msg), length, monotonic_remote_time,
        realtime_remote_time, remote_queue_index, source_boot_uuid,
        &monotonic_sent_time_, &realtime_sent_time_, &sent_queue_index_);

    CHECK_NE(result, ipc_lib::LocklessQueueSender::Result::INVALID_REDZONE)
        << ": Somebody wrote outside the buffer of their message on "
           "channel "
        << configuration::CleanedChannelToString(channel());
    wake_upper_.Wakeup(event_loop()->is_running()
                           ? event_loop()->runtime_realtime_priority()
                           : 0);

    return CheckLocklessQueueResult(result);
  }

  absl::Span<char> GetSharedMemory() const {
    return lockless_queue_memory_.GetMutableSharedMemory();
  }

  int buffer_index() override {
    shm_event_loop()->CheckCurrentThread();
    return lockless_queue_sender_.buffer_index();
  }

 private:
  const ShmEventLoop *shm_event_loop() const {
    return static_cast<const ShmEventLoop *>(event_loop());
  }

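  // Translates a lockless queue send result into the corresponding RawSender
  // error. Dies on an unrecognized result.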
  RawSender::Error CheckLocklessQueueResult(
      const ipc_lib::LocklessQueueSender::Result &result) {
    switch (result) {
      case ipc_lib::LocklessQueueSender::Result::GOOD:
        return Error::kOk;
      case ipc_lib::LocklessQueueSender::Result::MESSAGES_SENT_TOO_FAST:
        return Error::kMessagesSentTooFast;
      case ipc_lib::LocklessQueueSender::Result::INVALID_REDZONE:
        return Error::kInvalidRedzone;
    }
    LOG(FATAL) << "Unknown lockless queue sender result: "
               << static_cast<int>(result);
  }

  ipc_lib::MemoryMappedQueue lockless_queue_memory_;
  ipc_lib::LocklessQueueSender lockless_queue_sender_;
  ipc_lib::LocklessQueueWakeUpper wake_upper_;
};

// Class to manage the state for a Watcher.
class ShmWatcherState : public WatcherState {
 public:
  ShmWatcherState(
      std::string_view shm_base, ShmEventLoop *event_loop,
      const Channel *channel,
      std::function<void(const Context &context, const void *message)> fn,
      bool copy_data)
      : WatcherState(event_loop, channel, std::move(fn)),
        event_loop_(event_loop),
        event_(this),
        simple_shm_fetcher_(shm_base, event_loop, channel) {
    if (copy_data) {
      simple_shm_fetcher_.RetrieveData();
    }
  }

  ~ShmWatcherState() override {
    event_loop_->CheckCurrentThread();
    event_loop_->RemoveEvent(&event_);
  }

  void Startup(EventLoop *event_loop) override {
    event_loop_->CheckCurrentThread();
    simple_shm_fetcher_.PointAtNextQueueIndex();
    CHECK(RegisterWakeup(event_loop->runtime_realtime_priority()));
  }

  // Returns true if there is new data available.
  bool CheckForNewData() {
    if (!has_new_data_) {
      has_new_data_ = simple_shm_fetcher_.FetchNext();

      if (has_new_data_) {
        event_.set_event_time(
            simple_shm_fetcher_.context().monotonic_event_time);
        event_loop_->AddEvent(&event_);
      }
    }

    return has_new_data_;
  }

  // Consumes the data by calling the callback.
  void HandleEvent() {
    CHECK(has_new_data_);
    DoCallCallback(monotonic_clock::now, simple_shm_fetcher_.context());
    has_new_data_ = false;
    CheckForNewData();
  }

  // Registers us to receive a signal on event reception.
  bool RegisterWakeup(int priority) {
    return simple_shm_fetcher_.RegisterWakeup(priority);
  }

  void UnregisterWakeup() { return simple_shm_fetcher_.UnregisterWakeup(); }

  absl::Span<const char> GetSharedMemory() const {
    return simple_shm_fetcher_.GetConstSharedMemory();
  }

 private:
  bool has_new_data_ = false;

  ShmEventLoop *event_loop_;
  EventHandler<ShmWatcherState> event_;
  SimpleShmFetcher simple_shm_fetcher_;
};

// Adapter class to adapt a timerfd to a TimerHandler.
class ShmTimerHandler final : public TimerHandler {
 public:
  ShmTimerHandler(ShmEventLoop *shm_event_loop, ::std::function<void()> fn)
      : TimerHandler(shm_event_loop, std::move(fn)),
        shm_event_loop_(shm_event_loop),
        event_(this) {
    shm_event_loop_->epoll_.OnReadable(timerfd_.fd(), [this]() {
      // The timer may fire spuriously. HandleEvent on the event loop will
      // call the callback if it is needed. It may also have called it when
      // processing some other event, and the kernel decided to deliver this
      // wakeup anyways.
      timerfd_.Read();
      shm_event_loop_->HandleEvent();
    });
  }

  ~ShmTimerHandler() {
    shm_event_loop_->CheckCurrentThread();
    Disable();
    shm_event_loop_->epoll_.DeleteFd(timerfd_.fd());
  }

  void HandleEvent() {
    CHECK(!event_.valid());
    disabled_ = false;
    const auto monotonic_now = Call(monotonic_clock::now, base_);
    if (event_.valid()) {
      // If someone called Schedule inside Call, rescheduling is already taken
      // care of. Bail.
      return;
    }
    if (disabled_) {
      // Somebody called Disable inside Call, so we don't want to reschedule.
      // Bail.
      return;
    }

    if (repeat_offset_ == std::chrono::seconds(0)) {
      timerfd_.Disable();
      disabled_ = true;
    } else {
      // Compute how many cycles have elapsed and schedule the next iteration
      // in the future.
      const int elapsed_cycles =
          std::max<int>(0, (monotonic_now - base_ + repeat_offset_ -
                            std::chrono::nanoseconds(1)) /
                               repeat_offset_);
      base_ += repeat_offset_ * elapsed_cycles;

      // Update the heap and schedule the timerfd wakeup.
      event_.set_event_time(base_);
      shm_event_loop_->AddEvent(&event_);
      timerfd_.SetTime(base_, std::chrono::seconds(0));
      disabled_ = false;
    }
  }

  void Schedule(monotonic_clock::time_point base,
                monotonic_clock::duration repeat_offset) override {
    shm_event_loop_->CheckCurrentThread();
    if (event_.valid()) {
      shm_event_loop_->RemoveEvent(&event_);
    }

    timerfd_.SetTime(base, repeat_offset);
    base_ = base;
    repeat_offset_ = repeat_offset;
    event_.set_event_time(base_);
    shm_event_loop_->AddEvent(&event_);
    disabled_ = false;
  }

  void Disable() override {
    shm_event_loop_->CheckCurrentThread();
    shm_event_loop_->RemoveEvent(&event_);
    timerfd_.Disable();
    disabled_ = true;
  }

  bool IsDisabled() override { return disabled_; }

 private:
  ShmEventLoop *shm_event_loop_;
  EventHandler<ShmTimerHandler> event_;

  internal::TimerFd timerfd_;

  monotonic_clock::time_point base_;
  monotonic_clock::duration repeat_offset_;

  // Used to track if Disable() was called during the callback, so we know not
  // to reschedule.
  bool disabled_ = true;
};

// Adapter class to the timerfd and PhasedLoop.
class ShmPhasedLoopHandler final : public PhasedLoopHandler {
 public:
  ShmPhasedLoopHandler(ShmEventLoop *shm_event_loop,
                       ::std::function<void(int)> fn,
                       const monotonic_clock::duration interval,
                       const monotonic_clock::duration offset)
      : PhasedLoopHandler(shm_event_loop, std::move(fn), interval, offset),
        shm_event_loop_(shm_event_loop),
        event_(this) {
    shm_event_loop_->epoll_.OnReadable(
        timerfd_.fd(), [this]() { shm_event_loop_->HandleEvent(); });
  }

  void HandleEvent() {
    // The return value for read is the number of cycles that have elapsed.
    // Because we check to see when this event *should* have happened, there
    // are cases where Read() will return 0, when 1 cycle has actually
    // happened. This occurs when the timer interrupt hasn't triggered yet.
    // Therefore, ignore it. Call handles rescheduling and calculating elapsed
    // cycles without any extra help.
    timerfd_.Read();
    event_.Invalidate();

    Call(monotonic_clock::now);
  }

  ~ShmPhasedLoopHandler() override {
    shm_event_loop_->CheckCurrentThread();
    shm_event_loop_->epoll_.DeleteFd(timerfd_.fd());
    shm_event_loop_->RemoveEvent(&event_);
  }

 private:
  // Reschedules the timer.
  void Schedule(monotonic_clock::time_point sleep_time) override {
    shm_event_loop_->CheckCurrentThread();
    if (event_.valid()) {
      shm_event_loop_->RemoveEvent(&event_);
    }

    timerfd_.SetTime(sleep_time, ::aos::monotonic_clock::zero());
    event_.set_event_time(sleep_time);
    shm_event_loop_->AddEvent(&event_);
  }

  ShmEventLoop *shm_event_loop_;
  EventHandler<ShmPhasedLoopHandler> event_;

  internal::TimerFd timerfd_;
};

}  // namespace shm_event_loop_internal

::std::unique_ptr<RawFetcher> ShmEventLoop::MakeRawFetcher(
    const Channel *channel) {
  CheckCurrentThread();
  if (!configuration::ChannelIsReadableOnNode(channel, node())) {
    LOG(FATAL) << "Channel { \"name\": \"" << channel->name()->string_view()
               << "\", \"type\": \"" << channel->type()->string_view()
               << "\" } is not able to be fetched on this node. Check your "
                  "configuration.";
  }

  return ::std::unique_ptr<RawFetcher>(
      new ShmFetcher(shm_base_, this, channel));
}

::std::unique_ptr<RawSender> ShmEventLoop::MakeRawSender(
    const Channel *channel) {
  CheckCurrentThread();
  TakeSender(channel);

  return ::std::unique_ptr<RawSender>(new ShmSender(shm_base_, this, channel));
}

void ShmEventLoop::MakeRawWatcher(
    const Channel *channel,
    std::function<void(const Context &context, const void *message)> watcher) {
  CheckCurrentThread();
  TakeWatcher(channel);

  NewWatcher(::std::unique_ptr<WatcherState>(
      new ShmWatcherState(shm_base_, this, channel, std::move(watcher), true)));
}

void ShmEventLoop::MakeRawNoArgWatcher(
    const Channel *channel,
    std::function<void(const Context &context)> watcher) {
  CheckCurrentThread();
  TakeWatcher(channel);

  NewWatcher(::std::unique_ptr<WatcherState>(new ShmWatcherState(
      shm_base_, this, channel,
      [watcher](const Context &context, const void *) { watcher(context); },
      false)));
}

TimerHandler *ShmEventLoop::AddTimer(::std::function<void()> callback) {
  CheckCurrentThread();
  return NewTimer(::std::unique_ptr<TimerHandler>(
      new ShmTimerHandler(this, ::std::move(callback))));
}

PhasedLoopHandler *ShmEventLoop::AddPhasedLoop(
    ::std::function<void(int)> callback,
    const monotonic_clock::duration interval,
    const monotonic_clock::duration offset) {
  CheckCurrentThread();
  return NewPhasedLoop(::std::unique_ptr<PhasedLoopHandler>(
      new ShmPhasedLoopHandler(this, ::std::move(callback), interval, offset)));
}

void ShmEventLoop::OnRun(::std::function<void()> on_run) {
  CheckCurrentThread();
  on_run_.push_back(::std::move(on_run));
}

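// Verifies that this ShmEventLoop is being used from the expected context:
// with the configured mutex held and/or from the configured thread, when
// those checks have been enabled.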
void ShmEventLoop::CheckCurrentThread() const {
  if (__builtin_expect(check_mutex_ != nullptr, false)) {
    CHECK(check_mutex_->is_locked())
        << ": The configured mutex is not locked while calling a "
           "ShmEventLoop function";
  }
  if (__builtin_expect(!!check_tid_, false)) {
    CHECK_EQ(syscall(SYS_gettid), *check_tid_)
        << ": Being called from the wrong thread";
  }
}

// This is a bit tricky because watchers can generate new events at any time
// (as long as it's in the past). We want to check the watchers at least once
// before declaring there are no events to handle, and we want to check them
// again if event processing takes long enough that we find an event after
// that point in time to handle.
void ShmEventLoop::HandleEvent() {
  // Time through which we've checked for new events in watchers.
  monotonic_clock::time_point checked_until = monotonic_clock::min_time;
  if (!signalfd_) {
    // Nothing to check, so we can bail out immediately once we're out of
    // events.
    CHECK(watchers_.empty());
    checked_until = monotonic_clock::max_time;
  }

  // Loop until we run out of events to check.
  while (true) {
    // Time of the next event we know about. If this is before checked_until,
    // we know there aren't any new events before the next one that we already
    // know about, so no need to check the watchers.
    monotonic_clock::time_point next_time = monotonic_clock::max_time;

    if (EventCount() == 0) {
      if (checked_until != monotonic_clock::min_time) {
        // No events, and we've already checked the watchers at least once, so
        // we're all done.
        //
        // There's a small chance that a watcher has gotten another event in
        // between checked_until and now. If so, then the signalfd will be
        // triggered now and we'll re-enter HandleEvent immediately. This is
        // unlikely though, so we don't want to spend time checking all the
        // watchers unnecessarily.
        break;
      }
    } else {
      next_time = PeekEvent()->event_time();
    }
    monotonic_clock::time_point now;
    bool new_data = false;

    if (next_time > checked_until) {
      // Read all of the signals, because there's no point in waking up again
      // immediately to handle each one if we've fallen behind.
      //
      // This is safe before checking for new data on the watchers. If a signal
      // is cleared here, the corresponding CheckForNewData() call below will
      // pick it up.
      while (true) {
        const signalfd_siginfo result = signalfd_->Read();
        if (result.ssi_signo == 0) {
          break;
        }
        CHECK_EQ(result.ssi_signo, ipc_lib::kWakeupSignal);
      }
      // This is the last time we can guarantee that if a message is published
      // before, we will notice it.
      now = monotonic_clock::now();

      // Check all the watchers for new events.
      for (std::unique_ptr<WatcherState> &base_watcher : watchers_) {
        ShmWatcherState *const watcher =
            reinterpret_cast<ShmWatcherState *>(base_watcher.get());

        // Track if we got a message.
        if (watcher->CheckForNewData()) {
          new_data = true;
        }
      }
      if (EventCount() == 0) {
        // Still no events, all done now.
        break;
      }

      checked_until = now;
      // Check for any new events we found.
      next_time = PeekEvent()->event_time();
    } else {
      now = monotonic_clock::now();
    }

    if (next_time > now) {
      // Ok, we got a message with a timestamp *after* we wrote down time. We
      // need to process it (otherwise we will go to sleep without processing
      // it), but we also need to make sure no other messages have come in
      // before it that we would process out of order. Just go around again to
      // redo the checks.
      if (new_data) {
        continue;
      }
      break;
    }

    EventLoopEvent *const event = PopEvent();
    event->HandleEvent();
  }
}

// RAII class to mask signals.
class ScopedSignalMask {
 public:
  ScopedSignalMask(std::initializer_list<int> signals) {
    sigset_t sigset;
    PCHECK(sigemptyset(&sigset) == 0);
    for (int signal : signals) {
      PCHECK(sigaddset(&sigset, signal) == 0);
    }

    PCHECK(sigprocmask(SIG_BLOCK, &sigset, &old_) == 0);
  }

  ~ScopedSignalMask() { PCHECK(sigprocmask(SIG_SETMASK, &old_, nullptr) == 0); }

 private:
  sigset_t old_;
};

// Class to manage the static state associated with killing multiple event
// loops.
class SignalHandler {
 public:
  // Gets the singleton.
  static SignalHandler *global() {
    static SignalHandler loop;
    return &loop;
  }

  // Handles the signal with the singleton.
  static void HandleSignal(int) { global()->DoHandleSignal(); }

  // Registers an event loop to receive Exit() calls.
  void Register(ShmEventLoop *event_loop) {
    // Block signals while we have the mutex so we never race with the signal
    // handler.
    ScopedSignalMask mask({SIGINT, SIGHUP, SIGTERM});
    std::unique_lock<stl_mutex> locker(mutex_);
    if (event_loops_.size() == 0) {
      // The first caller registers the signal handler.
      struct sigaction new_action;
      sigemptyset(&new_action.sa_mask);
      // This makes it so that 2 control c's to a stuck process will kill it by
      // restoring the original signal handler.
      new_action.sa_flags = SA_RESETHAND;
      new_action.sa_handler = &HandleSignal;

      PCHECK(sigaction(SIGINT, &new_action, &old_action_int_) == 0);
      PCHECK(sigaction(SIGHUP, &new_action, &old_action_hup_) == 0);
      PCHECK(sigaction(SIGTERM, &new_action, &old_action_term_) == 0);
    }

    event_loops_.push_back(event_loop);
  }

  // Unregisters an event loop to receive Exit() calls.
  void Unregister(ShmEventLoop *event_loop) {
    // Block signals while we have the mutex so we never race with the signal
    // handler.
    ScopedSignalMask mask({SIGINT, SIGHUP, SIGTERM});
    std::unique_lock<stl_mutex> locker(mutex_);

    event_loops_.erase(
        std::find(event_loops_.begin(), event_loops_.end(), event_loop));

    if (event_loops_.size() == 0u) {
      // The last caller restores the original signal handlers.
      PCHECK(sigaction(SIGINT, &old_action_int_, nullptr) == 0);
      PCHECK(sigaction(SIGHUP, &old_action_hup_, nullptr) == 0);
      PCHECK(sigaction(SIGTERM, &old_action_term_, nullptr) == 0);
    }
  }

 private:
  void DoHandleSignal() {
    // We block signals while grabbing the lock, so there should never be a
    // race. Confirm that this is true using trylock.
    CHECK(mutex_.try_lock()) << ": sigprocmask failed to block signals while "
                                "modifying the event loop list.";
    for (ShmEventLoop *event_loop : event_loops_) {
      event_loop->Exit();
    }
    mutex_.unlock();
  }

  // Mutex to protect all state.
  stl_mutex mutex_;
  std::vector<ShmEventLoop *> event_loops_;
  struct sigaction old_action_int_;
  struct sigaction old_action_hup_;
  struct sigaction old_action_term_;
};

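// Runs the event loop: registers the exit signal handler, sets up the wakeup
// signalfd for watchers, elevates to the configured realtime priority and
// affinity, and then runs epoll until Exit() is called.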
void ShmEventLoop::Run() {
  CheckCurrentThread();
  SignalHandler::global()->Register(this);

  if (watchers_.size() > 0) {
    signalfd_.reset(new ipc_lib::SignalFd({ipc_lib::kWakeupSignal}));

    epoll_.OnReadable(signalfd_->fd(), [this]() { HandleEvent(); });
  }

  MaybeScheduleTimingReports();

  ReserveEvents();

  {
    logging::ScopedLogRestorer prev_logger;
    AosLogToFbs aos_logger;
    if (!skip_logger_) {
      aos_logger.Initialize(&name_, MakeSender<logging::LogMessageFbs>("/aos"));
      prev_logger.Swap(aos_logger.implementation());
    }

    aos::SetCurrentThreadName(name_.substr(0, 16));
    const cpu_set_t default_affinity = DefaultAffinity();
    if (!CPU_EQUAL(&affinity_, &default_affinity)) {
      ::aos::SetCurrentThreadAffinity(affinity_);
    }
    // Now, all the callbacks are setup. Lock everything into memory and go RT.
    if (priority_ != 0) {
      ::aos::InitRT();

      LOG(INFO) << "Setting priority to " << priority_;
      ::aos::SetCurrentThreadRealtimePriority(priority_);
    }

    set_is_running(true);

    // Now that we are realtime (but before the OnRun handlers run), snap the
    // queue index.
    for (::std::unique_ptr<WatcherState> &watcher : watchers_) {
      watcher->Startup(this);
    }

    // Now that we are RT, run all the OnRun handlers.
    SetTimerContext(monotonic_clock::now());
    for (const auto &run : on_run_) {
      run();
    }

    // And start our main event loop which runs all the timers and handles Quit.
    epoll_.Run();

    // Once epoll exits, there is no useful nonrt work left to do.
    set_is_running(false);

    // Nothing time or synchronization critical needs to happen after this
    // point. Drop RT priority.
    ::aos::UnsetCurrentThreadRealtimePriority();
  }

  for (::std::unique_ptr<WatcherState> &base_watcher : watchers_) {
    ShmWatcherState *watcher =
        reinterpret_cast<ShmWatcherState *>(base_watcher.get());
    watcher->UnregisterWakeup();
  }

  if (watchers_.size() > 0) {
    epoll_.DeleteFd(signalfd_->fd());
    signalfd_.reset();
  }

  SignalHandler::global()->Unregister(this);

  // Trigger any remaining senders or fetchers to be cleared before destroying
  // the event loop so the book keeping matches. Do this in the thread that
  // created the timing reporter.
  timing_report_sender_.reset();
  ClearContext();
}

void ShmEventLoop::Exit() { epoll_.Quit(); }

std::unique_ptr<ExitHandle> ShmEventLoop::MakeExitHandle() {
  return std::make_unique<ShmExitHandle>(this);
}

ShmEventLoop::~ShmEventLoop() {
  CheckCurrentThread();
  // Force everything with a registered fd with epoll to be destroyed now.
  timers_.clear();
  phased_loops_.clear();
  watchers_.clear();

  CHECK(!is_running()) << ": ShmEventLoop destroyed while running";
  CHECK_EQ(0, exit_handle_count_)
      << ": All ExitHandles must be destroyed before the ShmEventLoop";
}

void ShmEventLoop::SetRuntimeRealtimePriority(int priority) {
  CheckCurrentThread();
  if (is_running()) {
    LOG(FATAL) << "Cannot set realtime priority while running.";
  }
  priority_ = priority;
}

void ShmEventLoop::SetRuntimeAffinity(const cpu_set_t &cpuset) {
  CheckCurrentThread();
  if (is_running()) {
    LOG(FATAL) << "Cannot set affinity while running.";
  }
  affinity_ = cpuset;
}

void ShmEventLoop::set_name(const std::string_view name) {
  CheckCurrentThread();
  name_ = std::string(name);
  UpdateTimingReport();
}

absl::Span<const char> ShmEventLoop::GetWatcherSharedMemory(
    const Channel *channel) {
  CheckCurrentThread();
  ShmWatcherState *const watcher_state =
      static_cast<ShmWatcherState *>(GetWatcherState(channel));
  return watcher_state->GetSharedMemory();
}

int ShmEventLoop::NumberBuffers(const Channel *channel) {
  CheckCurrentThread();
  return ipc_lib::MakeQueueConfiguration(configuration(), channel)
      .num_messages();
}

absl::Span<char> ShmEventLoop::GetShmSenderSharedMemory(
    const aos::RawSender *sender) const {
  CheckCurrentThread();
  return static_cast<const ShmSender *>(sender)->GetSharedMemory();
}

absl::Span<const char> ShmEventLoop::GetShmFetcherPrivateMemory(
    const aos::RawFetcher *fetcher) const {
  CheckCurrentThread();
  return static_cast<const ShmFetcher *>(fetcher)->GetPrivateMemory();
}

pid_t ShmEventLoop::GetTid() {
  CheckCurrentThread();
  return syscall(SYS_gettid);
}

}  // namespace aos