#include "aos/events/shm_event_loop.h"

#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/types.h>

#include <algorithm>
#include <atomic>
#include <chrono>
#include <iterator>
#include <stdexcept>

#include "glog/logging.h"

#include "aos/events/aos_logging.h"
#include "aos/events/epoll.h"
#include "aos/events/event_loop_generated.h"
#include "aos/events/timing_statistics.h"
#include "aos/init.h"
#include "aos/ipc_lib/lockless_queue.h"
#include "aos/ipc_lib/memory_mapped_queue.h"
#include "aos/realtime.h"
#include "aos/stl_mutex/stl_mutex.h"
#include "aos/util/file.h"
#include "aos/util/phased_loop.h"

namespace {

// Returns the portion of the path after the last /. This very much assumes
// that the application name is null terminated.
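// For example, Filename("/usr/bin/ping") returns "ping"; a path with no '/'
// is returned unchanged.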
const char *Filename(const char *path) {
  const std::string_view path_string_view = path;
  auto last_slash_pos = path_string_view.find_last_of("/");

  return last_slash_pos == std::string_view::npos ? path
                                                  : path + last_slash_pos + 1;
}

}  // namespace

DEFINE_string(shm_base, "/dev/shm/aos",
              "Directory to place queue backing mmaped files in.");
// This value is affected by the umask of the calling process, which defaults
// to the user's value (check yours by running `umask` on the command line).
// Any file mode requested is transformed as mode & ~umask, and the default
// umask is 0022 (allow all permissions for the user, don't allow writes for
// the group or others).
// See https://man7.org/linux/man-pages/man2/umask.2.html for more details.
// WITH THE DEFAULT UMASK YOU WON'T ACTUALLY GET THESE PERMISSIONS :)
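// For example, with the default flag value and the default umask:
//   0770 & ~0022 == 0750 (rwx for the user, r-x for the group, none for
//   others).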
DEFINE_uint32(permissions, 0770,
              "Permissions to make shared memory files and folders, "
              "affected by the process's umask. "
              "See shm_event_loop.cc for more details.");
DEFINE_string(application_name, Filename(program_invocation_name),
              "The application name");

namespace aos {

using namespace shm_event_loop_internal;

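// Sets the base directory used for the queue backing files.  For example,
// SetShmBase("/tmp") places them under "/tmp/aos".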
void SetShmBase(const std::string_view base) {
  FLAGS_shm_base = std::string(base) + "/aos";
}

namespace {

const Node *MaybeMyNode(const Configuration *configuration) {
  if (!configuration->has_nodes()) {
    return nullptr;
  }

  return configuration::GetMyNode(configuration);
}

}  // namespace

ShmEventLoop::ShmEventLoop(const Configuration *configuration)
    : EventLoop(configuration),
      boot_uuid_(UUID::BootUUID()),
      shm_base_(FLAGS_shm_base),
      name_(FLAGS_application_name),
      node_(MaybeMyNode(configuration)) {
  CHECK(IsInitialized()) << ": Need to initialize AOS first.";
  ClearContext();
  if (configuration->has_nodes()) {
    CHECK(node_ != nullptr) << ": Couldn't find node in config.";
  }
}

namespace shm_event_loop_internal {

class SimpleShmFetcher {
 public:
  explicit SimpleShmFetcher(std::string_view shm_base, ShmEventLoop *event_loop,
                            const Channel *channel)
      : event_loop_(event_loop),
        channel_(channel),
        lockless_queue_memory_(shm_base, FLAGS_permissions,
                               event_loop->configuration(), channel),
        reader_(lockless_queue_memory_.queue()) {
    context_.data = nullptr;
    // Point the queue index at the next index to read starting now.  This
    // makes it such that FetchNext will read the next message sent after
    // the fetcher is created.
    PointAtNextQueueIndex();
  }

  ~SimpleShmFetcher() {}

  // Sets this object to pin or copy data, as configured in the channel.
  void RetrieveData() {
    if (channel_->read_method() == ReadMethod::PIN) {
      PinDataOnFetch();
    } else {
      CopyDataOnFetch();
    }
  }

  // Sets this object to copy data out of the shared memory into a private
  // buffer when fetching.
  void CopyDataOnFetch() {
    CHECK(!pin_data());
    data_storage_.reset(static_cast<char *>(
        malloc(channel_->max_size() + kChannelDataAlignment - 1)));
  }

  // Sets this object to pin data in shared memory when fetching.
  void PinDataOnFetch() {
    CHECK(!copy_data());
    auto maybe_pinner =
        ipc_lib::LocklessQueuePinner::Make(lockless_queue_memory_.queue());
    if (!maybe_pinner) {
      LOG(FATAL) << "Failed to create reader on "
                 << configuration::CleanedChannelToString(channel_)
                 << ", too many readers.";
    }
    pinner_ = std::move(maybe_pinner.value());
  }

  // Points the next message to fetch at the queue index which will be
  // populated next.
  void PointAtNextQueueIndex() {
    actual_queue_index_ = reader_.LatestIndex();
    if (!actual_queue_index_.valid()) {
      // Nothing in the queue.  The next element will show up at the 0th
      // index in the queue.
      actual_queue_index_ = ipc_lib::QueueIndex::Zero(
          LocklessQueueSize(lockless_queue_memory_.memory()));
    } else {
      actual_queue_index_ = actual_queue_index_.Increment();
    }
  }

  bool FetchNext() {
    const ipc_lib::LocklessQueueReader::Result read_result =
        DoFetch(actual_queue_index_);

    return read_result == ipc_lib::LocklessQueueReader::Result::GOOD;
  }

  bool Fetch() {
    const ipc_lib::QueueIndex queue_index = reader_.LatestIndex();
    // actual_queue_index_ is only meaningful if it was set by a previous Fetch
    // or FetchNext, which is also when context_.data gets set.  So, only use
    // it to skip the fetch when context_.data is non-null.
    //
    // Also, if the latest queue index is invalid, we are empty.  So there
    // is nothing to fetch.
    if ((context_.data != nullptr &&
         queue_index == actual_queue_index_.DecrementBy(1u)) ||
        !queue_index.valid()) {
      return false;
    }

    const ipc_lib::LocklessQueueReader::Result read_result =
        DoFetch(queue_index);

    CHECK(read_result != ipc_lib::LocklessQueueReader::Result::NOTHING_NEW)
        << ": Queue index went backwards.  This should never happen.  "
        << configuration::CleanedChannelToString(channel_);

    return read_result == ipc_lib::LocklessQueueReader::Result::GOOD;
  }

  Context context() const { return context_; }

  bool RegisterWakeup(int priority) {
    CHECK(!watcher_);
    watcher_ = ipc_lib::LocklessQueueWatcher::Make(
        lockless_queue_memory_.queue(), priority);
    return static_cast<bool>(watcher_);
  }

  void UnregisterWakeup() {
    CHECK(watcher_);
    watcher_ = std::nullopt;
  }

  absl::Span<char> GetMutableSharedMemory() {
    return lockless_queue_memory_.GetMutableSharedMemory();
  }

  absl::Span<const char> GetConstSharedMemory() const {
    return lockless_queue_memory_.GetConstSharedMemory();
  }

  absl::Span<const char> GetPrivateMemory() const {
    if (pin_data()) {
      return lockless_queue_memory_.GetConstSharedMemory();
    }
    return absl::Span<char>(
        const_cast<SimpleShmFetcher *>(this)->data_storage_start(),
        LocklessQueueMessageDataSize(lockless_queue_memory_.memory()));
  }

 private:
  ipc_lib::LocklessQueueReader::Result DoFetch(
      ipc_lib::QueueIndex queue_index) {
    // TODO(austin): Get behind and make sure it dies.
    char *copy_buffer = nullptr;
    if (copy_data()) {
      copy_buffer = data_storage_start();
    }
    ipc_lib::LocklessQueueReader::Result read_result = reader_.Read(
        queue_index.index(), &context_.monotonic_event_time,
        &context_.realtime_event_time, &context_.monotonic_remote_time,
        &context_.realtime_remote_time, &context_.remote_queue_index,
        &context_.source_boot_uuid, &context_.size, copy_buffer,
        std::ref(should_fetch_));

    if (read_result == ipc_lib::LocklessQueueReader::Result::GOOD) {
      if (pin_data()) {
        const int pin_result = pinner_->PinIndex(queue_index.index());
        CHECK(pin_result >= 0)
            << ": Got behind while reading and the last message was modified "
               "out from under us while we tried to pin it.  Don't get so far "
               "behind on: "
            << configuration::CleanedChannelToString(channel_);
        context_.buffer_index = pin_result;
      } else {
        context_.buffer_index = -1;
      }

      context_.queue_index = queue_index.index();
      if (context_.remote_queue_index == 0xffffffffu) {
        context_.remote_queue_index = context_.queue_index;
      }
      if (context_.monotonic_remote_time == aos::monotonic_clock::min_time) {
        context_.monotonic_remote_time = context_.monotonic_event_time;
      }
      if (context_.realtime_remote_time == aos::realtime_clock::min_time) {
        context_.realtime_remote_time = context_.realtime_event_time;
      }
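      // The message payload occupies the tail of the buffer, so the data
      // pointer below is the end of the buffer minus the message size.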
      const char *const data = DataBuffer();
      if (data) {
        context_.data =
            data +
            LocklessQueueMessageDataSize(lockless_queue_memory_.memory()) -
            context_.size;
      } else {
        context_.data = nullptr;
      }
      actual_queue_index_ = queue_index.Increment();
    }

    // Make sure the data wasn't modified while we were reading it.  This
    // can only happen if you are reading the last message *while* it is
    // being written to, which means you are pretty far behind.
    CHECK(read_result != ipc_lib::LocklessQueueReader::Result::OVERWROTE)
        << ": Got behind while reading and the last message was modified "
           "out from under us while we were reading it.  Don't get so far "
           "behind on: "
        << configuration::CleanedChannelToString(channel_);

    // We fell behind between when we read the index and read the value.
    // This isn't worth recovering from since this means we went to sleep
    // for a long time in the middle of this function.
    if (read_result == ipc_lib::LocklessQueueReader::Result::TOO_OLD) {
      event_loop_->SendTimingReport();
      LOG(FATAL) << "The next message is no longer available.  "
                 << configuration::CleanedChannelToString(channel_);
    }

    return read_result;
  }

  char *data_storage_start() const {
    CHECK(copy_data());
    return RoundChannelData(data_storage_.get(), channel_->max_size());
  }

  // Note that for some modes the return value will change as new messages are
  // read.
  const char *DataBuffer() const {
    if (copy_data()) {
      return data_storage_start();
    }
    if (pin_data()) {
      return static_cast<const char *>(pinner_->Data());
    }
    return nullptr;
  }

  bool copy_data() const { return static_cast<bool>(data_storage_); }
  bool pin_data() const { return static_cast<bool>(pinner_); }

  aos::ShmEventLoop *event_loop_;
  const Channel *const channel_;
  ipc_lib::MemoryMappedQueue lockless_queue_memory_;
  ipc_lib::LocklessQueueReader reader_;
  // This being nullopt indicates we're not looking for wakeups right now.
  std::optional<ipc_lib::LocklessQueueWatcher> watcher_;

  ipc_lib::QueueIndex actual_queue_index_ = ipc_lib::QueueIndex::Invalid();

  // This being empty indicates we're not going to copy data.
  std::unique_ptr<char, decltype(&free)> data_storage_{nullptr, &free};

  // This being nullopt indicates we're not going to pin messages.
  std::optional<ipc_lib::LocklessQueuePinner> pinner_;

  Context context_;

  // Pre-allocated should_fetch function so we don't allocate.
  std::function<bool(const Context &)> should_fetch_ = [](const Context &) {
    return true;
  };
};

class ShmFetcher : public RawFetcher {
 public:
  explicit ShmFetcher(std::string_view shm_base, ShmEventLoop *event_loop,
                      const Channel *channel)
      : RawFetcher(event_loop, channel),
        simple_shm_fetcher_(shm_base, event_loop, channel) {
    simple_shm_fetcher_.RetrieveData();
  }

  ~ShmFetcher() override {
    shm_event_loop()->CheckCurrentThread();
    context_.data = nullptr;
  }

  std::pair<bool, monotonic_clock::time_point> DoFetchNext() override {
    shm_event_loop()->CheckCurrentThread();
    if (simple_shm_fetcher_.FetchNext()) {
      context_ = simple_shm_fetcher_.context();
      return std::make_pair(true, monotonic_clock::now());
    }
    return std::make_pair(false, monotonic_clock::min_time);
  }

  std::pair<bool, monotonic_clock::time_point> DoFetch() override {
    shm_event_loop()->CheckCurrentThread();
    if (simple_shm_fetcher_.Fetch()) {
      context_ = simple_shm_fetcher_.context();
      return std::make_pair(true, monotonic_clock::now());
    }
    return std::make_pair(false, monotonic_clock::min_time);
  }

  absl::Span<const char> GetPrivateMemory() const {
    return simple_shm_fetcher_.GetPrivateMemory();
  }

 private:
  const ShmEventLoop *shm_event_loop() const {
    return static_cast<const ShmEventLoop *>(event_loop());
  }

  SimpleShmFetcher simple_shm_fetcher_;
};

class ShmExitHandle : public ExitHandle {
 public:
  ShmExitHandle(ShmEventLoop *event_loop) : event_loop_(event_loop) {
    ++event_loop_->exit_handle_count_;
  }
  ~ShmExitHandle() override {
    CHECK_GT(event_loop_->exit_handle_count_, 0);
    --event_loop_->exit_handle_count_;
  }

  void Exit() override { event_loop_->Exit(); }

 private:
  ShmEventLoop *const event_loop_;
};

class ShmSender : public RawSender {
 public:
  explicit ShmSender(std::string_view shm_base, EventLoop *event_loop,
                     const Channel *channel)
      : RawSender(event_loop, channel),
        lockless_queue_memory_(shm_base, FLAGS_permissions,
                               event_loop->configuration(), channel),
        lockless_queue_sender_(
            VerifySender(ipc_lib::LocklessQueueSender::Make(
                             lockless_queue_memory_.queue(),
                             configuration::ChannelStorageDuration(
                                 event_loop->configuration(), channel)),
                         channel)),
        wake_upper_(lockless_queue_memory_.queue()) {}

  ~ShmSender() override { shm_event_loop()->CheckCurrentThread(); }

  static ipc_lib::LocklessQueueSender VerifySender(
      std::optional<ipc_lib::LocklessQueueSender> sender,
      const Channel *channel) {
    if (sender) {
      return std::move(sender.value());
    }
    LOG(FATAL) << "Failed to create sender on "
               << configuration::CleanedChannelToString(channel)
               << ", too many senders.";
  }

  void *data() override {
    shm_event_loop()->CheckCurrentThread();
    return lockless_queue_sender_.Data();
  }
  size_t size() override {
    shm_event_loop()->CheckCurrentThread();
    return lockless_queue_sender_.size();
  }

  Error DoSend(size_t length,
               aos::monotonic_clock::time_point monotonic_remote_time,
               aos::realtime_clock::time_point realtime_remote_time,
               uint32_t remote_queue_index,
               const UUID &source_boot_uuid) override {
    shm_event_loop()->CheckCurrentThread();
    CHECK_LE(length, static_cast<size_t>(channel()->max_size()))
        << ": Sent too big a message on "
        << configuration::CleanedChannelToString(channel());
    const auto result = lockless_queue_sender_.Send(
        length, monotonic_remote_time, realtime_remote_time,
        remote_queue_index, source_boot_uuid, &monotonic_sent_time_,
        &realtime_sent_time_, &sent_queue_index_);
    CHECK_NE(result, ipc_lib::LocklessQueueSender::Result::INVALID_REDZONE)
        << ": Somebody wrote outside the buffer of their message on channel "
        << configuration::CleanedChannelToString(channel());

    wake_upper_.Wakeup(event_loop()->is_running()
                           ? event_loop()->runtime_realtime_priority()
                           : 0);
    return CheckLocklessQueueResult(result);
  }

  Error DoSend(const void *msg, size_t length,
               aos::monotonic_clock::time_point monotonic_remote_time,
               aos::realtime_clock::time_point realtime_remote_time,
               uint32_t remote_queue_index,
               const UUID &source_boot_uuid) override {
    shm_event_loop()->CheckCurrentThread();
    CHECK_LE(length, static_cast<size_t>(channel()->max_size()))
        << ": Sent too big a message on "
        << configuration::CleanedChannelToString(channel());
    const auto result = lockless_queue_sender_.Send(
        reinterpret_cast<const char *>(msg), length, monotonic_remote_time,
        realtime_remote_time, remote_queue_index, source_boot_uuid,
        &monotonic_sent_time_, &realtime_sent_time_, &sent_queue_index_);

    CHECK_NE(result, ipc_lib::LocklessQueueSender::Result::INVALID_REDZONE)
        << ": Somebody wrote outside the buffer of their message on "
           "channel "
        << configuration::CleanedChannelToString(channel());
    wake_upper_.Wakeup(event_loop()->is_running()
                           ? event_loop()->runtime_realtime_priority()
                           : 0);

    return CheckLocklessQueueResult(result);
  }

  absl::Span<char> GetSharedMemory() const {
    return lockless_queue_memory_.GetMutableSharedMemory();
  }

  int buffer_index() override {
    shm_event_loop()->CheckCurrentThread();
    return lockless_queue_sender_.buffer_index();
  }

 private:
  const ShmEventLoop *shm_event_loop() const {
    return static_cast<const ShmEventLoop *>(event_loop());
  }

  RawSender::Error CheckLocklessQueueResult(
      const ipc_lib::LocklessQueueSender::Result &result) {
    switch (result) {
      case ipc_lib::LocklessQueueSender::Result::GOOD:
        return Error::kOk;
      case ipc_lib::LocklessQueueSender::Result::MESSAGES_SENT_TOO_FAST:
        return Error::kMessagesSentTooFast;
      case ipc_lib::LocklessQueueSender::Result::INVALID_REDZONE:
        return Error::kInvalidRedzone;
    }
    LOG(FATAL) << "Unknown lockless queue sender result: "
               << static_cast<int>(result);
  }

  ipc_lib::MemoryMappedQueue lockless_queue_memory_;
  ipc_lib::LocklessQueueSender lockless_queue_sender_;
  ipc_lib::LocklessQueueWakeUpper wake_upper_;
};

// Class to manage the state for a Watcher.
class ShmWatcherState : public WatcherState {
 public:
  ShmWatcherState(
      std::string_view shm_base, ShmEventLoop *event_loop,
      const Channel *channel,
      std::function<void(const Context &context, const void *message)> fn,
      bool copy_data)
      : WatcherState(event_loop, channel, std::move(fn)),
        event_loop_(event_loop),
        event_(this),
        simple_shm_fetcher_(shm_base, event_loop, channel) {
    if (copy_data) {
      simple_shm_fetcher_.RetrieveData();
    }
  }

  ~ShmWatcherState() override {
    event_loop_->CheckCurrentThread();
    event_loop_->RemoveEvent(&event_);
  }

  void Startup(EventLoop *event_loop) override {
    event_loop_->CheckCurrentThread();
    simple_shm_fetcher_.PointAtNextQueueIndex();
    CHECK(RegisterWakeup(event_loop->runtime_realtime_priority()));
  }

  // Returns true if there is new data available.
  bool CheckForNewData() {
    if (!has_new_data_) {
      has_new_data_ = simple_shm_fetcher_.FetchNext();

      if (has_new_data_) {
        event_.set_event_time(
            simple_shm_fetcher_.context().monotonic_event_time);
        event_loop_->AddEvent(&event_);
      }
    }

    return has_new_data_;
  }

  // Consumes the data by calling the callback.
  void HandleEvent() {
    CHECK(has_new_data_);
    DoCallCallback(monotonic_clock::now, simple_shm_fetcher_.context());
    has_new_data_ = false;
    CheckForNewData();
  }

  // Registers us to receive a signal on event reception.
  bool RegisterWakeup(int priority) {
    return simple_shm_fetcher_.RegisterWakeup(priority);
  }

  void UnregisterWakeup() { return simple_shm_fetcher_.UnregisterWakeup(); }

  absl::Span<const char> GetSharedMemory() const {
    return simple_shm_fetcher_.GetConstSharedMemory();
  }

 private:
  bool has_new_data_ = false;

  ShmEventLoop *event_loop_;
  EventHandler<ShmWatcherState> event_;
  SimpleShmFetcher simple_shm_fetcher_;
};

// Adapter class to adapt a timerfd to a TimerHandler.
class ShmTimerHandler final : public TimerHandler {
 public:
  ShmTimerHandler(ShmEventLoop *shm_event_loop, ::std::function<void()> fn)
      : TimerHandler(shm_event_loop, std::move(fn)),
        shm_event_loop_(shm_event_loop),
        event_(this) {
    shm_event_loop_->epoll_.OnReadable(timerfd_.fd(), [this]() {
      // The timer may fire spuriously.  HandleEvent on the event loop will
      // call the callback if it is needed.  It may also have called it when
      // processing some other event, and the kernel decided to deliver this
      // wakeup anyways.
      timerfd_.Read();
      shm_event_loop_->HandleEvent();
    });
  }

  ~ShmTimerHandler() {
    shm_event_loop_->CheckCurrentThread();
    Disable();
    shm_event_loop_->epoll_.DeleteFd(timerfd_.fd());
  }

  void HandleEvent() {
    CHECK(!event_.valid());
    disabled_ = false;
    const auto monotonic_now = Call(monotonic_clock::now, base_);
    if (event_.valid()) {
      // If someone called Schedule inside Call, rescheduling is already taken
      // care of.  Bail.
      return;
    }
    if (disabled_) {
      // Somebody called Disable inside Call, so we don't want to reschedule.
      // Bail.
      return;
    }

    if (repeat_offset_ == std::chrono::seconds(0)) {
      timerfd_.Disable();
      disabled_ = true;
    } else {
      // Compute how many cycles have elapsed and schedule the next iteration
      // at the first repeat_offset_ multiple at or after now.
      const int elapsed_cycles =
          std::max<int>(0, (monotonic_now - base_ + repeat_offset_ -
                            std::chrono::nanoseconds(1)) /
                               repeat_offset_);
      base_ += repeat_offset_ * elapsed_cycles;
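      // For example, with base_ = 0ms, repeat_offset_ = 10ms, and
      // monotonic_now = 25ms: elapsed_cycles = (35ms - 1ns) / 10ms = 3, so
      // base_ advances to 30ms, the first multiple of the period after now.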

      // Update the heap and schedule the timerfd wakeup.
      event_.set_event_time(base_);
      shm_event_loop_->AddEvent(&event_);
      timerfd_.SetTime(base_, std::chrono::seconds(0));
      disabled_ = false;
    }
  }

  void Schedule(monotonic_clock::time_point base,
                monotonic_clock::duration repeat_offset) override {
    shm_event_loop_->CheckCurrentThread();
    if (event_.valid()) {
      shm_event_loop_->RemoveEvent(&event_);
    }

    timerfd_.SetTime(base, repeat_offset);
    base_ = base;
    repeat_offset_ = repeat_offset;
    event_.set_event_time(base_);
    shm_event_loop_->AddEvent(&event_);
    disabled_ = false;
  }

  void Disable() override {
    shm_event_loop_->CheckCurrentThread();
    shm_event_loop_->RemoveEvent(&event_);
    timerfd_.Disable();
    disabled_ = true;
  }

  bool IsDisabled() override { return disabled_; }

 private:
  ShmEventLoop *shm_event_loop_;
  EventHandler<ShmTimerHandler> event_;

  internal::TimerFd timerfd_;

  monotonic_clock::time_point base_;
  monotonic_clock::duration repeat_offset_;

  // Used to track if Disable() was called during the callback, so we know not
  // to reschedule.
  bool disabled_ = true;
};

// Adapter class to the timerfd and PhasedLoop.
class ShmPhasedLoopHandler final : public PhasedLoopHandler {
 public:
  ShmPhasedLoopHandler(ShmEventLoop *shm_event_loop,
                       ::std::function<void(int)> fn,
                       const monotonic_clock::duration interval,
                       const monotonic_clock::duration offset)
      : PhasedLoopHandler(shm_event_loop, std::move(fn), interval, offset),
        shm_event_loop_(shm_event_loop),
        event_(this) {
    shm_event_loop_->epoll_.OnReadable(
        timerfd_.fd(), [this]() { shm_event_loop_->HandleEvent(); });
  }

  void HandleEvent() {
    // The return value for read is the number of cycles that have elapsed.
    // Because we check to see when this event *should* have happened, there
    // are cases where Read() will return 0, when 1 cycle has actually
    // happened.  This occurs when the timer interrupt hasn't triggered yet.
    // Therefore, ignore it.  Call handles rescheduling and calculating elapsed
    // cycles without any extra help.
    timerfd_.Read();
    event_.Invalidate();

    Call(monotonic_clock::now);
  }

  ~ShmPhasedLoopHandler() override {
    shm_event_loop_->CheckCurrentThread();
    shm_event_loop_->epoll_.DeleteFd(timerfd_.fd());
    shm_event_loop_->RemoveEvent(&event_);
  }

 private:
  // Reschedules the timer.
  void Schedule(monotonic_clock::time_point sleep_time) override {
    shm_event_loop_->CheckCurrentThread();
    if (event_.valid()) {
      shm_event_loop_->RemoveEvent(&event_);
    }

    timerfd_.SetTime(sleep_time, ::aos::monotonic_clock::zero());
    event_.set_event_time(sleep_time);
    shm_event_loop_->AddEvent(&event_);
  }

  ShmEventLoop *shm_event_loop_;
  EventHandler<ShmPhasedLoopHandler> event_;

  internal::TimerFd timerfd_;
};

}  // namespace shm_event_loop_internal

::std::unique_ptr<RawFetcher> ShmEventLoop::MakeRawFetcher(
    const Channel *channel) {
  CheckCurrentThread();
  if (!configuration::ChannelIsReadableOnNode(channel, node())) {
    LOG(FATAL) << "Channel { \"name\": \"" << channel->name()->string_view()
               << "\", \"type\": \"" << channel->type()->string_view()
               << "\" } is not able to be fetched on this node.  Check your "
                  "configuration.";
  }

  return ::std::unique_ptr<RawFetcher>(
      new ShmFetcher(shm_base_, this, channel));
}

::std::unique_ptr<RawSender> ShmEventLoop::MakeRawSender(
    const Channel *channel) {
  CheckCurrentThread();
  TakeSender(channel);

  return ::std::unique_ptr<RawSender>(new ShmSender(shm_base_, this, channel));
}

void ShmEventLoop::MakeRawWatcher(
    const Channel *channel,
    std::function<void(const Context &context, const void *message)> watcher) {
  CheckCurrentThread();
  TakeWatcher(channel);

  NewWatcher(::std::unique_ptr<WatcherState>(
      new ShmWatcherState(shm_base_, this, channel, std::move(watcher), true)));
}

void ShmEventLoop::MakeRawNoArgWatcher(
    const Channel *channel,
    std::function<void(const Context &context)> watcher) {
  CheckCurrentThread();
  TakeWatcher(channel);

  NewWatcher(::std::unique_ptr<WatcherState>(new ShmWatcherState(
      shm_base_, this, channel,
      [watcher](const Context &context, const void *) { watcher(context); },
      false)));
}

TimerHandler *ShmEventLoop::AddTimer(::std::function<void()> callback) {
  CheckCurrentThread();
  return NewTimer(::std::unique_ptr<TimerHandler>(
      new ShmTimerHandler(this, ::std::move(callback))));
}

PhasedLoopHandler *ShmEventLoop::AddPhasedLoop(
    ::std::function<void(int)> callback,
    const monotonic_clock::duration interval,
    const monotonic_clock::duration offset) {
  CheckCurrentThread();
  return NewPhasedLoop(::std::unique_ptr<PhasedLoopHandler>(
      new ShmPhasedLoopHandler(this, ::std::move(callback), interval, offset)));
}

void ShmEventLoop::OnRun(::std::function<void()> on_run) {
  CheckCurrentThread();
  on_run_.push_back(::std::move(on_run));
}

void ShmEventLoop::CheckCurrentThread() const {
  if (__builtin_expect(check_mutex_ != nullptr, false)) {
    CHECK(check_mutex_->is_locked())
        << ": The configured mutex is not locked while calling a "
           "ShmEventLoop function";
  }
  if (__builtin_expect(!!check_tid_, false)) {
    CHECK_EQ(syscall(SYS_gettid), *check_tid_)
        << ": Being called from the wrong thread";
  }
}

// This is a bit tricky because watchers can generate new events at any time
// (as long as it's in the past).  We want to check the watchers at least once
// before declaring there are no events to handle, and we want to check them
// again if event processing takes long enough that we find an event after
// that point in time to handle.
void ShmEventLoop::HandleEvent() {
  // Time through which we've checked for new events in watchers.
  monotonic_clock::time_point checked_until = monotonic_clock::min_time;
  if (!signalfd_) {
    // Nothing to check, so we can bail out immediately once we're out of
    // events.
    CHECK(watchers_.empty());
    checked_until = monotonic_clock::max_time;
  }

  // Loop until we run out of events to check.
  while (true) {
    // Time of the next event we know about.  If this is before checked_until,
    // we know there aren't any new events before the next one that we already
    // know about, so no need to check the watchers.
    monotonic_clock::time_point next_time = monotonic_clock::max_time;

    if (EventCount() == 0) {
      if (checked_until != monotonic_clock::min_time) {
        // No events, and we've already checked the watchers at least once, so
        // we're all done.
        //
        // There's a small chance that a watcher has gotten another event in
        // between checked_until and now.  If so, then the signalfd will be
        // triggered now and we'll re-enter HandleEvent immediately.  This is
        // unlikely though, so we don't want to spend time checking all the
        // watchers unnecessarily.
        break;
      }
    } else {
      next_time = PeekEvent()->event_time();
    }
    monotonic_clock::time_point now;
    bool new_data = false;

    if (next_time > checked_until) {
      // Read all of the signals, because there's no point in waking up again
      // immediately to handle each one if we've fallen behind.
      //
      // This is safe before checking for new data on the watchers.  If a
      // signal is cleared here, the corresponding CheckForNewData() call below
      // will pick it up.
      while (true) {
        const signalfd_siginfo result = signalfd_->Read();
        if (result.ssi_signo == 0) {
          break;
        }
        CHECK_EQ(result.ssi_signo, ipc_lib::kWakeupSignal);
      }
      // This is the last point in time for which we can guarantee that a
      // message published before it will be noticed.
      now = monotonic_clock::now();

      // Check all the watchers for new events.
      for (std::unique_ptr<WatcherState> &base_watcher : watchers_) {
        ShmWatcherState *const watcher =
            reinterpret_cast<ShmWatcherState *>(base_watcher.get());

        // Track if we got a message.
        if (watcher->CheckForNewData()) {
          new_data = true;
        }
      }
      if (EventCount() == 0) {
        // Still no events, all done now.
        break;
      }

      checked_until = now;
      // Check for any new events we found.
      next_time = PeekEvent()->event_time();
    } else {
      now = monotonic_clock::now();
    }

    if (next_time > now) {
      // Ok, we got a message with a timestamp *after* we wrote down the time.
      // We need to process it (otherwise we will go to sleep without
      // processing it), but we also need to make sure no other messages have
      // come in before it that we would process out of order.  Just go around
      // again to redo the checks.
      if (new_data) {
        continue;
      }
      break;
    }

    EventLoopEvent *const event = PopEvent();
    event->HandleEvent();
  }
}

// RAII class to mask signals.
class ScopedSignalMask {
 public:
  ScopedSignalMask(std::initializer_list<int> signals) {
    sigset_t sigset;
    PCHECK(sigemptyset(&sigset) == 0);
    for (int signal : signals) {
      PCHECK(sigaddset(&sigset, signal) == 0);
    }

    PCHECK(sigprocmask(SIG_BLOCK, &sigset, &old_) == 0);
  }

  ~ScopedSignalMask() { PCHECK(sigprocmask(SIG_SETMASK, &old_, nullptr) == 0); }

 private:
  sigset_t old_;
};

// Class to manage the static state associated with killing multiple event
// loops.
class SignalHandler {
 public:
  // Gets the singleton.
  static SignalHandler *global() {
    static SignalHandler loop;
    return &loop;
  }

  // Handles the signal with the singleton.
  static void HandleSignal(int) { global()->DoHandleSignal(); }

  // Registers an event loop to receive Exit() calls.
  void Register(ShmEventLoop *event_loop) {
    // Block signals while we have the mutex so we never race with the signal
    // handler.
    ScopedSignalMask mask({SIGINT, SIGHUP, SIGTERM});
    std::unique_lock<stl_mutex> locker(mutex_);
    if (event_loops_.size() == 0) {
      // The first caller registers the signal handler.
      struct sigaction new_action;
      sigemptyset(&new_action.sa_mask);
      // This makes it so that 2 control c's to a stuck process will kill it by
      // restoring the original signal handler.
      new_action.sa_flags = SA_RESETHAND;
      new_action.sa_handler = &HandleSignal;

      PCHECK(sigaction(SIGINT, &new_action, &old_action_int_) == 0);
      PCHECK(sigaction(SIGHUP, &new_action, &old_action_hup_) == 0);
      PCHECK(sigaction(SIGTERM, &new_action, &old_action_term_) == 0);
    }

    event_loops_.push_back(event_loop);
  }

  // Unregisters an event loop to receive Exit() calls.
  void Unregister(ShmEventLoop *event_loop) {
    // Block signals while we have the mutex so we never race with the signal
    // handler.
    ScopedSignalMask mask({SIGINT, SIGHUP, SIGTERM});
    std::unique_lock<stl_mutex> locker(mutex_);

    event_loops_.erase(
        std::find(event_loops_.begin(), event_loops_.end(), event_loop));

    if (event_loops_.size() == 0u) {
      // The last caller restores the original signal handlers.
      PCHECK(sigaction(SIGINT, &old_action_int_, nullptr) == 0);
      PCHECK(sigaction(SIGHUP, &old_action_hup_, nullptr) == 0);
      PCHECK(sigaction(SIGTERM, &old_action_term_, nullptr) == 0);
    }
  }

 private:
  void DoHandleSignal() {
    // We block signals while grabbing the lock, so there should never be a
    // race.  Confirm that this is true using trylock.
    CHECK(mutex_.try_lock()) << ": sigprocmask failed to block signals while "
                                "modifying the event loop list.";
    for (ShmEventLoop *event_loop : event_loops_) {
      event_loop->Exit();
    }
    mutex_.unlock();
  }

  // Mutex to protect all state.
  stl_mutex mutex_;
  std::vector<ShmEventLoop *> event_loops_;
  struct sigaction old_action_int_;
  struct sigaction old_action_hup_;
  struct sigaction old_action_term_;
};

void ShmEventLoop::Run() {
  CheckCurrentThread();
  SignalHandler::global()->Register(this);

  if (watchers_.size() > 0) {
    signalfd_.reset(new ipc_lib::SignalFd({ipc_lib::kWakeupSignal}));

    epoll_.OnReadable(signalfd_->fd(), [this]() { HandleEvent(); });
  }

  MaybeScheduleTimingReports();

  ReserveEvents();

  {
    logging::ScopedLogRestorer prev_logger;
    AosLogToFbs aos_logger;
    if (!skip_logger_) {
      aos_logger.Initialize(&name_, MakeSender<logging::LogMessageFbs>("/aos"));
      prev_logger.Swap(aos_logger.implementation());
    }

    aos::SetCurrentThreadName(name_.substr(0, 16));
    const cpu_set_t default_affinity = DefaultAffinity();
    if (!CPU_EQUAL(&affinity_, &default_affinity)) {
      ::aos::SetCurrentThreadAffinity(affinity_);
    }
    // Now, all the callbacks are setup.  Lock everything into memory and go
    // RT.
    if (priority_ != 0) {
      ::aos::InitRT();

      LOG(INFO) << "Setting priority to " << priority_;
      ::aos::SetCurrentThreadRealtimePriority(priority_);
    }

    set_is_running(true);

    // Now that we are realtime (but before the OnRun handlers run), snap the
    // queue index.
    for (::std::unique_ptr<WatcherState> &watcher : watchers_) {
      watcher->Startup(this);
    }

    // Now that we are RT, run all the OnRun handlers.
    SetTimerContext(monotonic_clock::now());
    for (const auto &run : on_run_) {
      run();
    }

    // And start our main event loop which runs all the timers and handles
    // Quit.
    epoll_.Run();

    // Once epoll exits, there is no useful nonrt work left to do.
    set_is_running(false);

    // Nothing time or synchronization critical needs to happen after this
    // point.  Drop RT priority.
    ::aos::UnsetCurrentThreadRealtimePriority();
  }

  for (::std::unique_ptr<WatcherState> &base_watcher : watchers_) {
    ShmWatcherState *watcher =
        reinterpret_cast<ShmWatcherState *>(base_watcher.get());
    watcher->UnregisterWakeup();
  }

  if (watchers_.size() > 0) {
    epoll_.DeleteFd(signalfd_->fd());
    signalfd_.reset();
  }

  SignalHandler::global()->Unregister(this);

  // Trigger any remaining senders or fetchers to be cleared before destroying
1070 // created the timing reporter.
1071 timing_report_sender_.reset();
Austin Schuh0debde12022-08-17 16:25:17 -07001072 ClearContext();
Alex Perrycb7da4b2019-08-28 19:35:56 -07001073}
1074
1075void ShmEventLoop::Exit() { epoll_.Quit(); }
1076
Brian Silvermane1fe2512022-08-14 23:18:50 -07001077std::unique_ptr<ExitHandle> ShmEventLoop::MakeExitHandle() {
1078 return std::make_unique<ShmExitHandle>(this);
1079}
1080
Alex Perrycb7da4b2019-08-28 19:35:56 -07001081ShmEventLoop::~ShmEventLoop() {
Austin Schuh3054f5f2021-07-21 15:38:01 -07001082 CheckCurrentThread();
Austin Schuh39788ff2019-12-01 18:22:57 -08001083 // Force everything with a registered fd with epoll to be destroyed now.
1084 timers_.clear();
1085 phased_loops_.clear();
1086 watchers_.clear();
1087
Alex Perrycb7da4b2019-08-28 19:35:56 -07001088 CHECK(!is_running()) << ": ShmEventLoop destroyed while running";
Brian Silvermane1fe2512022-08-14 23:18:50 -07001089 CHECK_EQ(0, exit_handle_count_)
1090 << ": All ExitHandles must be destroyed before the ShmEventLoop";
Alex Perrycb7da4b2019-08-28 19:35:56 -07001091}
1092
Alex Perrycb7da4b2019-08-28 19:35:56 -07001093void ShmEventLoop::SetRuntimeRealtimePriority(int priority) {
Austin Schuh3054f5f2021-07-21 15:38:01 -07001094 CheckCurrentThread();
Alex Perrycb7da4b2019-08-28 19:35:56 -07001095 if (is_running()) {
1096 LOG(FATAL) << "Cannot set realtime priority while running.";
1097 }
1098 priority_ = priority;
1099}
1100
Brian Silverman6a54ff32020-04-28 16:41:39 -07001101void ShmEventLoop::SetRuntimeAffinity(const cpu_set_t &cpuset) {
Austin Schuh3054f5f2021-07-21 15:38:01 -07001102 CheckCurrentThread();
Brian Silverman6a54ff32020-04-28 16:41:39 -07001103 if (is_running()) {
1104 LOG(FATAL) << "Cannot set affinity while running.";
1105 }
1106 affinity_ = cpuset;
1107}
1108
James Kuszmaul57c2baa2020-01-19 14:52:52 -08001109void ShmEventLoop::set_name(const std::string_view name) {
Austin Schuh3054f5f2021-07-21 15:38:01 -07001110 CheckCurrentThread();
James Kuszmaul57c2baa2020-01-19 14:52:52 -08001111 name_ = std::string(name);
1112 UpdateTimingReport();
1113}
1114
Brian Silvermana5450a92020-08-12 19:59:57 -07001115absl::Span<const char> ShmEventLoop::GetWatcherSharedMemory(
1116 const Channel *channel) {
Austin Schuh3054f5f2021-07-21 15:38:01 -07001117 CheckCurrentThread();
Brian Silverman148d43d2020-06-07 18:19:22 -05001118 ShmWatcherState *const watcher_state =
1119 static_cast<ShmWatcherState *>(GetWatcherState(channel));
Brian Silverman5120afb2020-01-31 17:44:35 -08001120 return watcher_state->GetSharedMemory();
1121}
1122
Brian Silverman4f4e0612020-08-12 19:54:41 -07001123int ShmEventLoop::NumberBuffers(const Channel *channel) {
Austin Schuh3054f5f2021-07-21 15:38:01 -07001124 CheckCurrentThread();
Austin Schuh4d275fc2022-09-16 15:42:45 -07001125 return ipc_lib::MakeQueueConfiguration(configuration(), channel)
1126 .num_messages();
Brian Silverman4f4e0612020-08-12 19:54:41 -07001127}
1128
Brian Silverman5120afb2020-01-31 17:44:35 -08001129absl::Span<char> ShmEventLoop::GetShmSenderSharedMemory(
1130 const aos::RawSender *sender) const {
Austin Schuh3054f5f2021-07-21 15:38:01 -07001131 CheckCurrentThread();
Brian Silverman148d43d2020-06-07 18:19:22 -05001132 return static_cast<const ShmSender *>(sender)->GetSharedMemory();
Brian Silverman5120afb2020-01-31 17:44:35 -08001133}
1134
Brian Silvermana5450a92020-08-12 19:59:57 -07001135absl::Span<const char> ShmEventLoop::GetShmFetcherPrivateMemory(
Brian Silverman6d2b3592020-06-18 14:40:15 -07001136 const aos::RawFetcher *fetcher) const {
Austin Schuh3054f5f2021-07-21 15:38:01 -07001137 CheckCurrentThread();
Brian Silverman6d2b3592020-06-18 14:40:15 -07001138 return static_cast<const ShmFetcher *>(fetcher)->GetPrivateMemory();
1139}
1140
Austin Schuh3054f5f2021-07-21 15:38:01 -07001141pid_t ShmEventLoop::GetTid() {
1142 CheckCurrentThread();
1143 return syscall(SYS_gettid);
1144}
Austin Schuh39788ff2019-12-01 18:22:57 -08001145
Alex Perrycb7da4b2019-08-28 19:35:56 -07001146} // namespace aos