#include "aos/ipc_lib/lockless_queue.h"

#include <linux/futex.h>
#include <pwd.h>
#include <sys/types.h>
#include <syscall.h>
#include <unistd.h>

#include <algorithm>
#include <iomanip>
#include <iostream>
#include <sstream>

#include "absl/strings/escaping.h"
#include "gflags/gflags.h"
#include "glog/logging.h"

#include "aos/ipc_lib/lockless_queue_memory.h"
#include "aos/realtime.h"
#include "aos/util/compiler_memory_barrier.h"

DEFINE_bool(dump_lockless_queue_data, false,
            "If true, print the data out when dumping the queue.");

namespace aos::ipc_lib {
namespace {

class GrabQueueSetupLockOrDie {
 public:
  GrabQueueSetupLockOrDie(LocklessQueueMemory *memory) : memory_(memory) {
    const int result = mutex_grab(&(memory->queue_setup_lock));
    CHECK(result == 0 || result == 1) << ": " << result;
  }

  ~GrabQueueSetupLockOrDie() { mutex_unlock(&(memory_->queue_setup_lock)); }

  GrabQueueSetupLockOrDie(const GrabQueueSetupLockOrDie &) = delete;
  GrabQueueSetupLockOrDie &operator=(const GrabQueueSetupLockOrDie &) = delete;

 private:
  LocklessQueueMemory *const memory_;
};

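// GrabQueueSetupLockOrDie is used as a stack-scoped guard around
// queue_setup_lock. The constructor's CHECK accepts both 0 and 1 from
// mutex_grab() because a result of 1 (which, presumably, reports that the
// previous holder died) is fine here: everything below re-validates the
// shared state it touches anyways.
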
bool IsPinned(LocklessQueueMemory *memory, Index index) {
  DCHECK(index.valid());
  const size_t queue_size = memory->queue_size();
  const QueueIndex message_index =
      memory->GetMessage(index)->header.queue_index.Load(queue_size);
  if (!message_index.valid()) {
    return false;
  }
  DCHECK(memory->GetQueue(message_index.Wrapped())->Load() != index)
      << ": Message is in the queue";
  for (int pinner_index = 0;
       pinner_index < static_cast<int>(memory->config.num_pinners);
       ++pinner_index) {
    ipc_lib::Pinner *const pinner = memory->GetPinner(pinner_index);

    if (pinner->pinned.RelaxedLoad(queue_size) == message_index) {
      return true;
    }
  }
  return false;
}

// Ensures sender->scratch_index (which must contain to_replace) is not pinned.
//
// Returns the new scratch_index value.
Index SwapPinnedSenderScratch(LocklessQueueMemory *const memory,
                              ipc_lib::Sender *const sender,
                              const Index to_replace) {
  // If anybody's trying to pin this message, then grab a message from a pinner
  // to write into instead, and leave the message we pulled out of the queue
  // (currently in our scratch_index) with a pinner.
  //
  // This loop will terminate in at most one iteration through the pinners in
  // any steady-state configuration of the memory. There are only as many
  // Pinner::pinned values to worry about as there are Pinner::scratch_index
  // values to check against, plus to_replace, which means there will always be
  // a free one. We might have to make multiple passes if things are being
  // changed concurrently though, but nobody dying can make this loop fail to
  // terminate (because the number of processes that can die is bounded, because
  // no new ones can start while we've got the lock).
  for (int pinner_index = 0; true;
       pinner_index = (pinner_index + 1) % memory->config.num_pinners) {
    if (!IsPinned(memory, to_replace)) {
      // No pinners on our current scratch_index, so we're fine now.
      VLOG(3) << "No pinners: " << to_replace.DebugString();
      return to_replace;
    }

    ipc_lib::Pinner *const pinner = memory->GetPinner(pinner_index);

    const Index pinner_scratch = pinner->scratch_index.RelaxedLoad();
    CHECK(pinner_scratch.valid())
        << ": Pinner scratch_index should always be valid";
    if (IsPinned(memory, pinner_scratch)) {
      // Wouldn't do us any good to swap with this one, so don't bother, and
      // move onto the next one.
      VLOG(3) << "Also pinned: " << pinner_scratch.DebugString();
      continue;
    }

    sender->to_replace.RelaxedStore(pinner_scratch);
    aos_compiler_memory_barrier();
    // Give the pinner the message (which is currently in
    // sender->scratch_index).
    if (!pinner->scratch_index.CompareAndExchangeStrong(pinner_scratch,
                                                        to_replace)) {
      // Somebody swapped into this pinner before us. The new value is probably
      // pinned, so we don't want to look at it again immediately.
      VLOG(3) << "Pinner " << pinner_index
              << " scratch_index changed: " << pinner_scratch.DebugString()
              << ", " << to_replace.DebugString();
      sender->to_replace.RelaxedInvalidate();
      continue;
    }
    aos_compiler_memory_barrier();
    // Now update the sender's scratch space and record that we succeeded.
    sender->scratch_index.Store(pinner_scratch);
    aos_compiler_memory_barrier();
    // And then record that we succeeded, but definitely after the above
    // store.
    sender->to_replace.RelaxedInvalidate();
    VLOG(3) << "Got new scratch message: " << pinner_scratch.DebugString();

    // If it's in a pinner's scratch_index, it should not be in the queue, which
    // means nobody new can pin it for real. However, they can still attempt to
    // pin it, which means we can't verify !IsPinned down here.

    return pinner_scratch;
  }
}

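// Note that the swap above follows the same write protocol as the send path:
// publish to_replace first, then move the message with a compare-and-exchange,
// then update scratch_index, and only then invalidate to_replace, with
// compiler barriers between the steps. That ordering is what lets DoCleanup
// below tell how far a dead sender got.
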
// Returns true if it succeeded. Returns false if another sender died in the
// middle.
bool DoCleanup(LocklessQueueMemory *memory, const GrabQueueSetupLockOrDie &) {
  // Make sure we start looking at shared memory fresh right now. We'll handle
  // people dying partway through by either cleaning up after them or not, but
  // we want to ensure we clean up after anybody who has already died when we
  // start.
  aos_compiler_memory_barrier();

  const size_t num_senders = memory->num_senders();
  const size_t num_pinners = memory->num_pinners();
  const size_t queue_size = memory->queue_size();
  const size_t num_messages = memory->num_messages();

  // There are a large number of crazy cases here for how things can go wrong
  // and how we have to recover. They either require us to keep extra track of
  // what is going on, slowing down the send path, or require a large number of
  // cases.
  //
  // The solution here is to not over-think it. This is running while not real
  // time during construction. It is allowed to be slow. It will also very
  // rarely trigger. There is a small microsecond-scale window where process
  // death is ambiguous.
  //
  // So, build up a list N long, where N is the number of messages. Search
  // through the entire queue and the sender list (ignoring any dead senders),
  // and mark down which ones we have seen. Once we have seen all the messages
  // except the N dead senders, we know which messages are dead. Because the
  // queue is active while we do this, it may take a couple of go-arounds to
  // see everything.

  ::std::vector<bool> need_recovery(num_senders, false);

  // Do the easy case. Find all senders who have died. See if they are either
  // consistent already, or if they have copied over to_replace to the scratch
  // index, but haven't cleared to_replace. Count them.
  size_t valid_senders = 0;
  for (size_t i = 0; i < num_senders; ++i) {
    Sender *sender = memory->GetSender(i);
    if (!sender->ownership_tracker.OwnerIsDefinitelyAbsolutelyDead()) {
      // Not dead.
      ++valid_senders;
      continue;
    }
    VLOG(3) << "Found an easy death for sender " << i;
    // We can do a relaxed load here because we're the only person touching
    // this sender at this point.
    const Index to_replace = sender->to_replace.RelaxedLoad();
    const Index scratch_index = sender->scratch_index.Load();

    // I find it easiest to think about this in terms of the set of observable
    // states. The main code progresses through the following states:

    // 1) scratch_index = xxx
    //    to_replace = invalid
    // This is unambiguous. Already good.

    // 2) scratch_index = xxx
    //    to_replace = yyy
    // Very ambiguous. Is xxx or yyy the correct one? Need to either roll
    // this forwards or backwards.

    // 3) scratch_index = yyy
    //    to_replace = yyy
    // We are in the act of moving to_replace to scratch_index, but didn't
    // finish. Easy.
    //
    // If doing a pinner swap, we've definitely done it.

    // 4) scratch_index = yyy
    //    to_replace = invalid
    // Finished, but died. Looks like 1)

    // Swapping with a pinner's scratch_index passes through the same states.
    // We just need to ensure the message that ends up in the sender's
    // scratch_index isn't pinned, using the same code as sending does.

    // Any cleanup code needs to follow the same set of states to be robust to
    // death, so death can be restarted.

    if (!to_replace.valid()) {
      // 1) or 4). Make sure we aren't corrupted and declare victory.
      CHECK(scratch_index.valid());

      // If it's in 1) with a pinner, the sender might have a pinned message,
      // so fix that.
      SwapPinnedSenderScratch(memory, sender, scratch_index);

      // If it's in 4), it may not have completed this step yet. This will
      // always be a NOP if it's in 1), verified by a DCHECK.
      memory->GetMessage(scratch_index)->header.queue_index.RelaxedInvalidate();

      sender->ownership_tracker.ForceClear();
      ++valid_senders;
      continue;
    }

    // Could be 2) or 3) at this point.

    if (to_replace == scratch_index) {
      // 3) for sure.
      // Just need to invalidate to_replace to finish.
      sender->to_replace.Invalidate();

      // Make sure to indicate it's an unused message before a sender gets its
      // hands on it.
      memory->GetMessage(scratch_index)->header.queue_index.RelaxedInvalidate();
      aos_compiler_memory_barrier();

      // And mark that we succeeded.
      sender->ownership_tracker.ForceClear();
      ++valid_senders;
      continue;
    }

    // Must be 2). Mark it for later.
    need_recovery[i] = true;
  }

  // Cleaning up pinners is easy. We don't actually have to do anything, but
  // invalidating their pinned fields might help catch bugs elsewhere trying to
  // read them before they're set.
  for (size_t i = 0; i < num_pinners; ++i) {
    Pinner *const pinner = memory->GetPinner(i);
    if (!pinner->ownership_tracker.OwnerIsDefinitelyAbsolutelyDead()) {
      continue;
    }
    pinner->pinned.Invalidate();
    pinner->ownership_tracker.ForceClear();
  }

  // If all the senders are (or were made) good, there is no need to do the
  // hard case.
  if (valid_senders == num_senders) {
    return true;
  }

  VLOG(3) << "Starting hard cleanup";

  size_t num_accounted_for = 0;
  size_t num_missing = 0;
  ::std::vector<bool> accounted_for(num_messages, false);

  while ((num_accounted_for + num_missing) != num_messages) {
    num_missing = 0;
    for (size_t i = 0; i < num_senders; ++i) {
      Sender *const sender = memory->GetSender(i);
      if (sender->ownership_tracker.OwnerIsDefinitelyAbsolutelyDead()) {
        if (!need_recovery[i]) {
          return false;
        }
        ++num_missing;
        continue;
      }
      CHECK(!need_recovery[i]) << ": Somebody else recovered a sender: " << i;
      // We can do a relaxed load here because we're the only person touching
      // this sender at this point, if it matters. If it's not a dead sender,
      // then any message it ever has will eventually be accounted for if we
      // make enough tries through the outer loop.
      const Index scratch_index = sender->scratch_index.RelaxedLoad();
      if (!accounted_for[scratch_index.message_index()]) {
        ++num_accounted_for;
      }
      accounted_for[scratch_index.message_index()] = true;
    }

    for (size_t i = 0; i < queue_size; ++i) {
      // Same logic as above for scratch_index applies here too.
      const Index index = memory->GetQueue(i)->RelaxedLoad();
      if (!accounted_for[index.message_index()]) {
        ++num_accounted_for;
      }
      accounted_for[index.message_index()] = true;
    }

    for (size_t pinner_index = 0; pinner_index < num_pinners; ++pinner_index) {
      // Same logic as above for scratch_index applies here too.
      const Index index =
          memory->GetPinner(pinner_index)->scratch_index.RelaxedLoad();
      if (!accounted_for[index.message_index()]) {
        ++num_accounted_for;
      }
      accounted_for[index.message_index()] = true;
    }

    CHECK_LE(num_accounted_for + num_missing, num_messages);
  }

  while (num_missing != 0) {
    const size_t starting_num_missing = num_missing;
    for (size_t i = 0; i < num_senders; ++i) {
      Sender *sender = memory->GetSender(i);
      if (!sender->ownership_tracker.OwnerIsDefinitelyAbsolutelyDead()) {
        CHECK(!need_recovery[i]) << ": Somebody else recovered a sender: " << i;
        continue;
      }
      if (!need_recovery[i]) {
        return false;
      }
      // We can do relaxed loads here because we're the only person touching
      // this sender at this point.
      const Index scratch_index = sender->scratch_index.RelaxedLoad();
      const Index to_replace = sender->to_replace.RelaxedLoad();

      // Candidate.
      if (to_replace.valid()) {
        CHECK_LE(to_replace.message_index(), accounted_for.size());
      }
      if (scratch_index.valid()) {
        CHECK_LE(scratch_index.message_index(), accounted_for.size());
      }
      if (!to_replace.valid() || accounted_for[to_replace.message_index()]) {
        CHECK(scratch_index.valid());
        VLOG(3) << "Sender " << i
                << " died, to_replace is already accounted for";
        // If both are accounted for, we are corrupt...
        CHECK(!accounted_for[scratch_index.message_index()]);

        // to_replace is already accounted for. This means that we didn't
        // atomically insert scratch_index into the queue yet. So
        // invalidate to_replace.
        sender->to_replace.Invalidate();
        // Sender definitely will not have gotten here, so finish for it.
        memory->GetMessage(scratch_index)
            ->header.queue_index.RelaxedInvalidate();

        // And then mark this sender clean.
        sender->ownership_tracker.ForceClear();
        need_recovery[i] = false;

        // And account for scratch_index.
        accounted_for[scratch_index.message_index()] = true;
        --num_missing;
        ++num_accounted_for;
      } else if (!scratch_index.valid() ||
                 accounted_for[scratch_index.message_index()]) {
        VLOG(3) << "Sender " << i
                << " died, scratch_index is already accounted for";
        // scratch_index is accounted for. That means we did the insert,
        // but didn't record it.
        CHECK(to_replace.valid());

        // Make sure to indicate it's an unused message before a sender gets
        // its hands on it.
        memory->GetMessage(to_replace)->header.queue_index.RelaxedInvalidate();
        aos_compiler_memory_barrier();

        // Finish the transaction. Copy to_replace, then clear it.

        sender->scratch_index.Store(to_replace);
        sender->to_replace.Invalidate();

        // And then mark this sender clean.
        sender->ownership_tracker.ForceClear();
        need_recovery[i] = false;

        // And account for to_replace.
        accounted_for[to_replace.message_index()] = true;
        --num_missing;
        ++num_accounted_for;
      } else {
        VLOG(3) << "Sender " << i << " died, neither is accounted for";
        // Ambiguous. There will be an unambiguous one somewhere that we
        // can do first.
      }
    }
    // CHECK that we are making progress.
    CHECK_NE(num_missing, starting_num_missing);
  }
  return true;
}

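// Summary of DoCleanup's recovery decision for a dead sender in the ambiguous
// state 2) (scratch_index and to_replace both valid and distinct):
//   - to_replace already accounted for elsewhere: the queue insert never
//     happened, so roll the send back by invalidating to_replace.
//   - scratch_index already accounted for: the insert happened but wasn't
//     recorded, so roll the send forward by storing to_replace into
//     scratch_index and then invalidating to_replace.
//   - neither accounted for: ambiguous for now; some other sender's state will
//     disambiguate it on a later pass, and the CHECK_NE above guarantees each
//     pass makes progress.
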
void Cleanup(LocklessQueueMemory *memory, const GrabQueueSetupLockOrDie &lock) {
  // The number of iterations is bounded here because there are only a finite
  // number of senders in existence which could die, and no new ones can be
  // created while we're in here holding the lock.
  while (!DoCleanup(memory, lock)) {
  }
}

// Exposes rt_tgsigqueueinfo so we can send the signal *just* to the target
// thread.
// TODO(Brian): Do directly in assembly for armhf at least for efficiency.
int rt_tgsigqueueinfo(pid_t tgid, pid_t tid, int sig, siginfo_t *si) {
  return syscall(SYS_rt_tgsigqueueinfo, tgid, tid, sig, si);
}

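// next_queue_index starts out Invalid before the first message is ever sent;
// mapping Invalid to index zero here lets the send path treat the very first
// send the same as every subsequent one.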
QueueIndex ZeroOrValid(QueueIndex index) {
  if (!index.valid()) {
    return index.Clear();
  }
  return index;
}

}  // namespace

bool PretendThatOwnerIsDeadForTesting(aos_mutex *mutex, pid_t tid) {
  if (static_cast<pid_t>(mutex->futex & FUTEX_TID_MASK) == tid) {
    mutex->futex = FUTEX_OWNER_DIED;
    return true;
  }
  return false;
}

size_t LocklessQueueConfiguration::message_size() const {
  // Round up the message size so following data is aligned appropriately.
  // Make sure to leave space to align the message data. It will be aligned
  // relative to the start of the shared memory region, but that might not be
  // aligned for some use cases.
  return LocklessQueueMemory::AlignmentRoundUp(message_data_size +
                                               kChannelDataRedzone * 2 +
                                               (kChannelDataAlignment - 1)) +
         sizeof(Message);
}

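// Worked example for message_size() (the constants here are made-up values for
// illustration, not the real kChannelData* constants): with message_data_size
// = 1000, kChannelDataRedzone = 32, and kChannelDataAlignment = 64, the
// argument to AlignmentRoundUp() is 1000 + 2 * 32 + 63 = 1127. The extra
// (alignment - 1) bytes guarantee that an aligned data pointer fits inside the
// region no matter how the region itself happens to be offset.
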
size_t LocklessQueueMemorySize(LocklessQueueConfiguration config) {
  // Round up the message size so following data is aligned appropriately.
  config.message_data_size =
      LocklessQueueMemory::AlignmentRoundUp(config.message_data_size);

  // As we build up the size, confirm that everything is aligned to the
  // alignment requirements of the type.
  size_t size = sizeof(LocklessQueueMemory);
  CHECK_EQ(size % alignof(LocklessQueueMemory), 0u);

  CHECK_EQ(size % alignof(AtomicIndex), 0u);
  size += LocklessQueueMemory::SizeOfQueue(config);

  CHECK_EQ(size % alignof(Message), 0u);
  size += LocklessQueueMemory::SizeOfMessages(config);

  CHECK_EQ(size % alignof(Watcher), 0u);
  size += LocklessQueueMemory::SizeOfWatchers(config);

  CHECK_EQ(size % alignof(Sender), 0u);
  size += LocklessQueueMemory::SizeOfSenders(config);

  CHECK_EQ(size % alignof(Pinner), 0u);
  size += LocklessQueueMemory::SizeOfPinners(config);

  return size;
}

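// LocklessQueueMemorySize() also fixes the shared memory layout: the
// LocklessQueueMemory header comes first, followed by the queue of
// AtomicIndexes, the Message storage, the Watchers, the Senders, and finally
// the Pinners, with each region starting at an offset aligned for its element
// type.
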
// Calculates the starting byte for a redzone in shared memory. This starting
// value is simply incremented for subsequent bytes.
//
// The result is based on the offset of the region in shared memory, to ensure
// it is the same for each region when we generate and verify, but different
// for each region to help catch forms of corruption like copying
// out-of-bounds data from one place to another.
//
// memory is the base pointer to the shared memory. It is used to calculate
// offsets. starting_data is the start of the redzone's data. Each one will
// get a unique pattern.
uint8_t RedzoneStart(const LocklessQueueMemory *memory,
                     const char *starting_data) {
  const auto memory_int = reinterpret_cast<uintptr_t>(memory);
  const auto starting_int = reinterpret_cast<uintptr_t>(starting_data);
  DCHECK(starting_int >= memory_int);
  DCHECK(starting_int < memory_int + LocklessQueueMemorySize(memory->config));
  const uintptr_t starting_offset = starting_int - memory_int;
  // Just XOR the lower 2 bytes. The higher-order bytes are probably 0
  // anyways.
  return (starting_offset & 0xFF) ^ ((starting_offset >> 8) & 0xFF);
}

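// For example, a redzone whose data starts at offset 0x1234 from the base of
// the shared memory seeds its pattern with 0x34 ^ 0x12 == 0x26, and the
// expected bytes then run 0x26, 0x27, 0x28, ..., so nearby redzones get
// distinct sequences.
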
// Returns true if the given redzone has invalid data.
bool CheckRedzone(const LocklessQueueMemory *memory,
                  absl::Span<const char> redzone) {
  uint8_t redzone_value = RedzoneStart(memory, redzone.data());

  bool bad = false;

  for (size_t i = 0; i < redzone.size() && !bad; ++i) {
    if (memcmp(&redzone[i], &redzone_value, 1)) {
      bad = true;
    }
    ++redzone_value;
  }

  return bad;
}

// Returns true if either of message's redzones has invalid data.
bool CheckBothRedzones(const LocklessQueueMemory *memory,
                       const Message *message) {
  return CheckRedzone(memory,
                      message->PreRedzone(memory->message_data_size())) ||
         CheckRedzone(memory, message->PostRedzone(memory->message_data_size(),
                                                   memory->message_size()));
}

// Fills the given redzone with the expected data.
void FillRedzone(LocklessQueueMemory *memory, absl::Span<char> redzone) {
  uint8_t redzone_value = RedzoneStart(memory, redzone.data());
  for (size_t i = 0; i < redzone.size(); ++i) {
    memcpy(&redzone[i], &redzone_value, 1);
    ++redzone_value;
  }

  // Just double check that the implementations match.
  CHECK(!CheckRedzone(memory, redzone));
}

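// Together these give every message a redzone on each side of its payload:
// both are filled during initialization below and re-verified on the send
// path, so a stray write off either end of a message's data surfaces as
// Result::INVALID_REDZONE instead of silent corruption.
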
LocklessQueueMemory *InitializeLocklessQueueMemory(
    LocklessQueueMemory *memory, LocklessQueueConfiguration config) {
  // Everything should be zero initialized already. So we just need to fill
  // everything out properly.

  // This is the UID we will use for checking signal-sending permission
  // compatibility.
  //
  // The manpage says:
  //   For a process to have permission to send a signal, it must either be
  //   privileged [...], or the real or effective user ID of the sending
  //   process must equal the real or saved set-user-ID of the target process.
  //
  // Processes typically initialize a queue in random order as they start up.
  // This means we need an algorithm for verifying all processes have
  // permissions to send each other signals which gives the same answer no
  // matter what order they attach in. We would also like to avoid maintaining
  // a shared list of the UIDs of all processes.
  //
  // To do this while still giving sufficient flexibility for all current use
  // cases, we track a single UID for the queue. All processes with a matching
  // euid+suid must have this UID. Any processes with distinct euid/suid must
  // instead have a matching ruid. This guarantees signals can be sent between
  // all processes attached to the queue.
  //
  // In particular, this allows a process to change only its euid (to interact
  // with a queue) while still maintaining privileges via its ruid. However, it
  // can only use privileges in ways that do not require changing the euid
  // back, because while the euid is different it will not be able to receive
  // signals. We can't actually verify that, but we can sanity check that
  // things are valid when the queue is initialized.

  uid_t uid;
  {
    uid_t ruid, euid, suid;
    PCHECK(getresuid(&ruid, &euid, &suid) == 0);
    // If these are equal, then use them, even if that's different from the
    // real UID. This allows processes to keep a real UID of 0 (to have
    // permissions to perform system-level changes) while still being able to
    // communicate with processes running unprivileged as a distinct user.
    if (euid == suid) {
      uid = euid;
      VLOG(1) << "Using euid==suid " << uid;
    } else {
      uid = ruid;
      VLOG(1) << "Using ruid " << ruid;
    }
  }

  // Grab the mutex. We don't care if the previous reader died. We are going
  // to check everything anyways.
  GrabQueueSetupLockOrDie grab_queue_setup_lock(memory);

  if (!memory->initialized) {
    // TODO(austin): Check these for out of bounds.
    memory->config.num_watchers = config.num_watchers;
    memory->config.num_senders = config.num_senders;
    memory->config.num_pinners = config.num_pinners;
    memory->config.queue_size = config.queue_size;
    memory->config.message_data_size = config.message_data_size;

    const size_t num_messages = memory->num_messages();
    // There need to be at most MaxMessages() messages allocated.
    CHECK_LE(num_messages, Index::MaxMessages());

    for (size_t i = 0; i < num_messages; ++i) {
      Message *const message =
          memory->GetMessage(Index(QueueIndex::Zero(memory->queue_size()), i));
      message->header.queue_index.Invalidate();
      message->header.monotonic_sent_time = monotonic_clock::min_time;
      FillRedzone(memory, message->PreRedzone(memory->message_data_size()));
      FillRedzone(memory, message->PostRedzone(memory->message_data_size(),
                                               memory->message_size()));
    }

    for (size_t i = 0; i < memory->queue_size(); ++i) {
      // Make the initial counter be the furthest away number. That means that
      // index 0 should be 0xffff, 1 should be 0, etc.
      memory->GetQueue(i)->Store(Index(QueueIndex::Zero(memory->queue_size())
                                           .IncrementBy(i)
                                           .DecrementBy(memory->queue_size()),
                                       i));
    }

    memory->next_queue_index.Invalidate();
    memory->uid = uid;

    for (size_t i = 0; i < memory->num_senders(); ++i) {
      ::aos::ipc_lib::Sender *s = memory->GetSender(i);
      // Nobody else can possibly be touching these because we haven't set
      // initialized to true yet.
      s->scratch_index.RelaxedStore(
          Index(QueueIndex::Invalid(), i + memory->queue_size()));
      s->to_replace.RelaxedInvalidate();
    }

    for (size_t i = 0; i < memory->num_pinners(); ++i) {
      ::aos::ipc_lib::Pinner *pinner = memory->GetPinner(i);
      // Nobody else can possibly be touching these because we haven't set
      // initialized to true yet.
      pinner->scratch_index.RelaxedStore(
          Index(QueueIndex::Invalid(),
                i + memory->num_senders() + memory->queue_size()));
      pinner->pinned.Invalidate();
    }

    aos_compiler_memory_barrier();
    // Signal everything is done. This needs to be done last, so if we die, we
    // redo initialization.
    memory->initialized = true;
  } else {
    if (memory->uid != uid) {
      // Subsequent calls to getpwuid() overwrite the returned pointer, so pull
      // the names we care about into std::strings.
      struct passwd const *user_pw = getpwuid(uid);
      std::string user_username = user_pw->pw_name;
      struct passwd const *memory_pw = getpwuid(memory->uid);
      std::string memory_username = memory_pw->pw_name;
      LOG(FATAL) << "Current user " << user_username << " (uid:" << uid << ") "
                 << "doesn't match shared memory user " << memory_username
                 << " (uid:" << memory->uid << "). "
                 << "Log in as " << memory_username
                 << " user to access this channel.";
    }
  }

  return memory;
}

void LocklessQueue::Initialize() {
  InitializeLocklessQueueMemory(memory_, config_);
}

LocklessQueueWatcher::~LocklessQueueWatcher() {
  if (watcher_index_ == -1) {
    return;
  }

  // Since everything is self consistent, all we need to do is make sure nobody
  // else is running. Someone dying will get caught in the generic consistency
  // check.
  GrabQueueSetupLockOrDie grab_queue_setup_lock(memory_);

  // Make sure we are registered.
  CHECK_NE(watcher_index_, -1);

  // Make sure we still own the slot we are supposed to.
  CHECK(memory_->GetWatcher(watcher_index_)->ownership_tracker.IsHeldBySelf());

  // The act of unlocking invalidates the entry. Invalidate it.
  memory_->GetWatcher(watcher_index_)->ownership_tracker.Release();
  // And internally forget the slot.
  watcher_index_ = -1;

  // Cleanup is cheap. The next user will do it anyways, so no need for us to
  // do anything right now.

  // And confirm that nothing is owned by us.
  const int num_watchers = memory_->num_watchers();
  for (int i = 0; i < num_watchers; ++i) {
    CHECK(!memory_->GetWatcher(i)->ownership_tracker.IsHeldBySelf())
        << ": " << i;
  }
}

std::optional<LocklessQueueWatcher> LocklessQueueWatcher::Make(
    LocklessQueue queue, int priority) {
  queue.Initialize();
  LocklessQueueWatcher result(queue.memory(), priority);
  if (result.watcher_index_ != -1) {
    return result;
  } else {
    return std::nullopt;
  }
}

LocklessQueueWatcher::LocklessQueueWatcher(LocklessQueueMemory *memory,
                                           int priority)
    : memory_(memory) {
  // TODO(austin): Make sure signal coalescing is turned on. We don't need
  // duplicates. That will improve performance under high load.

  // Since everything is self consistent, all we need to do is make sure nobody
  // else is running. Someone dying will get caught in the generic consistency
  // check.
  GrabQueueSetupLockOrDie grab_queue_setup_lock(memory_);
  const int num_watchers = memory_->num_watchers();

  // Now, find the first empty watcher and grab it.
  CHECK_EQ(watcher_index_, -1);
  for (int i = 0; i < num_watchers; ++i) {
    // If we see a slot the kernel has marked as dead, everything we do reusing
    // it needs to happen-after whatever that process did before dying.
    auto *const ownership_tracker =
        &(memory_->GetWatcher(i)->ownership_tracker);
    if (ownership_tracker->LoadAcquire().IsUnclaimed() ||
        ownership_tracker->OwnerIsDefinitelyAbsolutelyDead()) {
      watcher_index_ = i;
      // Relaxed is OK here because we're the only task going to touch it
      // between here and the ownership_tracker.Acquire() below (other
      // recovery is blocked by us holding the setup lock).
      ownership_tracker->ForceClear();
      break;
    }
  }

  // Bail if we failed to find an open slot.
  if (watcher_index_ == -1) {
    return;
  }

  Watcher *const w = memory_->GetWatcher(watcher_index_);

  w->pid = getpid();
  w->priority = priority;

  // Grabbing a mutex is a compiler and memory barrier, so nothing before will
  // get rearranged afterwards.
  w->ownership_tracker.Acquire();
}

LocklessQueueWakeUpper::LocklessQueueWakeUpper(LocklessQueue queue)
    : memory_(queue.const_memory()), pid_(getpid()), uid_(getuid()) {
  queue.Initialize();
  watcher_copy_.resize(memory_->num_watchers());
}

int LocklessQueueWakeUpper::Wakeup(const int current_priority) {
  const size_t num_watchers = memory_->num_watchers();

  CHECK_EQ(watcher_copy_.size(), num_watchers);

  // Grab a copy so it won't change out from underneath us, and we can sort it
  // nicely in C++.
  // Do note that there is still a window where the process can die *after* we
  // read everything. We will still PI boost and send a signal to the thread in
  // question. There is no way without pidfd's to close this window, and
  // creating a pidfd is likely not RT.
  for (size_t i = 0; i < num_watchers; ++i) {
    const Watcher *w = memory_->GetWatcher(i);
    watcher_copy_[i].ownership_snapshot = w->ownership_tracker.LoadRelaxed();
    // Force the load of the TID to come first.
    aos_compiler_memory_barrier();
    watcher_copy_[i].pid = w->pid.load(std::memory_order_relaxed);
    watcher_copy_[i].priority = w->priority.load(std::memory_order_relaxed);

    // Use a priority of -1 to mean an invalid entry to make sorting easier.
    if (watcher_copy_[i].ownership_snapshot.OwnerIsDead() ||
        watcher_copy_[i].ownership_snapshot.IsUnclaimed()) {
      watcher_copy_[i].priority = -1;
    } else {
      // Ensure all of this happens after we're done looking at the
      // pid+priority in shared memory.
      aos_compiler_memory_barrier();
      if (watcher_copy_[i].ownership_snapshot !=
          w->ownership_tracker.LoadRelaxed()) {
        // Confirm that the watcher hasn't been re-used and modified while we
        // read it. If it has, mark it invalid again.
        watcher_copy_[i].priority = -1;
      }
    }
  }

  // Now sort.
  ::std::sort(watcher_copy_.begin(), watcher_copy_.end(),
              [](const WatcherCopy &a, const WatcherCopy &b) {
                return a.priority > b.priority;
              });

  int count = 0;
  if (watcher_copy_[0].priority != -1) {
    const int max_priority =
        ::std::max(current_priority, watcher_copy_[0].priority);
    // Boost if we are RT and there is a higher priority sender out there.
    // Otherwise we might run into priority inversions.
    if (max_priority > current_priority && current_priority > 0) {
      SetCurrentThreadRealtimePriority(max_priority);
    }

    // Build up the siginfo to send.
    siginfo_t uinfo;
    memset(&uinfo, 0, sizeof(uinfo));

    uinfo.si_code = SI_QUEUE;
    uinfo.si_pid = pid_;
    uinfo.si_uid = uid_;
    uinfo.si_value.sival_int = 0;

    for (const WatcherCopy &watcher_copy : watcher_copy_) {
      // The first -1 priority means we are at the end of the valid list.
      if (watcher_copy.priority == -1) {
        break;
      }

      // Send the signal. Target just the thread that sent it so that we can
      // support multiple watchers in a process (when someone creates multiple
      // event loops in different threads).
      rt_tgsigqueueinfo(watcher_copy.pid, watcher_copy.ownership_snapshot.tid(),
                        kWakeupSignal, &uinfo);

      ++count;
    }

    // Drop back down if we were boosted.
    if (max_priority > current_priority && current_priority > 0) {
      SetCurrentThreadRealtimePriority(current_priority);
    }
  }

  return count;
}

std::ostream &operator<<(std::ostream &os,
                         const LocklessQueueSender::Result r) {
  os << static_cast<int>(r);
  return os;
}

LocklessQueueSender::LocklessQueueSender(
    LocklessQueueMemory *memory,
    monotonic_clock::duration channel_storage_duration)
    : memory_(memory), channel_storage_duration_(channel_storage_duration) {
  GrabQueueSetupLockOrDie grab_queue_setup_lock(memory_);

  // Since we already have the lock, go ahead and try cleaning up.
  Cleanup(memory_, grab_queue_setup_lock);

  const int num_senders = memory_->num_senders();

  for (int i = 0; i < num_senders; ++i) {
    ::aos::ipc_lib::Sender *s = memory->GetSender(i);
    // This doesn't need synchronization because we're the only process doing
    // initialization right now, and nobody else will be touching senders which
    // we're interested in.
    if (s->ownership_tracker.LoadRelaxed().IsUnclaimed()) {
      sender_index_ = i;
      break;
    }
  }

  if (sender_index_ == -1) {
    VLOG(1) << "Too many senders, starting to bail.";
    return;
  }

  ::aos::ipc_lib::Sender *const sender = memory_->GetSender(sender_index_);

  // Indicate that we are now alive by taking over the slot. If the previous
  // owner died, we still want to do this.
  sender->ownership_tracker.Acquire();

  const Index scratch_index = sender->scratch_index.RelaxedLoad();
  Message *const message = memory_->GetMessage(scratch_index);
  CHECK(!message->header.queue_index.RelaxedLoad(memory_->queue_size()).valid())
      << ": " << std::hex << scratch_index.get();
}

LocklessQueueSender::~LocklessQueueSender() {
  if (sender_index_ != -1) {
    CHECK(memory_ != nullptr);
    memory_->GetSender(sender_index_)->ownership_tracker.Release();
  }
}

std::optional<LocklessQueueSender> LocklessQueueSender::Make(
    LocklessQueue queue, monotonic_clock::duration channel_storage_duration) {
  queue.Initialize();
  LocklessQueueSender result(queue.memory(), channel_storage_duration);
  if (result.sender_index_ != -1) {
    return result;
  } else {
    return std::nullopt;
  }
}

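// Illustrative usage (a sketch: obtaining the LocklessQueue is elided, and the
// one-second storage duration is an arbitrary example value):
//
//   std::optional<LocklessQueueSender> sender =
//       LocklessQueueSender::Make(queue, std::chrono::seconds(1));
//   CHECK(sender) << ": Ran out of sender slots";
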
size_t LocklessQueueSender::size() const {
  return memory_->message_data_size();
}

void *LocklessQueueSender::Data() {
  ::aos::ipc_lib::Sender *sender = memory_->GetSender(sender_index_);
  const Index scratch_index = sender->scratch_index.RelaxedLoad();
  Message *const message = memory_->GetMessage(scratch_index);
  // We should have invalidated this when we first got the buffer. Verify that
  // in debug mode.
  DCHECK(
      !message->header.queue_index.RelaxedLoad(memory_->queue_size()).valid())
      << ": " << std::hex << scratch_index.get();

  return message->data(memory_->message_data_size());
}

LocklessQueueSender::Result LocklessQueueSender::Send(
    const char *data, size_t length,
    monotonic_clock::time_point monotonic_remote_time,
    realtime_clock::time_point realtime_remote_time,
    uint32_t remote_queue_index, const UUID &source_boot_uuid,
    monotonic_clock::time_point *monotonic_sent_time,
    realtime_clock::time_point *realtime_sent_time, uint32_t *queue_index) {
  CHECK_LE(length, size());
  // Flatbuffers write from the back of the buffer to the front. If we are
  // going to write an explicit chunk of memory into the buffer, we need to
  // adhere to this convention and place it at the end.
  memcpy((reinterpret_cast<char *>(Data()) + size() - length), data, length);
  return Send(length, monotonic_remote_time, realtime_remote_time,
              remote_queue_index, source_boot_uuid, monotonic_sent_time,
              realtime_sent_time, queue_index);
}

Eric Schmiedebergef44b8a2022-02-28 17:30:38 -0700949LocklessQueueSender::Result LocklessQueueSender::Send(
Austin Schuhb5c6f972021-03-14 21:53:07 -0700950 size_t length, monotonic_clock::time_point monotonic_remote_time,
951 realtime_clock::time_point realtime_remote_time,
Austin Schuha9012be2021-07-21 15:19:11 -0700952 uint32_t remote_queue_index, const UUID &source_boot_uuid,
Austin Schuhb5c6f972021-03-14 21:53:07 -0700953 monotonic_clock::time_point *monotonic_sent_time,
954 realtime_clock::time_point *realtime_sent_time, uint32_t *queue_index) {
Austin Schuh20b2b082019-09-11 20:42:56 -0700955 const size_t queue_size = memory_->queue_size();
Alex Perrycb7da4b2019-08-28 19:35:56 -0700956 CHECK_LE(length, size());
Austin Schuh20b2b082019-09-11 20:42:56 -0700957
Brian Silvermanfafe1fa2019-12-18 21:42:18 -0800958 ::aos::ipc_lib::Sender *const sender = memory_->GetSender(sender_index_);
959 // We can do a relaxed load on our sender because we're the only person
960 // modifying it right now.
961 const Index scratch_index = sender->scratch_index.RelaxedLoad();
962 Message *const message = memory_->GetMessage(scratch_index);
Austin Schuh91ba6392020-10-03 13:27:47 -0700963 if (CheckBothRedzones(memory_, message)) {
Eric Schmiedebergef44b8a2022-02-28 17:30:38 -0700964 return Result::INVALID_REDZONE;
Austin Schuh91ba6392020-10-03 13:27:47 -0700965 }
Austin Schuh20b2b082019-09-11 20:42:56 -0700966
Brian Silverman177567e2020-08-12 19:51:33 -0700967 // We should have invalidated this when we first got the buffer. Verify that
968 // in debug mode.
969 DCHECK(
970 !message->header.queue_index.RelaxedLoad(memory_->queue_size()).valid())
971 << ": " << std::hex << scratch_index.get();
972
Austin Schuh20b2b082019-09-11 20:42:56 -0700973 message->header.length = length;
Austin Schuhad154822019-12-27 15:45:13 -0800974 // Pass these through. Any alternative behavior can be implemented out a
975 // layer.
976 message->header.remote_queue_index = remote_queue_index;
Austin Schuha9012be2021-07-21 15:19:11 -0700977 message->header.source_boot_uuid = source_boot_uuid;
Austin Schuhad154822019-12-27 15:45:13 -0800978 message->header.monotonic_remote_time = monotonic_remote_time;
979 message->header.realtime_remote_time = realtime_remote_time;
Austin Schuh20b2b082019-09-11 20:42:56 -0700980
Brian Silverman177567e2020-08-12 19:51:33 -0700981 Index to_replace = Index::Invalid();
Austin Schuh20b2b082019-09-11 20:42:56 -0700982 while (true) {
983 const QueueIndex actual_next_queue_index =
984 memory_->next_queue_index.Load(queue_size);
985 const QueueIndex next_queue_index = ZeroOrValid(actual_next_queue_index);
986
987 const QueueIndex incremented_queue_index = next_queue_index.Increment();
988
Brian Silvermanfafe1fa2019-12-18 21:42:18 -0800989 // This needs to synchronize with whoever the previous writer at this
990 // location was.
Brian Silverman177567e2020-08-12 19:51:33 -0700991 to_replace = memory_->LoadIndex(next_queue_index);
Austin Schuh20b2b082019-09-11 20:42:56 -0700992
993 const QueueIndex decremented_queue_index =
994 next_queue_index.DecrementBy(queue_size);
995
996 // See if we got beat. If we did, try to atomically update
997 // next_queue_index in case the previous writer failed and retry.
998 if (!to_replace.IsPlausible(decremented_queue_index)) {
999 // We don't care about the result. It will either succeed, or we got
1000 // beat in fixing it and just need to give up and try again. If we got
1001 // beat multiple times, the only way progress can be made is if the queue
1002 // is updated as well. This means that if we retry reading
1003 // next_queue_index, we will be at most off by one and can retry.
1004 //
1005 // Both require no further action from us.
1006 //
1007 // TODO(austin): If we are having fairness issues under contention, we
1008 // could have a mode bit in next_queue_index, and could use a lock or some
1009 // other form of PI boosting to let the higher priority task win.
1010 memory_->next_queue_index.CompareAndExchangeStrong(
1011 actual_next_queue_index, incremented_queue_index);
1012
Alex Perrycb7da4b2019-08-28 19:35:56 -07001013 VLOG(3) << "We were beat. Try again. Was " << std::hex
1014 << to_replace.get() << ", is " << decremented_queue_index.index();
Austin Schuh20b2b082019-09-11 20:42:56 -07001015 continue;
1016 }
1017
1018 // Confirm that the message is what it should be.
Brian Silverman177567e2020-08-12 19:51:33 -07001019 //
1020 // This is just a best-effort check to skip reading the clocks if possible.
1021 // If this fails, then the compare-exchange below definitely would, so we
1022 // can bail out now.
Eric Schmiedebergef44b8a2022-02-28 17:30:38 -07001023 const Message *message_to_replace = memory_->GetMessage(to_replace);
1024 bool is_previous_index_valid = false;
Austin Schuh20b2b082019-09-11 20:42:56 -07001025 {
Austin Schuh20b2b082019-09-11 20:42:56 -07001026 const QueueIndex previous_index =
Eric Schmiedebergef44b8a2022-02-28 17:30:38 -07001027 message_to_replace->header.queue_index.RelaxedLoad(queue_size);
1028 is_previous_index_valid = previous_index.valid();
1029 if (previous_index != decremented_queue_index &&
1030 is_previous_index_valid) {
Austin Schuh20b2b082019-09-11 20:42:56 -07001031 // Retry.
Alex Perrycb7da4b2019-08-28 19:35:56 -07001032 VLOG(3) << "Something fishy happened, queue index doesn't match. "
1033 "Retrying. Previous index was "
1034 << std::hex << previous_index.index() << ", should be "
1035 << decremented_queue_index.index();
Austin Schuh20b2b082019-09-11 20:42:56 -07001036 continue;
1037 }
1038 }
1039
1040 message->header.monotonic_sent_time = ::aos::monotonic_clock::now();
1041 message->header.realtime_sent_time = ::aos::realtime_clock::now();
Eric Schmiedebergef44b8a2022-02-28 17:30:38 -07001042
Austin Schuhad154822019-12-27 15:45:13 -08001043 if (monotonic_sent_time != nullptr) {
1044 *monotonic_sent_time = message->header.monotonic_sent_time;
1045 }
1046 if (realtime_sent_time != nullptr) {
1047 *realtime_sent_time = message->header.realtime_sent_time;
1048 }
1049 if (queue_index != nullptr) {
1050 *queue_index = next_queue_index.index();
1051 }
Austin Schuh20b2b082019-09-11 20:42:56 -07001052
Eric Schmiedebergef44b8a2022-02-28 17:30:38 -07001053 const auto to_replace_monotonic_sent_time =
1054 message_to_replace->header.monotonic_sent_time;
1055
1056 // If we are overwriting a message sent in the last
1057 // channel_storage_duration_, that means that we would be sending more than
1058 // queue_size messages and would therefore be sending too fast. If the
1059 // previous index is not valid then the message hasn't been filled out yet
1060 // so we aren't sending too fast. And, if it is not less than the sent time
1061 // of the message that we are going to write, someone else beat us and the
1062 // compare and exchange below will fail.
1063 if (is_previous_index_valid &&
1064 (to_replace_monotonic_sent_time <
1065 message->header.monotonic_sent_time) &&
1066 (message->header.monotonic_sent_time - to_replace_monotonic_sent_time <
1067 channel_storage_duration_)) {
1068 // There is a possibility that another context beat us to writing out the
1069 // message in the queue, but we beat that context to acquiring the sent
1070 // time. In this case our sent time is *greater than* the other context's
1071 // sent time. Therefore, we can check if we got beat filling out this
1072 // message *after* doing the above check to determine if we hit this edge
1073 // case. Otherwise, messages are being sent too fast.
1074 const QueueIndex previous_index =
1075 message_to_replace->header.queue_index.Load(queue_size);
1076 if (previous_index != decremented_queue_index && previous_index.valid()) {
1077 VLOG(3) << "Got beat during check for messages being sent too fast"
1078 "Retrying.";
1079 continue;
1080 } else {
Austin Schuh71e72142023-05-03 13:10:07 -07001081 VLOG(1) << "Messages sent too fast. Returning. Attempted index: "
Eric Schmiedebergef44b8a2022-02-28 17:30:38 -07001082 << decremented_queue_index.index()
1083 << " message sent time: " << message->header.monotonic_sent_time
1084 << " message to replace sent time: "
1085 << to_replace_monotonic_sent_time;
Austin Schuh71e72142023-05-03 13:10:07 -07001086
Eric Schmiedebergef44b8a2022-02-28 17:30:38 -07001087 // Since we are not using the message obtained from scratch_index
1088 // and we are not retrying, we need to invalidate its queue_index.
1089 message->header.queue_index.Invalidate();
1090 return Result::MESSAGES_SENT_TOO_FAST;
1091 }
1092 }
1093
Austin Schuh20b2b082019-09-11 20:42:56 -07001094 // Before we are fully done filling out the message, update the Sender state
Eric Schmiedebergef44b8a2022-02-28 17:30:38 -07001095 // with the new index to write. This re-uses the barrier for the
Austin Schuh20b2b082019-09-11 20:42:56 -07001096 // queue_index store.
Alex Perrycb7da4b2019-08-28 19:35:56 -07001097 const Index index_to_write(next_queue_index, scratch_index.message_index());
Austin Schuh20b2b082019-09-11 20:42:56 -07001098
Brian Silvermanfafe1fa2019-12-18 21:42:18 -08001099 aos_compiler_memory_barrier();
1100 // We're the only person who cares about our scratch index, besides somebody
1101 // cleaning up after us.
Austin Schuh20b2b082019-09-11 20:42:56 -07001102 sender->scratch_index.RelaxedStore(index_to_write);
Brian Silvermanfafe1fa2019-12-18 21:42:18 -08001103 aos_compiler_memory_barrier();
Austin Schuh20b2b082019-09-11 20:42:56 -07001104
1105 message->header.queue_index.Store(next_queue_index);
1106
Brian Silvermanfafe1fa2019-12-18 21:42:18 -08001107 aos_compiler_memory_barrier();
Austin Schuh20b2b082019-09-11 20:42:56 -07001108 // The message is now filled out, and we have a confirmed slot to store
1109 // into.
1110 //
1111 // Start by writing down what we are going to pull out of the queue. This
Brian Silvermanfafe1fa2019-12-18 21:42:18 -08001112 // was Invalid before now. The only person who will read this is whoever
1113 // cleans up after us, so no synchronization is necessary.
Austin Schuh20b2b082019-09-11 20:42:56 -07001114 sender->to_replace.RelaxedStore(to_replace);
Brian Silvermanfafe1fa2019-12-18 21:42:18 -08001115 aos_compiler_memory_barrier();
Austin Schuh20b2b082019-09-11 20:42:56 -07001116
1117 // Then exchange the next index into the queue.
1118 if (!memory_->GetQueue(next_queue_index.Wrapped())
1119 ->CompareAndExchangeStrong(to_replace, index_to_write)) {
1120 // Aw, didn't succeed. Retry.
1121 sender->to_replace.RelaxedInvalidate();
Brian Silvermanfafe1fa2019-12-18 21:42:18 -08001122 aos_compiler_memory_barrier();
Alex Perrycb7da4b2019-08-28 19:35:56 -07001123 VLOG(3) << "Failed to wrap into queue";
Austin Schuh20b2b082019-09-11 20:42:56 -07001124 continue;
1125 }
1126
1127 // Then update next_queue_index to save the next user some computation time.
1128 memory_->next_queue_index.CompareAndExchangeStrong(actual_next_queue_index,
1129 incremented_queue_index);
1130
Brian Silvermanfafe1fa2019-12-18 21:42:18 -08001131 aos_compiler_memory_barrier();
Austin Schuh20b2b082019-09-11 20:42:56 -07001132 // Now update the scratch space and record that we succeeded.
1133 sender->scratch_index.Store(to_replace);
Brian Silvermanfafe1fa2019-12-18 21:42:18 -08001134 aos_compiler_memory_barrier();
1135 // And then record that we succeeded, but definitely after the above store.
Austin Schuh20b2b082019-09-11 20:42:56 -07001136 sender->to_replace.RelaxedInvalidate();
Brian Silverman177567e2020-08-12 19:51:33 -07001137
Austin Schuh20b2b082019-09-11 20:42:56 -07001138 break;
1139 }
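// When we get here, the send has been published: the queue slot for
// next_queue_index holds index_to_write, the message header holds
// next_queue_index, and our scratch_index holds to_replace, the message we
// pulled out of the queue.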
Brian Silverman177567e2020-08-12 19:51:33 -07001140
Brian Silverman0eaa1da2020-08-12 20:03:52 -07001141 DCHECK(!CheckBothRedzones(memory_, memory_->GetMessage(to_replace)))
1142 << ": Invalid message found in shared memory";
Brian Silverman177567e2020-08-12 19:51:33 -07001143 // to_replace is our current scratch_index. It isn't in the queue, which means
1144 // nobody new can pin it. They can set their `pinned` to it, but they will
1145 // back it out, so they don't count. This means that we just need to find a
1146 // message that no pinner has in `pinned`, and then we know this
1147 // message will never be pinned. We'll start with to_replace, and if that is
1148 // pinned then we'll look for a new one to use instead.
1149 const Index new_scratch =
1150 SwapPinnedSenderScratch(memory_, sender, to_replace);
Brian Silverman0eaa1da2020-08-12 20:03:52 -07001151 DCHECK(!CheckBothRedzones(
1152 memory_, memory_->GetMessage(sender->scratch_index.RelaxedLoad())))
1153 << ": Invalid message found in shared memory";
Brian Silverman177567e2020-08-12 19:51:33 -07001154
1155 // If anybody is looking at this message (they shouldn't be), then try telling
1156 // them about it (best-effort).
1157 memory_->GetMessage(new_scratch)->header.queue_index.RelaxedInvalidate();
Eric Schmiedebergef44b8a2022-02-28 17:30:38 -07001158 return Result::GOOD;
Austin Schuh20b2b082019-09-11 20:42:56 -07001159}
1160
Brian Silvermanfc0d2e82020-08-12 19:58:35 -07001161int LocklessQueueSender::buffer_index() const {
Brian Silverman4f4e0612020-08-12 19:54:41 -07001162 ::aos::ipc_lib::Sender *const sender = memory_->GetSender(sender_index_);
1163 // We can do a relaxed load on our sender because we're the only person
1164 // modifying it right now.
1165 const Index scratch_index = sender->scratch_index.RelaxedLoad();
1166 return scratch_index.message_index();
1167}
1168
Brian Silvermanfc0d2e82020-08-12 19:58:35 -07001169LocklessQueuePinner::LocklessQueuePinner(
1170 LocklessQueueMemory *memory, const LocklessQueueMemory *const_memory)
1171 : memory_(memory), const_memory_(const_memory) {
1172 GrabQueueSetupLockOrDie grab_queue_setup_lock(memory_);
1173
1174 // Since we already have the lock, go ahead and try cleaning up.
1175 Cleanup(memory_, grab_queue_setup_lock);
1176
1177 const int num_pinners = memory_->num_pinners();
1178
1179 for (int i = 0; i < num_pinners; ++i) {
1180 ::aos::ipc_lib::Pinner *p = memory->GetPinner(i);
1181 // This doesn't need synchronization because we're the only process doing
1182 // initialization right now, and nobody else will be touching pinners which
1183 // we're interested in.
Philipp Schraderab2f8432023-09-17 18:58:06 -07001184 if (p->ownership_tracker.LoadRelaxed().IsUnclaimed()) {
Brian Silvermanfc0d2e82020-08-12 19:58:35 -07001185 pinner_index_ = i;
1186 break;
1187 }
1188 }
1189
1190 if (pinner_index_ == -1) {
1191 VLOG(1) << "Too many pinners, starting to bail.";
1192 return;
1193 }
1194
1195 ::aos::ipc_lib::Pinner *p = memory_->GetPinner(pinner_index_);
1196 p->pinned.Invalidate();
1197
1198 // Indicate that we are now alive by taking over the slot. If the previous
1199 // owner died, we still want to do this.
Philipp Schraderab2f8432023-09-17 18:58:06 -07001200 p->ownership_tracker.Acquire();
Brian Silvermanfc0d2e82020-08-12 19:58:35 -07001201}
1202
1203LocklessQueuePinner::~LocklessQueuePinner() {
1204 if (pinner_index_ != -1) {
1205 CHECK(memory_ != nullptr);
1206 memory_->GetPinner(pinner_index_)->pinned.Invalidate();
1207 aos_compiler_memory_barrier();
Philipp Schraderab2f8432023-09-17 18:58:06 -07001208 memory_->GetPinner(pinner_index_)->ownership_tracker.Release();
Brian Silvermanfc0d2e82020-08-12 19:58:35 -07001209 }
1210}
1211
1212std::optional<LocklessQueuePinner> LocklessQueuePinner::Make(
1213 LocklessQueue queue) {
1214 queue.Initialize();
1215 LocklessQueuePinner result(queue.memory(), queue.const_memory());
1216 if (result.pinner_index_ != -1) {
James Kuszmaul9776b392023-01-14 14:08:08 -08001217 return result;
Brian Silvermanfc0d2e82020-08-12 19:58:35 -07001218 } else {
1219 return std::nullopt;
1220 }
1221}
1222
1223// This method doesn't mess with any scratch_index, so it doesn't have to worry
1224// about message ownership.
1225int LocklessQueuePinner::PinIndex(uint32_t uint32_queue_index) {
1226 const size_t queue_size = memory_->queue_size();
1227 const QueueIndex queue_index =
1228 QueueIndex::Zero(queue_size).IncrementBy(uint32_queue_index);
1229 ipc_lib::Pinner *const pinner = memory_->GetPinner(pinner_index_);
1230
1231 AtomicIndex *const queue_slot = memory_->GetQueue(queue_index.Wrapped());
1232
1233 // Indicate that we want to pin this message.
1234 pinner->pinned.Store(queue_index);
1235 aos_compiler_memory_barrier();
1236
1237 {
1238 const Index message_index = queue_slot->Load();
1239 Message *const message = memory_->GetMessage(message_index);
Brian Silverman0eaa1da2020-08-12 20:03:52 -07001240 DCHECK(!CheckBothRedzones(memory_, message))
1241 << ": Invalid message found in shared memory";
Brian Silvermanfc0d2e82020-08-12 19:58:35 -07001242
1243 const QueueIndex message_queue_index =
1244 message->header.queue_index.Load(queue_size);
1245 if (message_queue_index == queue_index) {
1246 VLOG(3) << "Eq: " << std::hex << message_queue_index.index();
1247 aos_compiler_memory_barrier();
1248 return message_index.message_index();
1249 }
1250 VLOG(3) << "Message reused: " << std::hex << message_queue_index.index()
1251 << ", " << queue_index.index();
1252 }
1253
1254 // Being down here means we asked to pin a message before realizing it's no
1255 // longer in the queue, so back that out now.
1256 pinner->pinned.Invalidate();
1257 VLOG(3) << "Unpinned: " << std::hex << queue_index.index();
1258 return -1;
1259}
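// A hedged usage sketch (the locals here are hypothetical): given a pinner
// obtained from LocklessQueuePinner::Make(queue), a reader can keep a single
// message from being reused while it looks at the data:
//
//   const int buffer = pinner->PinIndex(index);
//   if (buffer != -1) {
//     // pinner->Data() now points at the pinned message's payload, and the
//     // message will not be recycled until the pin is dropped.
//   }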
1260
1261size_t LocklessQueuePinner::size() const {
1262 return const_memory_->message_data_size();
1263}
1264
1265const void *LocklessQueuePinner::Data() const {
1266 const size_t queue_size = const_memory_->queue_size();
1267 const ::aos::ipc_lib::Pinner *const pinner =
1268 const_memory_->GetPinner(pinner_index_);
1269 QueueIndex pinned = pinner->pinned.RelaxedLoad(queue_size);
1270 CHECK(pinned.valid());
1271 const Message *message = const_memory_->GetMessage(pinned);
1272
1273 return message->data(const_memory_->message_data_size());
1274}
1275
1276LocklessQueueReader::Result LocklessQueueReader::Read(
Austin Schuh20b2b082019-09-11 20:42:56 -07001277 uint32_t uint32_queue_index,
Austin Schuhb5c6f972021-03-14 21:53:07 -07001278 monotonic_clock::time_point *monotonic_sent_time,
1279 realtime_clock::time_point *realtime_sent_time,
1280 monotonic_clock::time_point *monotonic_remote_time,
1281 realtime_clock::time_point *realtime_remote_time,
Austin Schuha9012be2021-07-21 15:19:11 -07001282 uint32_t *remote_queue_index, UUID *source_boot_uuid, size_t *length,
Austin Schuhfaec51a2023-09-08 17:43:32 -07001283 char *data,
1284 std::function<bool(const Context &)> should_read_callback) const {
1285 const size_t queue_size = const_memory_->queue_size();
Austin Schuh20b2b082019-09-11 20:42:56 -07001286
1287 // Build up the QueueIndex.
1288 const QueueIndex queue_index =
1289 QueueIndex::Zero(queue_size).IncrementBy(uint32_queue_index);
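// (For example, with a queue of size 8, a uint32_queue_index of 11 lands in
// slot 3 (11 mod 8) via Wrapped(), while index() still reports 11. This is an
// illustration of the QueueIndex semantics, not code from this file.)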
1290
1291 // Read the message stored at the requested location.
Austin Schuhfaec51a2023-09-08 17:43:32 -07001292 Index mi = const_memory_->LoadIndex(queue_index);
1293 const Message *m = const_memory_->GetMessage(mi);
Austin Schuh20b2b082019-09-11 20:42:56 -07001294
1295 while (true) {
Austin Schuhfaec51a2023-09-08 17:43:32 -07001296 DCHECK(!CheckBothRedzones(const_memory_, m))
Brian Silverman0eaa1da2020-08-12 20:03:52 -07001297 << ": Invalid message found in shared memory";
Austin Schuh20b2b082019-09-11 20:42:56 -07001298 // We need to confirm that the data doesn't change while we are reading it.
1299 // Do that by first confirming that the message points to the queue index we
1300 // want.
1301 const QueueIndex starting_queue_index =
1302 m->header.queue_index.Load(queue_size);
1303 if (starting_queue_index != queue_index) {
1304 // If we found a message that is exactly 1 loop old, we just wrapped.
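// (Illustrative numbers: with a queue of size 8, asking for index 9 and
// finding a message stamped 1 (= 9 - 8) means the writer has not reached
// index 9 yet, so there is nothing new.)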
1305 if (starting_queue_index == queue_index.DecrementBy(queue_size)) {
Alex Perrycb7da4b2019-08-28 19:35:56 -07001306 VLOG(3) << "Matches: " << std::hex << starting_queue_index.index()
1307 << ", " << queue_index.DecrementBy(queue_size).index();
Brian Silvermanfc0d2e82020-08-12 19:58:35 -07001308 return Result::NOTHING_NEW;
Brian Silverman177567e2020-08-12 19:51:33 -07001309 }
1310
1311 // Someone has re-used this message between when we pulled it out of the
1312 // queue and when we grabbed its index. It is pretty hard to deduce
1313 // what happened. Just try again.
Austin Schuhfaec51a2023-09-08 17:43:32 -07001314 const Message *const new_m = const_memory_->GetMessage(queue_index);
Brian Silverman177567e2020-08-12 19:51:33 -07001315 if (m != new_m) {
1316 m = new_m;
1317 VLOG(3) << "Retrying, m doesn't match";
1318 continue;
1319 }
1320
1321 // We have confirmed that the queue slot still points to the same message. This
1322 // means that the message didn't get swapped out from under us, so
1323 // starting_queue_index is correct.
1324 //
1325 // Either we got too far behind (signaled by this being a valid
1326 // message), or this is one of the initial messages which are invalid.
1327 if (starting_queue_index.valid()) {
1328 VLOG(3) << "Too old. Tried for " << std::hex << queue_index.index()
1329 << ", got " << starting_queue_index.index() << ", behind by "
1330 << std::dec
1331 << (starting_queue_index.index() - queue_index.index());
Brian Silvermanfc0d2e82020-08-12 19:58:35 -07001332 return Result::TOO_OLD;
Brian Silverman177567e2020-08-12 19:51:33 -07001333 }
1334
1335 VLOG(3) << "Initial";
1336
1337 // There isn't a valid message at this location.
1338 //
1339 // If someone asks for one of the messages within the first go around,
1340 // then they need to wait. They got ahead. Otherwise, they are
1341 // asking for something crazy, like something before the beginning of
1342 // the queue. Tell them that they are behind.
Austin Schuhfaec51a2023-09-08 17:43:32 -07001343 if (uint32_queue_index < const_memory_->queue_size()) {
Brian Silverman177567e2020-08-12 19:51:33 -07001344 VLOG(3) << "Near zero, " << std::hex << uint32_queue_index;
Brian Silvermanfc0d2e82020-08-12 19:58:35 -07001345 return Result::NOTHING_NEW;
Austin Schuh20b2b082019-09-11 20:42:56 -07001346 } else {
Brian Silverman177567e2020-08-12 19:51:33 -07001347 VLOG(3) << "Not near zero, " << std::hex << uint32_queue_index;
Brian Silvermanfc0d2e82020-08-12 19:58:35 -07001348 return Result::TOO_OLD;
Austin Schuh20b2b082019-09-11 20:42:56 -07001349 }
1350 }
Alex Perrycb7da4b2019-08-28 19:35:56 -07001351 VLOG(3) << "Eq: " << std::hex << starting_queue_index.index() << ", "
1352 << queue_index.index();
Austin Schuh20b2b082019-09-11 20:42:56 -07001353 break;
1354 }
1355
Alex Perrycb7da4b2019-08-28 19:35:56 -07001356 // Then read the data out. Copy it all out to be deterministic and so we can
1357 // make length be from either end.
Austin Schuhfaec51a2023-09-08 17:43:32 -07001358 Context context;
1359 context.monotonic_event_time = m->header.monotonic_sent_time;
1360 context.realtime_event_time = m->header.realtime_sent_time;
1361 context.monotonic_remote_time = m->header.monotonic_remote_time;
1362 context.realtime_remote_time = m->header.realtime_remote_time;
1363 context.queue_index = queue_index.index();
1364 if (m->header.remote_queue_index == 0xffffffffu) {
1365 context.remote_queue_index = context.queue_index;
Austin Schuhad154822019-12-27 15:45:13 -08001366 } else {
Austin Schuhfaec51a2023-09-08 17:43:32 -07001367 context.remote_queue_index = m->header.remote_queue_index;
1368 }
1369 context.source_boot_uuid = m->header.source_boot_uuid;
1370 context.size = m->header.length;
1371 context.data = nullptr;
1372 context.buffer_index = -1;
Austin Schuh82ea7382023-07-14 15:17:34 -07001373
Austin Schuhfaec51a2023-09-08 17:43:32 -07001374 // If the callback is provided, use it.
1375 if (should_read_callback) {
Austin Schuh82ea7382023-07-14 15:17:34 -07001376 // And finally, confirm that the message *still* points to the queue index
1377 // we want. This means it didn't change out from under us. If something
1378 // changed out from under us, we were reading it much too late in its
1379 // lifetime.
1380 aos_compiler_memory_barrier();
1381 const QueueIndex final_queue_index = m->header.queue_index.Load(queue_size);
1382 if (final_queue_index != queue_index) {
1383 VLOG(3) << "Changed out from under us. Reading " << std::hex
1384 << queue_index.index() << ", finished with "
1385 << final_queue_index.index() << ", delta: " << std::dec
1386 << (final_queue_index.index() - queue_index.index());
1387 return Result::OVERWROTE;
1388 }
1389
1390 // We now know that the context is safe to use. See if we are supposed to
1391 // take the message or not.
Austin Schuhfaec51a2023-09-08 17:43:32 -07001392 if (!should_read_callback(context)) {
Austin Schuh82ea7382023-07-14 15:17:34 -07001393 return Result::FILTERED;
1394 }
Austin Schuhad154822019-12-27 15:45:13 -08001395 }
Austin Schuh20b2b082019-09-11 20:42:56 -07001396
Austin Schuhfaec51a2023-09-08 17:43:32 -07001397 // Read the data if requested.
1398 if (data) {
1399 memcpy(data, m->data(const_memory_->message_data_size()),
1400 const_memory_->message_data_size());
1401 }
1402
1403 // Now, we need to confirm that nothing has changed by re-reading the queue
1404 // index from the header, since we have read the whole body. We only need to
1405 // do this if we have read anything new after the check above, which happens
1406 // if we copied the data out or if we skipped the filtered-case check.
1407 if (data || !should_read_callback) {
Austin Schuh82ea7382023-07-14 15:17:34 -07001408 aos_compiler_memory_barrier();
1409 const QueueIndex final_queue_index = m->header.queue_index.Load(queue_size);
1410 if (final_queue_index != queue_index) {
1411 VLOG(3) << "Changed out from under us. Reading " << std::hex
1412 << queue_index.index() << ", finished with "
1413 << final_queue_index.index() << ", delta: " << std::dec
1414 << (final_queue_index.index() - queue_index.index());
1415 return Result::OVERWROTE;
1416 }
Austin Schuh20b2b082019-09-11 20:42:56 -07001417 }
1418
Austin Schuhfaec51a2023-09-08 17:43:32 -07001419 // And now take it and make it visible to the user. By doing it here, we will
1420 // never present partial or corrupted state to the user in the output
1421 // pointers.
1422 *monotonic_sent_time = context.monotonic_event_time;
1423 *realtime_sent_time = context.realtime_event_time;
1424 *remote_queue_index = context.remote_queue_index;
1425 *monotonic_remote_time = context.monotonic_remote_time;
1426 *realtime_remote_time = context.realtime_remote_time;
1427 *source_boot_uuid = context.source_boot_uuid;
1428 *length = context.size;
1429
Brian Silvermanfc0d2e82020-08-12 19:58:35 -07001430 return Result::GOOD;
Austin Schuh20b2b082019-09-11 20:42:56 -07001431}
1432
Brian Silvermanfc0d2e82020-08-12 19:58:35 -07001433QueueIndex LocklessQueueReader::LatestIndex() const {
Austin Schuhfaec51a2023-09-08 17:43:32 -07001434 const size_t queue_size = const_memory_->queue_size();
Austin Schuh20b2b082019-09-11 20:42:56 -07001435
Austin Schuhfaec51a2023-09-08 17:43:32 -07001436 // There are 2 main cases. Either the next queue index is right, or it is
1437 // behind by 1 and wrong. If nothing has been published, the next queue index
1438 // will be the reserved "Invalid" value, otherwise it will point to the next
1439 // place to write. We need to figure out if it is right or wrong, and if it
1440 // is wrong, fix it. If we don't, Read() can find the next message before
1441 // LatestIndex() sees it if someone is hammering on Read() until it reports
1442 // nothing new is left, which means watchers and fetchers may disagree on when
1443 // a message is published.
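// (Illustrative example: with a queue of size 8, suppose next_queue_index
// still reads 5 but slot 5 already holds a message stamped with queue index
// 5. The previous sender published the message but died or was preempted
// before bumping next_queue_index, so the code below bumps it to 6 on that
// sender's behalf.)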
1444 QueueIndex actual_next_queue_index =
1445 const_memory_->next_queue_index.Load(queue_size);
1446
1447 // Handle the "nothing has been published" case by making next_queue_index
1448 // point to the 0th index.
1449 const QueueIndex next_queue_index = ZeroOrValid(actual_next_queue_index);
1450
1451 // This needs to synchronize with whoever the previous writer at this
1452 // location was. Read what is there to see if the message has been published
1453 // and next_queue_index is just behind.
1454 Index to_replace = const_memory_->LoadIndex(next_queue_index);
1455
1456 // See if next_queue_index is consistent with the state of the queue. If it
1457 // is not, try to atomically update next_queue_index in case the previous
1458 // writer failed and retry.
1459 if (to_replace.IsPlausible(next_queue_index)) {
1460 // If next_queue_index ends up pointing to a message with a matching index,
1461 // this is what next_queue_index needs to be updated to.
1462 const QueueIndex incremented_queue_index = next_queue_index.Increment();
1463
1464 // We don't care about the result. It will either succeed, or we got
1465 // beat in fixing it. The way the Send logic works, the pointer can never
1466 // get more than 1 behind or the next send will repair it. So, if we fail,
1467 // that means that someone else got there first and fixed it up (and
1468 // potentially later senders continued on past it).
1469 //
1470 // Either way, no further action is required from us. Worst case, our Next pointer
1471 // will not be the latest message, but there will always be a point after
1472 // which the index can change. We just need a consistent snapshot where
1473 // there is nothing in the queue that isn't accounted for by
1474 // next_queue_index.
1475 memory_->next_queue_index.CompareAndExchangeStrong(actual_next_queue_index,
1476 incremented_queue_index);
1477
1478 VLOG(3) << "next_queue_index is lagging, fixed it. Found " << std::hex
1479 << to_replace.get() << ", expected "
1480 << next_queue_index.DecrementBy(queue_size).index();
1481
1482 actual_next_queue_index = incremented_queue_index;
1483 }
1484
1485 if (actual_next_queue_index.valid()) {
1486 const QueueIndex current_queue_index =
1487 actual_next_queue_index.DecrementBy(1u);
Alex Perrycb7da4b2019-08-28 19:35:56 -07001488 return current_queue_index;
Austin Schuh20b2b082019-09-11 20:42:56 -07001489 }
Brian Silvermanfc0d2e82020-08-12 19:58:35 -07001490 return QueueIndex::Invalid();
1491}
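// A hedged sketch of how LatestIndex() and Read() compose for a "fetch the
// newest message" poll (reader construction and the output parameters are
// elided; the names are hypothetical):
//
//   const QueueIndex latest = reader.LatestIndex();
//   if (latest.valid()) {
//     switch (reader.Read(latest.index(), ...)) {
//       case LocklessQueueReader::Result::GOOD:        // message copied out.
//       case LocklessQueueReader::Result::OVERWROTE:   // lapped; poll again.
//       case LocklessQueueReader::Result::NOTHING_NEW: // nothing published.
//       case LocklessQueueReader::Result::TOO_OLD:     // fell behind.
//       case LocklessQueueReader::Result::FILTERED:    // callback declined.
//         break;
//     }
//   }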
1492
1493size_t LocklessQueueSize(const LocklessQueueMemory *memory) {
1494 return memory->queue_size();
1495}
1496
1497size_t LocklessQueueMessageDataSize(const LocklessQueueMemory *memory) {
1498 return memory->message_data_size();
Austin Schuh20b2b082019-09-11 20:42:56 -07001499}
1500
1501namespace {
1502
1503// Prints out the mutex state. Not safe to use while the mutex is being
1504// changed.
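// Example of the format produced (illustrative values; note that everything
// after the opening paren prints in hex):
//   aos_mutex(40003039:FUTEX_OWNER_DIED|tid=3039)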
Austin Schuh83cbb1e2023-06-23 12:59:02 -07001505::std::string PrintMutex(const aos_mutex *mutex) {
Austin Schuh20b2b082019-09-11 20:42:56 -07001506 ::std::stringstream s;
1507 s << "aos_mutex(" << ::std::hex << mutex->futex;
1508
1509 if (mutex->futex != 0) {
1510 s << ":";
1511 if (mutex->futex & FUTEX_OWNER_DIED) {
1512 s << "FUTEX_OWNER_DIED|";
1513 }
1514 s << "tid=" << (mutex->futex & FUTEX_TID_MASK);
1515 }
1516
1517 s << ")";
1518 return s.str();
1519}
1520
1521} // namespace
1522
Austin Schuh83cbb1e2023-06-23 12:59:02 -07001523void PrintLocklessQueueMemory(const LocklessQueueMemory *memory) {
Austin Schuh20b2b082019-09-11 20:42:56 -07001524 const size_t queue_size = memory->queue_size();
1525 ::std::cout << "LocklessQueueMemory (" << memory << ") {" << ::std::endl;
1526 ::std::cout << " aos_mutex queue_setup_lock = "
1527 << PrintMutex(&memory->queue_setup_lock) << ::std::endl;
Brian Silvermanfafe1fa2019-12-18 21:42:18 -08001528 ::std::cout << " bool initialized = " << memory->initialized << ::std::endl;
Austin Schuh20b2b082019-09-11 20:42:56 -07001529 ::std::cout << " config {" << ::std::endl;
1530 ::std::cout << " size_t num_watchers = " << memory->config.num_watchers
1531 << ::std::endl;
1532 ::std::cout << " size_t num_senders = " << memory->config.num_senders
1533 << ::std::endl;
Brian Silverman177567e2020-08-12 19:51:33 -07001534 ::std::cout << " size_t num_pinners = " << memory->config.num_pinners
1535 << ::std::endl;
Austin Schuh20b2b082019-09-11 20:42:56 -07001536 ::std::cout << " size_t queue_size = " << memory->config.queue_size
1537 << ::std::endl;
1538 ::std::cout << " size_t message_data_size = "
1539 << memory->config.message_data_size << ::std::endl;
1540
1541 ::std::cout << " AtomicQueueIndex next_queue_index = "
1542 << memory->next_queue_index.Load(queue_size).DebugString()
1543 << ::std::endl;
1544
Austin Schuh3328d132020-02-28 13:54:57 -08001545 ::std::cout << " uid_t uid = " << memory->uid << ::std::endl;
1546
Austin Schuh20b2b082019-09-11 20:42:56 -07001547 ::std::cout << " }" << ::std::endl;
1548 ::std::cout << " AtomicIndex queue[" << queue_size << "] {" << ::std::endl;
1549 for (size_t i = 0; i < queue_size; ++i) {
1550 ::std::cout << " [" << i << "] -> "
1551 << memory->GetQueue(i)->Load().DebugString() << ::std::endl;
1552 }
1553 ::std::cout << " }" << ::std::endl;
1554 ::std::cout << " Message messages[" << memory->num_messages() << "] {"
1555 << ::std::endl;
1556 for (size_t i = 0; i < memory->num_messages(); ++i) {
Austin Schuh83cbb1e2023-06-23 12:59:02 -07001557 const Message *m = memory->GetMessage(Index(i, i));
Brian Silverman001f24d2020-08-12 19:33:20 -07001558 ::std::cout << " [" << i << "] -> Message 0x" << std::hex
1559 << (reinterpret_cast<uintptr_t>(
1560 memory->GetMessage(Index(i, i))) -
1561 reinterpret_cast<uintptr_t>(memory))
1562 << std::dec << " {" << ::std::endl;
Austin Schuh20b2b082019-09-11 20:42:56 -07001563 ::std::cout << " Header {" << ::std::endl;
1564 ::std::cout << " AtomicQueueIndex queue_index = "
1565 << m->header.queue_index.Load(queue_size).DebugString()
1566 << ::std::endl;
Brian Silverman001f24d2020-08-12 19:33:20 -07001567 ::std::cout << " monotonic_clock::time_point monotonic_sent_time = "
1568 << m->header.monotonic_sent_time << " 0x" << std::hex
1569 << m->header.monotonic_sent_time.time_since_epoch().count()
1570 << std::dec << ::std::endl;
1571 ::std::cout << " realtime_clock::time_point realtime_sent_time = "
1572 << m->header.realtime_sent_time << " 0x" << std::hex
1573 << m->header.realtime_sent_time.time_since_epoch().count()
1574 << std::dec << ::std::endl;
1575 ::std::cout
1576 << " monotonic_clock::time_point monotonic_remote_time = "
1577 << m->header.monotonic_remote_time << " 0x" << std::hex
1578 << m->header.monotonic_remote_time.time_since_epoch().count()
1579 << std::dec << ::std::endl;
1580 ::std::cout << " realtime_clock::time_point realtime_remote_time = "
1581 << m->header.realtime_remote_time << " 0x" << std::hex
1582 << m->header.realtime_remote_time.time_since_epoch().count()
1583 << std::dec << ::std::endl;
Austin Schuh20b2b082019-09-11 20:42:56 -07001584 ::std::cout << " size_t length = " << m->header.length
1585 << ::std::endl;
1586 ::std::cout << " }" << ::std::endl;
Austin Schuhbe416742020-10-03 17:24:26 -07001587 const bool corrupt = CheckBothRedzones(memory, m);
1588 if (corrupt) {
Austin Schuh83cbb1e2023-06-23 12:59:02 -07001589 absl::Span<const char> pre_redzone =
1590 m->PreRedzone(memory->message_data_size());
1591 absl::Span<const char> post_redzone =
Austin Schuhbe416742020-10-03 17:24:26 -07001592 m->PostRedzone(memory->message_data_size(), memory->message_size());
1593
1594 ::std::cout << " pre-redzone: \""
1595 << absl::BytesToHexString(std::string_view(
1596 pre_redzone.data(), pre_redzone.size()))
1597 << "\"" << std::endl;
Brian Silverman0eaa1da2020-08-12 20:03:52 -07001598 ::std::cout << " // *** DATA REDZONES ARE CORRUPTED ***"
1599 << ::std::endl;
Austin Schuhbe416742020-10-03 17:24:26 -07001600 ::std::cout << " post-redzone: \""
1601 << absl::BytesToHexString(std::string_view(
1602 post_redzone.data(), post_redzone.size()))
1603 << "\"" << std::endl;
Brian Silverman0eaa1da2020-08-12 20:03:52 -07001604 }
Austin Schuh20b2b082019-09-11 20:42:56 -07001605 ::std::cout << " data: {";
1606
Brian Silverman001f24d2020-08-12 19:33:20 -07001607 if (FLAGS_dump_lockless_queue_data) {
1608 const char *const m_data = m->data(memory->message_data_size());
Austin Schuhbe416742020-10-03 17:24:26 -07001609 std::cout << absl::BytesToHexString(std::string_view(
1610 m_data, corrupt ? memory->message_data_size() : m->header.length));
Austin Schuh20b2b082019-09-11 20:42:56 -07001611 }
1612 ::std::cout << ::std::setfill(' ') << ::std::dec << "}" << ::std::endl;
1613 ::std::cout << " }," << ::std::endl;
1614 }
1615 ::std::cout << " }" << ::std::endl;
1616
Alex Perrycb7da4b2019-08-28 19:35:56 -07001617 ::std::cout << " Sender senders[" << memory->num_senders() << "] {"
1618 << ::std::endl;
Austin Schuh20b2b082019-09-11 20:42:56 -07001619 for (size_t i = 0; i < memory->num_senders(); ++i) {
Austin Schuh83cbb1e2023-06-23 12:59:02 -07001620 const Sender *s = memory->GetSender(i);
Austin Schuh20b2b082019-09-11 20:42:56 -07001621 ::std::cout << " [" << i << "] -> Sender {" << ::std::endl;
Philipp Schraderab2f8432023-09-17 18:58:06 -07001622 ::std::cout << " RobustOwnershipTracker ownership_tracker = "
1623 << s->ownership_tracker.DebugString() << ::std::endl;
Austin Schuh20b2b082019-09-11 20:42:56 -07001624 ::std::cout << " AtomicIndex scratch_index = "
1625 << s->scratch_index.Load().DebugString() << ::std::endl;
1626 ::std::cout << " AtomicIndex to_replace = "
1627 << s->to_replace.Load().DebugString() << ::std::endl;
1628 ::std::cout << " }" << ::std::endl;
1629 }
1630 ::std::cout << " }" << ::std::endl;
1631
Brian Silverman177567e2020-08-12 19:51:33 -07001632 ::std::cout << " Pinner pinners[" << memory->num_pinners() << "] {"
1633 << ::std::endl;
1634 for (size_t i = 0; i < memory->num_pinners(); ++i) {
Austin Schuh83cbb1e2023-06-23 12:59:02 -07001635 const Pinner *p = memory->GetPinner(i);
Brian Silverman177567e2020-08-12 19:51:33 -07001636 ::std::cout << " [" << i << "] -> Pinner {" << ::std::endl;
Philipp Schraderab2f8432023-09-17 18:58:06 -07001637 ::std::cout << " RobustOwnershipTracker ownership_tracker = "
1638 << p->ownership_tracker.DebugString() << ::std::endl;
Brian Silverman177567e2020-08-12 19:51:33 -07001639 ::std::cout << " AtomicIndex scratch_index = "
1640 << p->scratch_index.Load().DebugString() << ::std::endl;
1641 ::std::cout << " AtomicIndex pinned = "
1642 << p->pinned.Load(memory->queue_size()).DebugString()
1643 << ::std::endl;
1644 ::std::cout << " }" << ::std::endl;
1645 }
1646 ::std::cout << " }" << ::std::endl;
1647
Austin Schuh20b2b082019-09-11 20:42:56 -07001648 ::std::cout << " Watcher watchers[" << memory->num_watchers() << "] {"
1649 << ::std::endl;
1650 for (size_t i = 0; i < memory->num_watchers(); ++i) {
Austin Schuh83cbb1e2023-06-23 12:59:02 -07001651 const Watcher *w = memory->GetWatcher(i);
Austin Schuh20b2b082019-09-11 20:42:56 -07001652 ::std::cout << " [" << i << "] -> Watcher {" << ::std::endl;
Philipp Schraderab2f8432023-09-17 18:58:06 -07001653 ::std::cout << " RobustOwnershipTracker ownership_tracker = "
1654 << w->ownership_tracker.DebugString() << ::std::endl;
Austin Schuh20b2b082019-09-11 20:42:56 -07001655 ::std::cout << " pid_t pid = " << w->pid << ::std::endl;
1656 ::std::cout << " int priority = " << w->priority << ::std::endl;
1657 ::std::cout << " }" << ::std::endl;
1658 }
1659 ::std::cout << " }" << ::std::endl;
1660
1661 ::std::cout << "}" << ::std::endl;
1662}
1663
Stephan Pleinesf63bde82024-01-13 15:59:33 -08001664} // namespace aos::ipc_lib