blob: ce067e6764b254e61022b6107f7fc9c10c1d69e1 [file] [log] [blame]
Austin Schuh20b2b082019-09-11 20:42:56 -07001#ifndef AOS_IPC_LIB_LOCKLESS_QUEUE_H_
2#define AOS_IPC_LIB_LOCKLESS_QUEUE_H_
3
Stephan Pleines682928d2024-05-31 20:43:48 -07004#include <signal.h>
5#include <stdint.h>
Tyler Chatowbf0609c2021-07-31 16:13:27 -07006
Stephan Pleines682928d2024-05-31 20:43:48 -07007#include <atomic>
8#include <functional>
9#include <iosfwd>
Austin Schuhe516ab02020-05-06 21:37:04 -070010#include <optional>
Stephan Pleines682928d2024-05-31 20:43:48 -070011#include <utility>
Brian Silverman177567e2020-08-12 19:51:33 -070012#include <vector>
Austin Schuh20b2b082019-09-11 20:42:56 -070013
Brian Silverman0eaa1da2020-08-12 20:03:52 -070014#include "absl/types/span.h"
Stephan Pleines682928d2024-05-31 20:43:48 -070015#include "glog/logging.h"
Philipp Schrader790cb542023-07-05 21:06:52 -070016
Austin Schuh82ea7382023-07-14 15:17:34 -070017#include "aos/events/context.h"
Austin Schuh20b2b082019-09-11 20:42:56 -070018#include "aos/ipc_lib/aos_sync.h"
Brian Silvermana1652f32020-01-29 20:41:44 -080019#include "aos/ipc_lib/data_alignment.h"
Austin Schuh20b2b082019-09-11 20:42:56 -070020#include "aos/ipc_lib/index.h"
Philipp Schraderab2f8432023-09-17 18:58:06 -070021#include "aos/ipc_lib/robust_ownership_tracker.h"
Austin Schuh20b2b082019-09-11 20:42:56 -070022#include "aos/time/time.h"
Austin Schuh8902fa52021-03-14 22:39:24 -070023#include "aos/uuid.h"
Austin Schuh20b2b082019-09-11 20:42:56 -070024
Stephan Pleinesd99b1ee2024-02-02 20:56:44 -080025namespace aos::ipc_lib {
Austin Schuh20b2b082019-09-11 20:42:56 -070026
// Structure to hold the state required to wake a watcher.
//
// Lives in shared memory; all fields are lock-free-readable (atomics or the
// robust-ownership futex) because readers may inspect a watcher slot without
// holding any lock.
struct Watcher {
  // Mutex that the watcher locks.  If the futex is 0 (or FUTEX_OWNER_DIED),
  // then this watcher is invalid.  The futex variable will then hold the tid of
  // the watcher, or FUTEX_OWNER_DIED if the task died.
  //
  // Note: this is only modified with the queue_setup_lock lock held, but may
  // always be read.
  // Any state modification should happen before the lock is acquired.
  RobustOwnershipTracker ownership_tracker;

  // PID of the watcher.  Atomic because it is read by other processes (e.g.
  // the wake-upper) without synchronization.
  std::atomic<pid_t> pid;

  // RT priority of the watcher.  Atomic for the same reason as pid.
  std::atomic<int> priority;
};
44
// Structure to hold the state required to send messages.
//
// Lives in shared memory; one slot exists per configured sender
// (LocklessQueueConfiguration::num_senders).
struct Sender {
  // Mutex that the sender locks.  If the futex is 0 (or FUTEX_OWNER_DIED), then
  // this sender is invalid.  The futex variable will then hold the tid of the
  // sender, or FUTEX_OWNER_DIED if the task died.
  //
  // Note: this is only modified with the queue_setup_lock lock held, but may
  // always be read.
  RobustOwnershipTracker ownership_tracker;

  // Index of the message we will be filling out.
  AtomicIndex scratch_index;

  // Index of the element being swapped with scratch_index, or Invalid if there
  // is nothing to do.
  AtomicIndex to_replace;
};
62
// Structure to hold the state required to pin messages.
//
// Lives in shared memory; one slot exists per configured pinner
// (LocklessQueueConfiguration::num_pinners).
struct Pinner {
  // The same as Sender::ownership_tracker.  See there for docs.
  RobustOwnershipTracker ownership_tracker;

  // Queue index of the message we have pinned, or Invalid if there isn't one.
  AtomicQueueIndex pinned;

  // This should always be valid.
  //
  // Note that this is fully independent from pinned.  It's just a place to
  // stash a message, to ensure there's always an unpinned one for a writer to
  // grab.
  AtomicIndex scratch_index;
};
77
// Structure representing a message.
//
// Memory layout (allocated as one contiguous chunk of message_size bytes):
//   [Header][pre-redzone + alignment padding][data][post-redzone]
// The data region starts at an aligned offset past kChannelDataRedzone (see
// RoundedData), and the regions before and after it act as redzones that are
// checked for corruption.
struct Message {
  struct Header {
    // Index of this message in the queue.  Needs to match the index this
    // message is written into the queue at.  The data in this message is only
    // valid if it matches the index in the queue both before and after all the
    // data is read.
    //
    // Note: a value of 0xffffffff always means that the contents aren't valid.
    AtomicQueueIndex queue_index;

    // Timestamp of the message.  Needs to be monotonically incrementing in the
    // queue, which means that time needs to be re-sampled every time a write
    // fails.
    monotonic_clock::time_point monotonic_sent_time;
    realtime_clock::time_point realtime_sent_time;
    // Timestamps of the message from the remote node.  These are transparently
    // passed through.
    monotonic_clock::time_point monotonic_remote_time;
    realtime_clock::time_point realtime_remote_time;
    monotonic_clock::time_point monotonic_remote_transmit_time;

    // Queue index from the remote node.
    uint32_t remote_queue_index;

    // Remote boot UUID for this message.
    UUID source_boot_uuid;

    // Number of valid data bytes in this message (see the length argument of
    // LocklessQueueSender::Send).
    size_t length;
  } header;

  // Returns the start of the data buffer, given that message_data_size is
  // the same one used to allocate this message's memory.
  char *data(size_t message_data_size) {
    return RoundedData(message_data_size);
  }
  const char *data(size_t message_data_size) const {
    return RoundedData(message_data_size);
  }

  // Returns the pre-buffer redzone, given that message_data_size is the same
  // one used to allocate this message's memory.  This spans from the start of
  // data_pointer up to (but not including) the aligned data start, so its size
  // is kChannelDataRedzone plus any alignment padding.
  absl::Span<char> PreRedzone(size_t message_data_size) {
    char *const end = data(message_data_size);
    const auto result =
        absl::Span<char>(&data_pointer[0], end - &data_pointer[0]);
    DCHECK_LT(result.size(), kChannelDataRedzone + kChannelDataAlignment);
    return result;
  }
  absl::Span<const char> PreRedzone(size_t message_data_size) const {
    const char *const end = data(message_data_size);
    const auto result =
        absl::Span<const char>(&data_pointer[0], end - &data_pointer[0]);
    DCHECK_LT(result.size(), kChannelDataRedzone + kChannelDataAlignment);
    return result;
  }

  // Returns the post-buffer redzone, given that message_data_size is the same
  // one used to allocate this message's memory.  message_size is the total
  // allocated size of this Message (header + redzones + data); the post
  // redzone runs from the end of the data region to the end of the
  // allocation.
  absl::Span<char> PostRedzone(size_t message_data_size, size_t message_size) {
    DCHECK_LT(message_data_size, message_size);
    char *const redzone_end = reinterpret_cast<char *>(this) + message_size;
    char *const data_end = data(message_data_size) + message_data_size;
    DCHECK_GT(static_cast<void *>(redzone_end), static_cast<void *>(data_end));
    const auto result = absl::Span<char>(data_end, redzone_end - data_end);
    DCHECK_LT(result.size(), kChannelDataRedzone + kChannelDataAlignment * 2);
    return result;
  }
  absl::Span<const char> PostRedzone(size_t message_data_size,
                                     size_t message_size) const {
    DCHECK_LT(message_data_size, message_size);
    const char *const redzone_end =
        reinterpret_cast<const char *>(this) + message_size;
    const char *const data_end = data(message_data_size) + message_data_size;
    DCHECK_GT(static_cast<const void *>(redzone_end),
              static_cast<const void *>(data_end));
    const auto result =
        absl::Span<const char>(data_end, redzone_end - data_end);
    DCHECK_LT(result.size(), kChannelDataRedzone + kChannelDataAlignment * 2);
    return result;
  }

 private:
  // This returns a non-const pointer into a const object.  Be very careful
  // about const correctness in publicly accessible APIs using it.
  char *RoundedData(size_t message_data_size) const {
    return RoundChannelData(
        const_cast<char *>(&data_pointer[0] + kChannelDataRedzone),
        message_data_size);
  }

  // Flexible array member: the redzones and data live in trailing storage, so
  // Messages must be allocated with message_size() bytes rather than
  // sizeof(Message).
  char data_pointer[];
};
171
// Compile-time-independent sizing parameters for a lockless queue.  These
// determine how much shared memory the queue needs (see
// LocklessQueueMemorySize).
struct LocklessQueueConfiguration {
  // Size of the watchers list.
  size_t num_watchers;
  // Size of the sender list.
  size_t num_senders;
  // Size of the pinner list.
  size_t num_pinners;

  // Size of the list of pointers into the messages list.
  size_t queue_size;
  // Size in bytes of the data stored in each Message.
  size_t message_data_size;

  // Returns the total allocated size of one Message (header, redzones, and
  // data).  Defined out of line.
  size_t message_size() const;

  // Total number of Message slots backing the queue: each sender and each
  // pinner owns one scratch slot in addition to the queue_size slots
  // reachable through the queue itself.
  size_t num_messages() const { return num_senders + num_pinners + queue_size; }
};
189
190// Structure to hold the state of the queue.
191//
192// Reads and writes are lockless and constant time.
193//
194// Adding a new watcher doesn't need to be constant time for the watcher (this
195// is done before the watcher goes RT), but needs to be RT for the sender.
196struct LocklessQueueMemory;
197
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700198// Returns the size of the LocklessQueueMemory.
199size_t LocklessQueueMemorySize(LocklessQueueConfiguration config);
200
Austin Schuh20b2b082019-09-11 20:42:56 -0700201// Initializes the queue memory. memory must be either a valid pointer to the
202// queue datastructure, or must be zero initialized.
203LocklessQueueMemory *InitializeLocklessQueueMemory(
204 LocklessQueueMemory *memory, LocklessQueueConfiguration config);
205
// Real-time signal used to wake registered watchers (see LocklessQueueWatcher
// and LocklessQueueWakeUpper::Wakeup).  SIGRTMIN is not a compile-time
// constant on glibc, so this cannot be constexpr; each translation unit gets
// its own internal-linkage copy with the same runtime value.
const static unsigned int kWakeupSignal = SIGRTMIN + 2;
Austin Schuh20b2b082019-09-11 20:42:56 -0700207
Philipp Schraderab2f8432023-09-17 18:58:06 -0700208// Sets FUTEX_OWNER_DIED if the owner was tid. This fakes what the kernel does
209// with a robust mutex.
210bool PretendThatOwnerIsDeadForTesting(aos_mutex *mutex, pid_t tid);
211
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700212// A convenient wrapper for accessing a lockless queue.
Austin Schuh20b2b082019-09-11 20:42:56 -0700213class LocklessQueue {
214 public:
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700215 LocklessQueue(const LocklessQueueMemory *const_memory,
216 LocklessQueueMemory *memory, LocklessQueueConfiguration config)
217 : const_memory_(const_memory), memory_(memory), config_(config) {}
Austin Schuh20b2b082019-09-11 20:42:56 -0700218
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700219 void Initialize();
Austin Schuh20b2b082019-09-11 20:42:56 -0700220
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700221 LocklessQueueConfiguration config() const { return config_; }
Austin Schuh20b2b082019-09-11 20:42:56 -0700222
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700223 const LocklessQueueMemory *const_memory() { return const_memory_; }
224 LocklessQueueMemory *memory() { return memory_; }
Alex Perrycb7da4b2019-08-28 19:35:56 -0700225
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700226 private:
227 const LocklessQueueMemory *const_memory_;
228 LocklessQueueMemory *memory_;
229 LocklessQueueConfiguration config_;
230};
231
232class LocklessQueueWatcher {
233 public:
234 LocklessQueueWatcher(const LocklessQueueWatcher &) = delete;
235 LocklessQueueWatcher &operator=(const LocklessQueueWatcher &) = delete;
236 LocklessQueueWatcher(LocklessQueueWatcher &&other)
237 : memory_(other.memory_), watcher_index_(other.watcher_index_) {
238 other.watcher_index_ = -1;
239 }
240 LocklessQueueWatcher &operator=(LocklessQueueWatcher &&other) {
241 std::swap(memory_, other.memory_);
242 std::swap(watcher_index_, other.watcher_index_);
243 return *this;
244 }
245
246 ~LocklessQueueWatcher();
247
248 // Registers this thread to receive the kWakeupSignal signal when
249 // LocklessQueueWakeUpper::Wakeup is called. Returns nullopt if there was an
250 // error in registration.
251 // TODO(austin): Change the API if we find ourselves with more errors.
252 static std::optional<LocklessQueueWatcher> Make(LocklessQueue queue,
253 int priority);
254
255 private:
256 LocklessQueueWatcher(LocklessQueueMemory *memory, int priority);
257
258 LocklessQueueMemory *memory_ = nullptr;
259
260 // Index in the watcher list that our entry is, or -1 if no watcher is
261 // registered.
262 int watcher_index_ = -1;
263};
264
// Signals all registered watchers of a queue (see Wakeup).
//
// NOTE(review): the single-argument constructor is not `explicit`, so a
// LocklessQueue implicitly converts to a LocklessQueueWakeUpper — confirm no
// caller relies on that before tightening it.
class LocklessQueueWakeUpper {
 public:
  LocklessQueueWakeUpper(LocklessQueue queue);

  // Sends the kWakeupSignal to all threads which have called RegisterWakeup.
  //
  // priority of 0 means nonrt.  nonrt could have issues, so we don't PI boost
  // if nonrt.
  int Wakeup(int current_priority);

 private:
  // Memory and datastructure used to sort a list of watchers to wake
  // up.  This isn't a copy of Watcher since tid is simpler to work with here
  // than the futex above.
  struct WatcherCopy {
    ThreadOwnerStatusSnapshot ownership_snapshot;
    pid_t pid;
    int priority;
  };

  // Non-owning, read-only view of the shared queue memory.
  const LocklessQueueMemory *const memory_;
  // Our own pid/uid, captured at construction and used when signaling.
  const int pid_;
  const uid_t uid_;

  // Scratch list of watchers to wake, reused across Wakeup calls.
  ::std::vector<WatcherCopy> watcher_copy_;
};
291
// Sender for blocks of data.  The resources associated with a sender are
// scoped to this object's lifetime.  Move-only.
class LocklessQueueSender {
 public:
  // Enum of possible sending errors.
  // Send returns GOOD if the message was sent successfully, INVALID_REDZONE if
  // one of a message's redzones has invalid data, or MESSAGES_SENT_TOO_FAST if
  // more than queue_size messages were going to be sent in a
  // channel_storage_duration_.
  enum class Result { GOOD, INVALID_REDZONE, MESSAGES_SENT_TOO_FAST };

  LocklessQueueSender(const LocklessQueueSender &) = delete;
  LocklessQueueSender &operator=(const LocklessQueueSender &) = delete;
  LocklessQueueSender(LocklessQueueSender &&other)
      : memory_(other.memory_),
        sender_index_(other.sender_index_),
        channel_storage_duration_(other.channel_storage_duration_) {
    // Leave other unarmed so its destructor doesn't release our slot.
    other.memory_ = nullptr;
    other.sender_index_ = -1;
  }
  // NOTE(review): channel_storage_duration_ is const and therefore cannot be
  // swapped here, so move-assignment keeps the destination's storage duration
  // while taking the source's sender slot — confirm this is intended.
  LocklessQueueSender &operator=(LocklessQueueSender &&other) {
    std::swap(memory_, other.memory_);
    std::swap(sender_index_, other.sender_index_);
    return *this;
  }

  ~LocklessQueueSender();

  // Creates a sender.  If we couldn't allocate a sender, returns nullopt.
  // TODO(austin): Change the API if we find ourselves with more errors.
  static std::optional<LocklessQueueSender> Make(
      LocklessQueue queue, monotonic_clock::duration channel_storage_duration);

  // Sends a message without copying the data.
  // Copy at most size() bytes of data into the memory pointed to by Data(),
  // and then call Send().
  // Note: calls to Data() are expensive enough that you should cache it.
  size_t size() const;
  void *Data();
  LocklessQueueSender::Result Send(
      size_t length, monotonic_clock::time_point monotonic_remote_time,
      realtime_clock::time_point realtime_remote_time,
      monotonic_clock::time_point monotonic_remote_transmit_time,
      uint32_t remote_queue_index, const UUID &source_boot_uuid,
      monotonic_clock::time_point *monotonic_sent_time = nullptr,
      realtime_clock::time_point *realtime_sent_time = nullptr,
      uint32_t *queue_index = nullptr);

  // Sends up to length data.  Does not wakeup the target.
  LocklessQueueSender::Result Send(
      const char *data, size_t length,
      monotonic_clock::time_point monotonic_remote_time,
      realtime_clock::time_point realtime_remote_time,
      monotonic_clock::time_point monotonic_remote_transmit_time,
      uint32_t remote_queue_index, const UUID &source_boot_uuid,
      monotonic_clock::time_point *monotonic_sent_time = nullptr,
      realtime_clock::time_point *realtime_sent_time = nullptr,
      uint32_t *queue_index = nullptr);

  // Index of the buffer backing the message currently being filled out.
  int buffer_index() const;

 private:
  LocklessQueueSender(LocklessQueueMemory *memory,
                      monotonic_clock::duration channel_storage_duration);

  // Pointer to the backing memory.
  LocklessQueueMemory *memory_ = nullptr;

  // Index into the sender list.
  int sender_index_ = -1;

  // Storage duration of the channel used to check if messages were sent too
  // fast.
  const monotonic_clock::duration channel_storage_duration_;
};
367
Eric Schmiedebergef44b8a2022-02-28 17:30:38 -0700368std::ostream &operator<<(std::ostream &os, const LocklessQueueSender::Result r);
369
// Pinner for blocks of data.  The resources associated with a pinner are
// scoped to this object's lifetime.  Move-only.
class LocklessQueuePinner {
 public:
  LocklessQueuePinner(const LocklessQueuePinner &) = delete;
  LocklessQueuePinner &operator=(const LocklessQueuePinner &) = delete;
  // The moved-from object is left with pinner_index_ == -1 ("no pinner slot"),
  // which is what prevents its destructor from releasing our slot.
  // NOTE(review): memory_/const_memory_ are intentionally(?) left set in the
  // moved-from object — LocklessQueueSender's move constructor nulls memory_;
  // confirm the asymmetry is harmless.
  LocklessQueuePinner(LocklessQueuePinner &&other)
      : memory_(other.memory_),
        const_memory_(other.const_memory_),
        pinner_index_(other.pinner_index_) {
    other.pinner_index_ = -1;
  }
  LocklessQueuePinner &operator=(LocklessQueuePinner &&other) {
    // Swap so that other's destructor releases any slot we previously held.
    std::swap(memory_, other.memory_);
    std::swap(const_memory_, other.const_memory_);
    std::swap(pinner_index_, other.pinner_index_);
    return *this;
  }

  ~LocklessQueuePinner();

  // Creates a pinner.  If we couldn't allocate a pinner, returns nullopt.
  // TODO(austin): Change the API if we find ourselves with more errors.
  static std::optional<LocklessQueuePinner> Make(LocklessQueue queue);

  // Attempts to pin the message at queue_index.
  // Un-pins the previous message.
  // Returns the buffer index (non-negative) if it succeeds.
  // Returns -1 if that message is no longer in the queue.
  int PinIndex(uint32_t queue_index);

  // Read at most size() bytes of data into the memory pointed to by Data().
  // Note: calls to Data() are expensive enough that you should cache it.
  // Don't call Data() before a successful PinIndex call.
  size_t size() const;
  const void *Data() const;

 private:
  LocklessQueuePinner(LocklessQueueMemory *memory,
                      const LocklessQueueMemory *const_memory);

  // Pointer to the backing memory.
  LocklessQueueMemory *memory_ = nullptr;
  const LocklessQueueMemory *const_memory_ = nullptr;

  // Index into the pinner list.
  int pinner_index_ = -1;
};
418
// Reads messages from a lockless queue without disturbing writers.
class LocklessQueueReader {
 public:
  enum class Result {
    // Message we read was too old and no longer is in the queue.
    TOO_OLD,
    // Success!
    GOOD,
    // The message is in the future and we haven't written it yet.
    NOTHING_NEW,
    // There is a message, but should_read_callback() returned false so we
    // didn't fetch it.
    FILTERED,
    // The message got overwritten while we were reading it.
    OVERWROTE,
  };

  // Takes the queue handle by value (it is a lightweight wrapper) and ensures
  // the shared memory is initialized before reading.
  LocklessQueueReader(LocklessQueue queue)
      : memory_(queue.memory()), const_memory_(queue.const_memory()) {
    queue.Initialize();
  }

  // If you ask for a queue index 2 past the newest, you will still get
  // NOTHING_NEW until that gets overwritten with new data.  If you ask for an
  // element newer than QueueSize() from the current message, we consider it
  // behind by a large amount and return TOO_OLD.  If the message is modified
  // out from underneath us as we read it, return OVERWROTE.  If we found a new
  // message, but the filter function returned false, return FILTERED.
  //
  // data may be nullptr to indicate the data should not be copied.
  Result Read(
      uint32_t queue_index, monotonic_clock::time_point *monotonic_sent_time,
      realtime_clock::time_point *realtime_sent_time,
      monotonic_clock::time_point *monotonic_remote_time,
      monotonic_clock::time_point *monotonic_remote_transmit_time,
      realtime_clock::time_point *realtime_remote_time,
      uint32_t *remote_queue_index, UUID *source_boot_uuid, size_t *length,
      char *data,
      std::function<bool(const Context &context)> should_read_callback) const;

  // Returns the index to the latest queue message.  Returns empty_queue_index()
  // if there are no messages in the queue.  Do note that this index wraps if
  // more than 2^32 messages are sent.
  QueueIndex LatestIndex() const;

 private:
  // Non-owning pointers into the shared queue memory.
  // NOTE(review): memory_ is a const pointer but const_memory_ is not — looks
  // like an oversight; confirm before tightening.
  LocklessQueueMemory *const memory_;
  const LocklessQueueMemory *const_memory_;
};
467
468// Returns the number of messages which are logically in the queue at a time.
469size_t LocklessQueueSize(const LocklessQueueMemory *memory);
470
471// Returns the number of bytes queue users are allowed to read/write within each
472// message.
473size_t LocklessQueueMessageDataSize(const LocklessQueueMemory *memory);
474
475// TODO(austin): Return the oldest queue index. This lets us catch up nicely
476// if we got behind.
477// The easiest way to implement this is likely going to be to reserve the
478// first modulo of values for the initial time around, and never reuse them.
479// That lets us do a simple atomic read of the next index and deduce what has
480// happened. It will involve the simplest atomic operations.
481
482// TODO(austin): Make it so we can find the indices which were sent just
483// before and after a time with a binary search.
484
485// Prints to stdout the data inside the queue for debugging.
Austin Schuh83cbb1e2023-06-23 12:59:02 -0700486void PrintLocklessQueueMemory(const LocklessQueueMemory *memory);
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700487
Stephan Pleinesd99b1ee2024-02-02 20:56:44 -0800488} // namespace aos::ipc_lib
Austin Schuh20b2b082019-09-11 20:42:56 -0700489
490#endif // AOS_IPC_LIB_LOCKLESS_QUEUE_H_