blob: 575d113cf441b992e74fdd4e6472894134b7b21c [file] [log] [blame]
Austin Schuh25356e22019-09-11 19:27:07 -07001#ifndef AOS_IPC_LIB_INDEX_H_
2#define AOS_IPC_LIB_INDEX_H_
3
#include <stdint.h>

#include <atomic>
#include <limits>
#include <string>

#include "absl/log/check.h"
#include "absl/log/log.h"

#include "aos/ipc_lib/shm_observers.h"

Austin Schuh83cbb1e2023-06-23 12:59:02 -070013#ifndef AOS_QUEUE_ATOMIC_SIZE
14#if UINTPTR_MAX == 0xffffffff
15#define AOS_QUEUE_ATOMIC_SIZE 32
16/* 32-bit */
17#elif UINTPTR_MAX == 0xffffffffffffffff
18#define AOS_QUEUE_ATOMIC_SIZE 64
19/* 64-bit */
20#else
21#error "Unknown pointer size"
22#endif
23#endif
24
Stephan Pleinesd99b1ee2024-02-02 20:56:44 -080025namespace aos::ipc_lib {
Austin Schuh25356e22019-09-11 19:27:07 -070026
Austin Schuh25356e22019-09-11 19:27:07 -070027namespace testing {
28class QueueIndexTest;
29} // namespace testing
30
31// There are 2 types of indices in the queue. 1 is the index into the overall
32// queue. If we have sent 1,000,005 messages, this is that number.
33//
34// The other is the index into the message list. This is essentially a message
35// pointer. It has some of the lower bits of the queue index encoded into it as
36// a safeguard to detect if someone re-used a message out from under us and we
37// couldn't tell otherwise. It is used to map a queue index to a message index.
38//
39// Each of these index types has an atomic version and a non-atomic. The atomic
40// version is a wrapper around a uint32_t to hang helper functions off of.
41// The non-atomic version contains all the logic. These are all in the header
42// file to encourage the compiler to inline aggressively.
43//
44// The user should very infrequently be manipulating the values of these
45// directly. Use the classes instead to do the heavy lifting.
46
// Structure for holding the index into the queue.
//
// This is the "how many messages have ever been sent" counter.  It lives in a
// uint32_t, wraps at MaxIndex(...) rather than at 2^32 so the reserved
// sentinel value 0xffffffff can never be produced by normal arithmetic, and
// carries the queue element count along with it so wrapping math is possible
// without passing count into every call.
class QueueIndex {
 public:
  // Type wide enough to hold the low bits of a queue index that get packed
  // into an Index.  Half the width of the atomic in use: 32 bits of queue
  // index fit in a 64-bit atomic, 16 bits in a 32-bit atomic.
#if AOS_QUEUE_ATOMIC_SIZE == 64
  typedef uint32_t PackedIndexType;
#else
  typedef uint16_t PackedIndexType;
#endif

  // Returns an invalid queue element which uses a reserved value.
  static QueueIndex Invalid() { return QueueIndex(sentinal_value(), 0); }
  // Returns a queue element pointing to 0.
  static QueueIndex Zero(uint32_t count) { return QueueIndex(0, count); }

  // Returns true if the index is valid (i.e. not the reserved sentinel).
  bool valid() const { return index_ != sentinal_value(); }

  // Returns the modulo base used to wrap to avoid overlapping with the
  // reserved number.  This is the largest multiple of count that fits below
  // max_value, so Wrapped() stays consistent across the wrap point.
  // max_value is one more than the max value we can store.
  // count is the number of elements in the queue.
  static constexpr uint32_t MaxIndex(uint32_t max_value, uint32_t count) {
    return (max_value / count) * count;
  }

  // Gets the next index.
  QueueIndex Increment() const { return IncrementBy(1u); }

  // Gets the nth next element.  amount is assumed to be less than
  // MaxIndex(...), which holds for all queue-sized steps.
  QueueIndex IncrementBy(uint32_t amount) const {
    uint32_t index = index_ + amount;
    uint32_t max_index = MaxIndex(sentinal_value(), count_);

    if (index < index_) {
      // We wrapped (unsigned overflow).  We are shifting up by
      // 0x100000000 - MaxIndex(...).  Which is equivalent to subtracting
      // MaxIndex since everything is modular with a uint32_t.
      index -= max_index;
    }

    // Now, wrap the remainder so we never reach the sentinel.
    index = index % max_index;
    return QueueIndex(index, count_);
  }

  // Gets the nth previous element.
  QueueIndex DecrementBy(uint32_t amount) const {
    uint32_t index = index_ - amount;
    if (index > index_) {
      // We wrapped (unsigned underflow).  We are shifting down by
      // 0x100000000 - MaxIndex(...).  Which is equivalent to adding MaxIndex
      // since everything is modular with a uint32_t.
      index += MaxIndex(sentinal_value(), count_);
    }
    return QueueIndex(index, count_);
  }

  // Returns true if the lowest bits of the queue index from the Index could
  // plausibly match this queue index.  The number of bits matched depends on
  // the size of atomics in use.  This is the safeguard used to detect a
  // message being reused out from under a reader.
  bool IsPlausible(PackedIndexType queue_index) const {
    return valid() &&
           (queue_index ==
            static_cast<PackedIndexType>(
                index_ & std::numeric_limits<PackedIndexType>::max()));
  }

  // Equality compares the raw index only; count_ is assumed to match.
  bool operator==(const QueueIndex other) const {
    return other.index_ == index_;
  }

  bool operator!=(const QueueIndex other) const {
    return other.index_ != index_;
  }

  // Returns the wrapped index into the queue, i.e. the message slot this
  // queue index maps to.
  uint32_t Wrapped() const { return index_ % count_; }

  // Returns the raw index.  This should be used very sparingly.
  uint32_t index() const { return index_; }

  // Returns a QueueIndex pointing back at element 0, keeping the same count.
  QueueIndex Clear() const { return QueueIndex(0, count_); }

  // Returns a string representing the index.  (Defined in the .cc file.)
  ::std::string DebugString() const;

 private:
  QueueIndex(uint32_t index, uint32_t count) : index_(index), count_(count) {}

  // Reserved value used to mean "invalid"; all wrapping arithmetic above is
  // designed so a valid index can never equal this.
  static constexpr uint32_t sentinal_value() { return 0xffffffffu; }

  friend struct AtomicQueueIndex;
  friend class Index;
  // For testing.
  friend class testing::QueueIndexTest;

  // Index and number of elements in the queue.
  uint32_t index_;
  // Count is stored here rather than passed in everywhere in the hopes that the
  // compiler completely optimizes out this class and this variable if it isn't
  // used.
  uint32_t count_;
};
150
// Atomic storage for setting and getting QueueIndex objects.
// Count is the number of messages in the queue; it is not stored here, so the
// caller must supply it on every load to reconstruct a full QueueIndex.
struct AtomicQueueIndex {
 public:
  // Atomically reads the index without any ordering constraints.
  QueueIndex RelaxedLoad(uint32_t count) const {
    return QueueIndex(index_.load(::std::memory_order_relaxed), count);
  }

  // Load/Store form an acquire/release pair: a Load that observes a Store
  // also observes all writes made before that Store.
  QueueIndex Load(uint32_t count) const {
    return QueueIndex(index_.load(::std::memory_order_acquire), count);
  }
  inline void Store(QueueIndex value) {
    index_.store(value.index_, ::std::memory_order_release);
  }

  // Invalidates the element unconditionally, with release ordering.
  inline void Invalidate() { Store(QueueIndex::Invalid()); }

  // Invalidates the element unconditionally, without ordering constraints.
  inline void RelaxedInvalidate() {
    index_.store(QueueIndex::Invalid().index_, ::std::memory_order_relaxed);
  }

  // Swaps expected for index atomically.  Returns true on success, false
  // otherwise.  expected is taken by value, so on failure the caller does not
  // see the observed value; re-load explicitly if needed.  Runs the shared
  // memory observers around the operation (used for death-test robustness).
  inline bool CompareAndExchangeStrong(QueueIndex expected, QueueIndex index) {
    linux_code::ipc_lib::RunShmObservers run_observers(&index_, true);
    return index_.compare_exchange_strong(expected.index_, index.index_,
                                          ::std::memory_order_acq_rel);
  }

 private:
  ::std::atomic<uint32_t> index_;
};
186
// Structure holding the queue index and the index into the message list.
//
// Packed into a single atomic-sized word: the low PackedIndexType bits hold
// the low bits of the QueueIndex (a plausibility check against reuse), and the
// high bits hold the message array index.
class Index {
 public:
  // Storage and field widths depend on the native atomic size: with 64-bit
  // atomics we pack a 32-bit queue index with a 32-bit message index; with
  // 32-bit atomics, 16 bits of each.
#if AOS_QUEUE_ATOMIC_SIZE == 64
  typedef uint64_t IndexType;
  typedef uint32_t MessageIndexType;
#else
  typedef uint32_t IndexType;
  typedef uint16_t MessageIndexType;
#endif
  typedef QueueIndex::PackedIndexType PackedIndexType;

  // Constructs an Index.  queue_index is the QueueIndex of this message, and
  // message_index is the index into the messages structure.
  Index(QueueIndex queue_index, MessageIndexType message_index)
      : Index(queue_index.index_, message_index) {}
  // Same, from a raw queue index.  Only the low PackedIndexType bits of
  // queue_index are kept; message_index goes in the high bits.
  Index(uint32_t queue_index, MessageIndexType message_index)
      : index_(static_cast<IndexType>(
                   queue_index & std::numeric_limits<PackedIndexType>::max()) |
               (static_cast<IndexType>(message_index)
                << std::numeric_limits<PackedIndexType>::digits)) {
    // MaxMessages() is one below the type max, which guarantees a valid Index
    // can never collide with the all-ones sentinel.
    CHECK_LE(message_index, MaxMessages());
  }

  // Index of this message in the message array (the high bits).
  MessageIndexType message_index() const {
    return (index_ >> std::numeric_limits<PackedIndexType>::digits) &
           std::numeric_limits<MessageIndexType>::max();
  }

  // Lowest bits of the queue index of this message in the queue.  This will
  // either be 16 or 32 bits, depending on if we have 32 or 64 bit atomics
  // under the cover.
  PackedIndexType queue_index() const {
    return index_ & std::numeric_limits<PackedIndexType>::max();
  }

  // Returns true if the provided queue index plausibly represents this Index.
  bool IsPlausible(QueueIndex queue_index) const {
    return queue_index.IsPlausible(this->queue_index());
  }

  // Returns an invalid Index.
  static Index Invalid() { return Index(sentinal_value()); }
  // Checks if this Index is valid or not.
  bool valid() const { return index_ != sentinal_value(); }

  // Returns the raw Index.  This should only be used for debug.
  IndexType get() const { return index_; }

  // Returns the maximum number of messages we can store before overflowing.
  // One below the type max so the sentinel stays unreachable (see index_).
  static constexpr MessageIndexType MaxMessages() {
    return std::numeric_limits<MessageIndexType>::max() - 1;
  }

  bool operator==(const Index other) const { return other.index_ == index_; }
  bool operator!=(const Index other) const { return other.index_ != index_; }

  // Returns a string representing the index.  (Defined in the .cc file.)
  std::string DebugString() const;

 private:
  Index(IndexType index) : index_(index) {}

  friend class AtomicIndex;

  // All-ones bit pattern reserved to mean "invalid".
  static constexpr IndexType sentinal_value() {
    return std::numeric_limits<IndexType>::max();
  }

  // Note: a value of all 1 bits is a sentinal to represent an invalid entry.
  // This works because we would need to have a queue index of 0x*ffff, *and*
  // have 0xffff messages in the message list.  That constraint is easy to
  // enforce by limiting the max messages.
  IndexType index_;
};
263
// Atomic storage for setting and getting Index objects.
class AtomicIndex {
 public:
  // Stores and loads atomically without ordering constraints.
  Index RelaxedLoad() const {
    return Index(index_.load(::std::memory_order_relaxed));
  }
  void RelaxedStore(Index index) {
    index_.store(index.index_, ::std::memory_order_relaxed);
  }

  // Invalidates the index atomically, but without any ordering constraints.
  void RelaxedInvalidate() { RelaxedStore(Index::Invalid()); }

  // The operations below use acquire/release (or acq_rel) ordering, so an
  // acquiring Load pairs with a releasing Store.
  void Invalidate() { Store(Index::Invalid()); }
  void Store(Index index) {
    index_.store(index.index_, ::std::memory_order_release);
  }
  Index Load() const { return Index(index_.load(::std::memory_order_acquire)); }

  // Swaps expected for index atomically.  Returns true on success, false
  // otherwise.  expected is taken by value, so the caller does not observe
  // the actual value on failure.  Runs the shared memory observers around the
  // operation.
  bool CompareAndExchangeStrong(Index expected, Index index) {
    linux_code::ipc_lib::RunShmObservers run_observers(&index_, true);
    return index_.compare_exchange_strong(expected.index_, index.index_,
                                          ::std::memory_order_acq_rel);
  }

  // Weak CAS (may fail spuriously; retry in a loop).  expected is updated with
  // the observed value on failure.
  // NOTE(review): unlike CompareAndExchangeStrong, this does not run the shm
  // observers — confirm that is intentional for its call sites.
  bool CompareAndExchangeWeak(Index *expected, Index index) {
    return index_.compare_exchange_weak(expected->index_, index.index_,
                                        ::std::memory_order_acq_rel);
  }

 private:
  ::std::atomic<Index::IndexType> index_;
};
301
Stephan Pleinesd99b1ee2024-02-02 20:56:44 -0800302} // namespace aos::ipc_lib
Austin Schuh25356e22019-09-11 19:27:07 -0700303
304#endif // AOS_IPC_LIB_INDEX_H_