blob: c4cc64a502b26ce0d89319440114d489d743fb08 [file] [log] [blame]
Austin Schuh25356e22019-09-11 19:27:07 -07001#ifndef AOS_IPC_LIB_INDEX_H_
2#define AOS_IPC_LIB_INDEX_H_
3
Austin Schuh83cbb1e2023-06-23 12:59:02 -07004#include <stdint.h>
Brian Silverman1ed5df52021-09-13 20:14:06 -07005
Stephan Pleines682928d2024-05-31 20:43:48 -07006#include <limits>
Austin Schuh25356e22019-09-11 19:27:07 -07007
Brian Silverman177567e2020-08-12 19:51:33 -07008#include "glog/logging.h"
9
Philipp Schrader790cb542023-07-05 21:06:52 -070010#include "aos/ipc_lib/shm_observers.h"
11
Austin Schuh83cbb1e2023-06-23 12:59:02 -070012#ifndef AOS_QUEUE_ATOMIC_SIZE
13#if UINTPTR_MAX == 0xffffffff
14#define AOS_QUEUE_ATOMIC_SIZE 32
15/* 32-bit */
16#elif UINTPTR_MAX == 0xffffffffffffffff
17#define AOS_QUEUE_ATOMIC_SIZE 64
18/* 64-bit */
19#else
20#error "Unknown pointer size"
21#endif
22#endif
23
Stephan Pleinesd99b1ee2024-02-02 20:56:44 -080024namespace aos::ipc_lib {
Austin Schuh25356e22019-09-11 19:27:07 -070025
Austin Schuh25356e22019-09-11 19:27:07 -070026namespace testing {
27class QueueIndexTest;
28} // namespace testing
29
30// There are 2 types of indices in the queue. 1 is the index into the overall
31// queue. If we have sent 1,000,005 messages, this is that number.
32//
33// The other is the index into the message list. This is essentially a message
34// pointer. It has some of the lower bits of the queue index encoded into it as
35// a safeguard to detect if someone re-used a message out from under us and we
36// couldn't tell otherwise. It is used to map a queue index to a message index.
37//
38// Each of these index types has an atomic version and a non-atomic. The atomic
39// version is a wrapper around a uint32_t to hang helper functions off of.
40// The non-atomic version contains all the logic. These are all in the header
41// file to encourage the compiler to inline aggressively.
42//
43// The user should very infrequently be manipulating the values of these
44// directly. Use the classes instead to do the heavy lifting.
45
46// Structure for holding the index into the queue.
class QueueIndex {
 public:
  // Type wide enough to hold the low bits of a queue index that get packed
  // into an Index: 32 bits when 64-bit atomics are available, 16 otherwise.
#if AOS_QUEUE_ATOMIC_SIZE == 64
  typedef uint32_t PackedIndexType;
#else
  typedef uint16_t PackedIndexType;
#endif

  // Returns an invalid queue element which uses a reserved value.
  static QueueIndex Invalid() { return QueueIndex(sentinal_value(), 0); }
  // Returns a queue element pointing to 0.
  static QueueIndex Zero(uint32_t count) { return QueueIndex(0, count); }

  // Returns true if the index is valid (i.e. not the reserved sentinel).
  bool valid() const { return index_ != sentinal_value(); }

  // Returns the modulo base used to wrap to avoid overlapping with the
  // reserved number.
  // max_value is one more than the max value we can store.
  // count is the number of elements in the queue.
  // The result is the largest multiple of count that fits below max_value, so
  // wrapped indices stay congruent modulo count.
  static constexpr uint32_t MaxIndex(uint32_t max_value, uint32_t count) {
    return (max_value / count) * count;
  }

  // Gets the next index.
  QueueIndex Increment() const { return IncrementBy(1u); }

  // Gets the nth next element, wrapping modulo MaxIndex(...) so the sentinel
  // value is never produced.
  QueueIndex IncrementBy(uint32_t amount) const {
    uint32_t index = index_ + amount;
    uint32_t max_index = MaxIndex(sentinal_value(), count_);

    if (index < index_) {
      // We wrapped. We are shifting up by 0x100000000 - MaxIndex(...).
      // Which is equivalent to subtracting MaxIndex since everything is modular
      // with a uint32_t.
      index -= max_index;
    }

    // Now, wrap the remainder.
    index = index % max_index;
    return QueueIndex(index, count_);
  }

  // Gets the nth previous element.
  QueueIndex DecrementBy(uint32_t amount) const {
    uint32_t index = index_ - amount;
    if (index > index_) {
      // We wrapped. We are shifting down by 0x100000000 - MaxIndex(...).
      // Which is equivalent to adding MaxIndex since everything is modular with
      // a uint32_t.
      index += MaxIndex(sentinal_value(), count_);
    }
    return QueueIndex(index, count_);
  }

  // Returns true if the lowest bits of the queue index from the Index could
  // plausibly match this queue index. The number of bits matched depends on
  // the size of atomics in use. This is the re-use safeguard described in the
  // file comment above.
  bool IsPlausible(PackedIndexType queue_index) const {
    return valid() &&
           (queue_index ==
            static_cast<PackedIndexType>(
                index_ & std::numeric_limits<PackedIndexType>::max()));
  }

  // Equality compares only the raw index, not count.
  bool operator==(const QueueIndex other) const {
    return other.index_ == index_;
  }

  bool operator!=(const QueueIndex other) const {
    return other.index_ != index_;
  }

  // Returns the wrapped index into the queue (i.e. the slot number).
  uint32_t Wrapped() const { return index_ % count_; }

  // Returns the raw index. This should be used very sparingly.
  uint32_t index() const { return index_; }

  // Returns an index pointing to 0 with the same count as this one.
  QueueIndex Clear() const { return QueueIndex(0, count_); }

  // Returns a string representing the index. Defined out-of-line.
  ::std::string DebugString() const;

 private:
  QueueIndex(uint32_t index, uint32_t count) : index_(index), count_(count) {}

  // Reserved all-ones value marking an invalid index. ("sentinal" is a
  // historical misspelling of "sentinel"; kept because out-of-view code may
  // reference it.)
  static constexpr uint32_t sentinal_value() { return 0xffffffffu; }

  friend struct AtomicQueueIndex;
  friend class Index;
  // For testing.
  friend class testing::QueueIndexTest;

  // Index and number of elements in the queue.
  uint32_t index_;
  // Count is stored here rather than passed in everywhere in the hopes that the
  // compiler completely optimizes out this class and this variable if it isn't
  // used.
  uint32_t count_;
};
149
150// Atomic storage for setting and getting QueueIndex objects.
151// Count is the number of messages in the queue.
152struct AtomicQueueIndex {
153 public:
154 // Atomically reads the index without any ordering constraints.
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700155 QueueIndex RelaxedLoad(uint32_t count) const {
Austin Schuh25356e22019-09-11 19:27:07 -0700156 return QueueIndex(index_.load(::std::memory_order_relaxed), count);
157 }
158
159 // Full bidirectional barriers here.
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700160 QueueIndex Load(uint32_t count) const {
Brian Silverman2035de72019-12-18 21:39:14 -0800161 return QueueIndex(index_.load(::std::memory_order_acquire), count);
162 }
163 inline void Store(QueueIndex value) {
164 index_.store(value.index_, ::std::memory_order_release);
165 }
Austin Schuh25356e22019-09-11 19:27:07 -0700166
167 // Invalidates the element unconditionally.
168 inline void Invalidate() { Store(QueueIndex::Invalid()); }
169
Brian Silverman177567e2020-08-12 19:51:33 -0700170 inline void RelaxedInvalidate() {
171 index_.store(QueueIndex::Invalid().index_, ::std::memory_order_relaxed);
172 }
173
Austin Schuh25356e22019-09-11 19:27:07 -0700174 // Swaps expected for index atomically. Returns true on success, false
175 // otherwise.
176 inline bool CompareAndExchangeStrong(QueueIndex expected, QueueIndex index) {
Brian Silverman1ed5df52021-09-13 20:14:06 -0700177 linux_code::ipc_lib::RunShmObservers run_observers(&index_, true);
Brian Silverman2035de72019-12-18 21:39:14 -0800178 return index_.compare_exchange_strong(expected.index_, index.index_,
179 ::std::memory_order_acq_rel);
Austin Schuh25356e22019-09-11 19:27:07 -0700180 }
181
182 private:
183 ::std::atomic<uint32_t> index_;
184};
185
186// Structure holding the queue index and the index into the message list.
187class Index {
188 public:
Austin Schuh83cbb1e2023-06-23 12:59:02 -0700189#if AOS_QUEUE_ATOMIC_SIZE == 64
190 typedef uint64_t IndexType;
191 typedef uint32_t MessageIndexType;
192#else
193 typedef uint32_t IndexType;
194 typedef uint16_t MessageIndexType;
195#endif
196 typedef QueueIndex::PackedIndexType PackedIndexType;
197
Austin Schuh25356e22019-09-11 19:27:07 -0700198 // Constructs an Index. queue_index is the QueueIndex of this message, and
199 // message_index is the index into the messages structure.
Austin Schuh83cbb1e2023-06-23 12:59:02 -0700200 Index(QueueIndex queue_index, MessageIndexType message_index)
Austin Schuh25356e22019-09-11 19:27:07 -0700201 : Index(queue_index.index_, message_index) {}
Austin Schuh83cbb1e2023-06-23 12:59:02 -0700202 Index(uint32_t queue_index, MessageIndexType message_index)
203 : index_(static_cast<IndexType>(
204 queue_index & std::numeric_limits<PackedIndexType>::max()) |
205 (static_cast<IndexType>(message_index)
206 << std::numeric_limits<PackedIndexType>::digits)) {
Brian Silverman177567e2020-08-12 19:51:33 -0700207 CHECK_LE(message_index, MaxMessages());
208 }
Austin Schuh25356e22019-09-11 19:27:07 -0700209
210 // Index of this message in the message array.
Austin Schuh83cbb1e2023-06-23 12:59:02 -0700211 MessageIndexType message_index() const {
212 return (index_ >> std::numeric_limits<PackedIndexType>::digits) &
213 std::numeric_limits<MessageIndexType>::max();
214 }
Austin Schuh25356e22019-09-11 19:27:07 -0700215
Austin Schuh83cbb1e2023-06-23 12:59:02 -0700216 // Lowest bits of the queue index of this message in the queue. This will
217 // either be 16 or 32 bits, depending on if we have 32 or 64 bit atomics under
218 // the cover.
219 PackedIndexType queue_index() const {
220 return index_ & std::numeric_limits<PackedIndexType>::max();
221 }
Austin Schuh25356e22019-09-11 19:27:07 -0700222
223 // Returns true if the provided queue index plausibly represents this Index.
224 bool IsPlausible(QueueIndex queue_index) const {
225 return queue_index.IsPlausible(this->queue_index());
226 }
227
228 // Returns an invalid Index.
229 static Index Invalid() { return Index(sentinal_value()); }
230 // Checks if this Index is valid or not.
231 bool valid() const { return index_ != sentinal_value(); }
232
233 // Returns the raw Index. This should only be used for debug.
Austin Schuh83cbb1e2023-06-23 12:59:02 -0700234 IndexType get() const { return index_; }
Austin Schuh25356e22019-09-11 19:27:07 -0700235
236 // Returns the maximum number of messages we can store before overflowing.
Austin Schuh83cbb1e2023-06-23 12:59:02 -0700237 static constexpr MessageIndexType MaxMessages() {
238 return std::numeric_limits<MessageIndexType>::max() - 1;
239 }
Austin Schuh25356e22019-09-11 19:27:07 -0700240
241 bool operator==(const Index other) const { return other.index_ == index_; }
Brian Silverman177567e2020-08-12 19:51:33 -0700242 bool operator!=(const Index other) const { return other.index_ != index_; }
Austin Schuh25356e22019-09-11 19:27:07 -0700243
Austin Schuh20b2b082019-09-11 20:42:56 -0700244 // Returns a string representing the index.
Austin Schuh83cbb1e2023-06-23 12:59:02 -0700245 std::string DebugString() const;
Austin Schuh20b2b082019-09-11 20:42:56 -0700246
Austin Schuh25356e22019-09-11 19:27:07 -0700247 private:
Austin Schuh83cbb1e2023-06-23 12:59:02 -0700248 Index(IndexType index) : index_(index) {}
Austin Schuh25356e22019-09-11 19:27:07 -0700249
250 friend class AtomicIndex;
251
Austin Schuh83cbb1e2023-06-23 12:59:02 -0700252 static constexpr IndexType sentinal_value() {
253 return std::numeric_limits<IndexType>::max();
254 }
Austin Schuh25356e22019-09-11 19:27:07 -0700255
Austin Schuh83cbb1e2023-06-23 12:59:02 -0700256 // Note: a value of all 1 bits is a sentinal to represent an invalid entry.
Austin Schuh25356e22019-09-11 19:27:07 -0700257 // This works because we would need to have a queue index of 0x*ffff, *and*
258 // have 0xffff messages in the message list. That constraint is easy to
259 // enforce by limiting the max messages.
Austin Schuh83cbb1e2023-06-23 12:59:02 -0700260 IndexType index_;
Austin Schuh25356e22019-09-11 19:27:07 -0700261};
262
263// Atomic storage for setting and getting Index objects.
264class AtomicIndex {
265 public:
266 // Stores and loads atomically without ordering constraints.
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700267 Index RelaxedLoad() const {
Austin Schuh25356e22019-09-11 19:27:07 -0700268 return Index(index_.load(::std::memory_order_relaxed));
269 }
270 void RelaxedStore(Index index) {
271 index_.store(index.index_, ::std::memory_order_relaxed);
272 }
273
274 // Invalidates the index atomically, but without any ordering constraints.
275 void RelaxedInvalidate() { RelaxedStore(Index::Invalid()); }
276
Brian Silverman2035de72019-12-18 21:39:14 -0800277 // Full barriers here.
Austin Schuh20b2b082019-09-11 20:42:56 -0700278 void Invalidate() { Store(Index::Invalid()); }
Brian Silverman2035de72019-12-18 21:39:14 -0800279 void Store(Index index) {
280 index_.store(index.index_, ::std::memory_order_release);
281 }
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700282 Index Load() const { return Index(index_.load(::std::memory_order_acquire)); }
Austin Schuh25356e22019-09-11 19:27:07 -0700283
284 // Swaps expected for index atomically. Returns true on success, false
285 // otherwise.
Brian Silverman177567e2020-08-12 19:51:33 -0700286 bool CompareAndExchangeStrong(Index expected, Index index) {
Brian Silverman1ed5df52021-09-13 20:14:06 -0700287 linux_code::ipc_lib::RunShmObservers run_observers(&index_, true);
Brian Silverman2035de72019-12-18 21:39:14 -0800288 return index_.compare_exchange_strong(expected.index_, index.index_,
289 ::std::memory_order_acq_rel);
Austin Schuh25356e22019-09-11 19:27:07 -0700290 }
291
Brian Silverman177567e2020-08-12 19:51:33 -0700292 bool CompareAndExchangeWeak(Index *expected, Index index) {
293 return index_.compare_exchange_weak(expected->index_, index.index_,
294 ::std::memory_order_acq_rel);
295 }
296
Austin Schuh25356e22019-09-11 19:27:07 -0700297 private:
Austin Schuh83cbb1e2023-06-23 12:59:02 -0700298 ::std::atomic<Index::IndexType> index_;
Austin Schuh25356e22019-09-11 19:27:07 -0700299};
300
Stephan Pleinesd99b1ee2024-02-02 20:56:44 -0800301} // namespace aos::ipc_lib
Austin Schuh25356e22019-09-11 19:27:07 -0700302
303#endif // AOS_IPC_LIB_INDEX_H_