blob: 434da7c702adac31ca89b3d64f3d3c2f27577580 [file] [log] [blame]
#ifndef AOS_IPC_LIB_INDEX_H_
#define AOS_IPC_LIB_INDEX_H_

#include <stdint.h>
#include <sys/types.h>

#include <atomic>
#include <limits>
#include <string>

#include "glog/logging.h"

#include "aos/ipc_lib/shm_observers.h"
13
Austin Schuh83cbb1e2023-06-23 12:59:02 -070014#ifndef AOS_QUEUE_ATOMIC_SIZE
15#if UINTPTR_MAX == 0xffffffff
16#define AOS_QUEUE_ATOMIC_SIZE 32
17/* 32-bit */
18#elif UINTPTR_MAX == 0xffffffffffffffff
19#define AOS_QUEUE_ATOMIC_SIZE 64
20/* 64-bit */
21#else
22#error "Unknown pointer size"
23#endif
24#endif
25
Austin Schuh25356e22019-09-11 19:27:07 -070026namespace aos {
27namespace ipc_lib {
28
29struct AtomicQueueIndex;
30class AtomicIndex;
31class Index;
32
33namespace testing {
34class QueueIndexTest;
35} // namespace testing
36
37// There are 2 types of indices in the queue. 1 is the index into the overall
38// queue. If we have sent 1,000,005 messages, this is that number.
39//
40// The other is the index into the message list. This is essentially a message
41// pointer. It has some of the lower bits of the queue index encoded into it as
42// a safeguard to detect if someone re-used a message out from under us and we
43// couldn't tell otherwise. It is used to map a queue index to a message index.
44//
45// Each of these index types has an atomic version and a non-atomic. The atomic
46// version is a wrapper around a uint32_t to hang helper functions off of.
47// The non-atomic version contains all the logic. These are all in the header
48// file to encourage the compiler to inline aggressively.
49//
50// The user should very infrequently be manipulating the values of these
51// directly. Use the classes instead to do the heavy lifting.
52
53// Structure for holding the index into the queue.
54class QueueIndex {
55 public:
Austin Schuh83cbb1e2023-06-23 12:59:02 -070056#if AOS_QUEUE_ATOMIC_SIZE == 64
57 typedef uint32_t PackedIndexType;
58#else
59 typedef uint16_t PackedIndexType;
60#endif
61
Austin Schuh25356e22019-09-11 19:27:07 -070062 // Returns an invalid queue element which uses a reserved value.
Austin Schuh83cbb1e2023-06-23 12:59:02 -070063 static QueueIndex Invalid() { return QueueIndex(sentinal_value(), 0); }
Austin Schuh25356e22019-09-11 19:27:07 -070064 // Returns a queue element pointing to 0.
65 static QueueIndex Zero(uint32_t count) { return QueueIndex(0, count); }
66
67 // Returns true if the index is valid.
Austin Schuh83cbb1e2023-06-23 12:59:02 -070068 bool valid() const { return index_ != sentinal_value(); }
Austin Schuh25356e22019-09-11 19:27:07 -070069
70 // Returns the modulo base used to wrap to avoid overlapping with the reserved
71 // number.
72 // max_value is one more than the max value we can store.
73 // count is the the number of elements in the queue.
74 static constexpr uint32_t MaxIndex(uint32_t max_value, uint32_t count) {
75 return (max_value / count) * count;
76 }
77
78 // Gets the next index.
Brian Silverman177567e2020-08-12 19:51:33 -070079 QueueIndex Increment() const { return IncrementBy(1u); }
Austin Schuh25356e22019-09-11 19:27:07 -070080
81 // Gets the nth next element.
82 QueueIndex IncrementBy(uint32_t amount) const {
83 uint32_t index = index_ + amount;
84 uint32_t max_index = MaxIndex(sentinal_value(), count_);
85
86 if (index < index_) {
87 // We wrapped. We are shifting up by 0x100000000 - MaxIndex(...).
88 // Which is equivalent to subtracting MaxIndex since everything is modular
89 // with a uint32_t.
90 index -= max_index;
91 }
92
93 // Now, wrap the remainder.
94 index = index % max_index;
95 return QueueIndex(index, count_);
96 }
97
98 // Gets the nth previous element.
99 QueueIndex DecrementBy(uint32_t amount) const {
100 uint32_t index = index_ - amount;
101 if (index > index_) {
102 // We wrapped. We are shifting down by 0x100000000 - MaxIndex(...).
103 // Which is equivalent to adding MaxIndex since everything is modular with
104 // a uint32_t.
105 index += MaxIndex(sentinal_value(), count_);
106 }
107 return QueueIndex(index, count_);
108 }
109
Austin Schuh83cbb1e2023-06-23 12:59:02 -0700110 // Returns true if the lowest bits of the queue index from the Index could
111 // plausibly match this queue index. The number of bits matched depends on
112 // the the size of atomics in use.
113 bool IsPlausible(PackedIndexType queue_index) const {
114 return valid() &&
115 (queue_index ==
116 static_cast<PackedIndexType>(
117 index_ & std::numeric_limits<PackedIndexType>::max()));
Austin Schuh25356e22019-09-11 19:27:07 -0700118 }
119
120 bool operator==(const QueueIndex other) const {
121 return other.index_ == index_;
122 }
123
124 bool operator!=(const QueueIndex other) const {
125 return other.index_ != index_;
126 }
127
128 // Returns the wrapped index into the queue.
129 uint32_t Wrapped() const { return index_ % count_; }
130
131 // Returns the raw index. This should be used very sparingly.
132 uint32_t index() const { return index_; }
133
Austin Schuh20b2b082019-09-11 20:42:56 -0700134 QueueIndex Clear() const { return QueueIndex(0, count_); }
135
136 // Returns a string representing the index.
137 ::std::string DebugString() const;
138
Austin Schuh25356e22019-09-11 19:27:07 -0700139 private:
140 QueueIndex(uint32_t index, uint32_t count) : index_(index), count_(count) {}
141
142 static constexpr uint32_t sentinal_value() { return 0xffffffffu; }
143
144 friend struct AtomicQueueIndex;
145 friend class Index;
146 // For testing.
147 friend class testing::QueueIndexTest;
148
149 // Index and number of elements in the queue.
150 uint32_t index_;
151 // Count is stored here rather than passed in everywhere in the hopes that the
152 // compiler completely optimizes out this class and this variable if it isn't
153 // used.
154 uint32_t count_;
155};
156
157// Atomic storage for setting and getting QueueIndex objects.
158// Count is the number of messages in the queue.
159struct AtomicQueueIndex {
160 public:
161 // Atomically reads the index without any ordering constraints.
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700162 QueueIndex RelaxedLoad(uint32_t count) const {
Austin Schuh25356e22019-09-11 19:27:07 -0700163 return QueueIndex(index_.load(::std::memory_order_relaxed), count);
164 }
165
166 // Full bidirectional barriers here.
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700167 QueueIndex Load(uint32_t count) const {
Brian Silverman2035de72019-12-18 21:39:14 -0800168 return QueueIndex(index_.load(::std::memory_order_acquire), count);
169 }
170 inline void Store(QueueIndex value) {
171 index_.store(value.index_, ::std::memory_order_release);
172 }
Austin Schuh25356e22019-09-11 19:27:07 -0700173
174 // Invalidates the element unconditionally.
175 inline void Invalidate() { Store(QueueIndex::Invalid()); }
176
Brian Silverman177567e2020-08-12 19:51:33 -0700177 inline void RelaxedInvalidate() {
178 index_.store(QueueIndex::Invalid().index_, ::std::memory_order_relaxed);
179 }
180
Austin Schuh25356e22019-09-11 19:27:07 -0700181 // Swaps expected for index atomically. Returns true on success, false
182 // otherwise.
183 inline bool CompareAndExchangeStrong(QueueIndex expected, QueueIndex index) {
Brian Silverman1ed5df52021-09-13 20:14:06 -0700184 linux_code::ipc_lib::RunShmObservers run_observers(&index_, true);
Brian Silverman2035de72019-12-18 21:39:14 -0800185 return index_.compare_exchange_strong(expected.index_, index.index_,
186 ::std::memory_order_acq_rel);
Austin Schuh25356e22019-09-11 19:27:07 -0700187 }
188
189 private:
190 ::std::atomic<uint32_t> index_;
191};
192
193// Structure holding the queue index and the index into the message list.
194class Index {
195 public:
Austin Schuh83cbb1e2023-06-23 12:59:02 -0700196#if AOS_QUEUE_ATOMIC_SIZE == 64
197 typedef uint64_t IndexType;
198 typedef uint32_t MessageIndexType;
199#else
200 typedef uint32_t IndexType;
201 typedef uint16_t MessageIndexType;
202#endif
203 typedef QueueIndex::PackedIndexType PackedIndexType;
204
Austin Schuh25356e22019-09-11 19:27:07 -0700205 // Constructs an Index. queue_index is the QueueIndex of this message, and
206 // message_index is the index into the messages structure.
Austin Schuh83cbb1e2023-06-23 12:59:02 -0700207 Index(QueueIndex queue_index, MessageIndexType message_index)
Austin Schuh25356e22019-09-11 19:27:07 -0700208 : Index(queue_index.index_, message_index) {}
Austin Schuh83cbb1e2023-06-23 12:59:02 -0700209 Index(uint32_t queue_index, MessageIndexType message_index)
210 : index_(static_cast<IndexType>(
211 queue_index & std::numeric_limits<PackedIndexType>::max()) |
212 (static_cast<IndexType>(message_index)
213 << std::numeric_limits<PackedIndexType>::digits)) {
Brian Silverman177567e2020-08-12 19:51:33 -0700214 CHECK_LE(message_index, MaxMessages());
215 }
Austin Schuh25356e22019-09-11 19:27:07 -0700216
217 // Index of this message in the message array.
Austin Schuh83cbb1e2023-06-23 12:59:02 -0700218 MessageIndexType message_index() const {
219 return (index_ >> std::numeric_limits<PackedIndexType>::digits) &
220 std::numeric_limits<MessageIndexType>::max();
221 }
Austin Schuh25356e22019-09-11 19:27:07 -0700222
Austin Schuh83cbb1e2023-06-23 12:59:02 -0700223 // Lowest bits of the queue index of this message in the queue. This will
224 // either be 16 or 32 bits, depending on if we have 32 or 64 bit atomics under
225 // the cover.
226 PackedIndexType queue_index() const {
227 return index_ & std::numeric_limits<PackedIndexType>::max();
228 }
Austin Schuh25356e22019-09-11 19:27:07 -0700229
230 // Returns true if the provided queue index plausibly represents this Index.
231 bool IsPlausible(QueueIndex queue_index) const {
232 return queue_index.IsPlausible(this->queue_index());
233 }
234
235 // Returns an invalid Index.
236 static Index Invalid() { return Index(sentinal_value()); }
237 // Checks if this Index is valid or not.
238 bool valid() const { return index_ != sentinal_value(); }
239
240 // Returns the raw Index. This should only be used for debug.
Austin Schuh83cbb1e2023-06-23 12:59:02 -0700241 IndexType get() const { return index_; }
Austin Schuh25356e22019-09-11 19:27:07 -0700242
243 // Returns the maximum number of messages we can store before overflowing.
Austin Schuh83cbb1e2023-06-23 12:59:02 -0700244 static constexpr MessageIndexType MaxMessages() {
245 return std::numeric_limits<MessageIndexType>::max() - 1;
246 }
Austin Schuh25356e22019-09-11 19:27:07 -0700247
248 bool operator==(const Index other) const { return other.index_ == index_; }
Brian Silverman177567e2020-08-12 19:51:33 -0700249 bool operator!=(const Index other) const { return other.index_ != index_; }
Austin Schuh25356e22019-09-11 19:27:07 -0700250
Austin Schuh20b2b082019-09-11 20:42:56 -0700251 // Returns a string representing the index.
Austin Schuh83cbb1e2023-06-23 12:59:02 -0700252 std::string DebugString() const;
Austin Schuh20b2b082019-09-11 20:42:56 -0700253
Austin Schuh25356e22019-09-11 19:27:07 -0700254 private:
Austin Schuh83cbb1e2023-06-23 12:59:02 -0700255 Index(IndexType index) : index_(index) {}
Austin Schuh25356e22019-09-11 19:27:07 -0700256
257 friend class AtomicIndex;
258
Austin Schuh83cbb1e2023-06-23 12:59:02 -0700259 static constexpr IndexType sentinal_value() {
260 return std::numeric_limits<IndexType>::max();
261 }
Austin Schuh25356e22019-09-11 19:27:07 -0700262
Austin Schuh83cbb1e2023-06-23 12:59:02 -0700263 // Note: a value of all 1 bits is a sentinal to represent an invalid entry.
Austin Schuh25356e22019-09-11 19:27:07 -0700264 // This works because we would need to have a queue index of 0x*ffff, *and*
265 // have 0xffff messages in the message list. That constraint is easy to
266 // enforce by limiting the max messages.
Austin Schuh83cbb1e2023-06-23 12:59:02 -0700267 IndexType index_;
Austin Schuh25356e22019-09-11 19:27:07 -0700268};
269
270// Atomic storage for setting and getting Index objects.
271class AtomicIndex {
272 public:
273 // Stores and loads atomically without ordering constraints.
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700274 Index RelaxedLoad() const {
Austin Schuh25356e22019-09-11 19:27:07 -0700275 return Index(index_.load(::std::memory_order_relaxed));
276 }
277 void RelaxedStore(Index index) {
278 index_.store(index.index_, ::std::memory_order_relaxed);
279 }
280
281 // Invalidates the index atomically, but without any ordering constraints.
282 void RelaxedInvalidate() { RelaxedStore(Index::Invalid()); }
283
Brian Silverman2035de72019-12-18 21:39:14 -0800284 // Full barriers here.
Austin Schuh20b2b082019-09-11 20:42:56 -0700285 void Invalidate() { Store(Index::Invalid()); }
Brian Silverman2035de72019-12-18 21:39:14 -0800286 void Store(Index index) {
287 index_.store(index.index_, ::std::memory_order_release);
288 }
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700289 Index Load() const { return Index(index_.load(::std::memory_order_acquire)); }
Austin Schuh25356e22019-09-11 19:27:07 -0700290
291 // Swaps expected for index atomically. Returns true on success, false
292 // otherwise.
Brian Silverman177567e2020-08-12 19:51:33 -0700293 bool CompareAndExchangeStrong(Index expected, Index index) {
Brian Silverman1ed5df52021-09-13 20:14:06 -0700294 linux_code::ipc_lib::RunShmObservers run_observers(&index_, true);
Brian Silverman2035de72019-12-18 21:39:14 -0800295 return index_.compare_exchange_strong(expected.index_, index.index_,
296 ::std::memory_order_acq_rel);
Austin Schuh25356e22019-09-11 19:27:07 -0700297 }
298
Brian Silverman177567e2020-08-12 19:51:33 -0700299 bool CompareAndExchangeWeak(Index *expected, Index index) {
300 return index_.compare_exchange_weak(expected->index_, index.index_,
301 ::std::memory_order_acq_rel);
302 }
303
Austin Schuh25356e22019-09-11 19:27:07 -0700304 private:
Austin Schuh83cbb1e2023-06-23 12:59:02 -0700305 ::std::atomic<Index::IndexType> index_;
Austin Schuh25356e22019-09-11 19:27:07 -0700306};
307
308} // namespace ipc_lib
309} // namespace aos
310
311#endif // AOS_IPC_LIB_INDEX_H_