#ifndef AOS_IPC_LIB_INDEX_H_
#define AOS_IPC_LIB_INDEX_H_

#include <sys/types.h>

#include <atomic>
#include <string>

#include "glog/logging.h"

#include "aos/ipc_lib/shm_observers.h"

namespace aos {
namespace ipc_lib {

struct AtomicQueueIndex;
class AtomicIndex;
class Index;

namespace testing {
class QueueIndexTest;
}  // namespace testing

// There are two types of indices in the queue. One is the index into the
// overall queue. If we have sent 1,000,005 messages, this is that number.
//
// The other is the index into the message list. This is essentially a message
// pointer. It has some of the lower bits of the queue index encoded into it as
// a safeguard to detect if someone re-used a message out from under us and we
// couldn't tell otherwise. It is used to map a queue index to a message index.
//
// Each of these index types has an atomic version and a non-atomic one. The
// atomic version is a wrapper around a uint32_t to hang helper functions off
// of. The non-atomic version contains all the logic. These are all in the
// header file to encourage the compiler to inline aggressively.
//
// The user should very infrequently be manipulating the values of these
// directly. Use the classes instead to do the heavy lifting.

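// For illustration only, a minimal sketch of how the pieces below fit
// together, assuming a queue sized for 8 messages (the values here are
// hypothetical, not part of the API):
//
//   QueueIndex queue_index = QueueIndex::Zero(8);  // overall position 0
//   queue_index = queue_index.IncrementBy(10);     // overall position 10
//   queue_index.Wrapped();                         // == 2, the wrapped slot
//
//   // Pair that queue index with (hypothetically) message array slot 3.
//   Index index(queue_index, 3);
//   index.message_index();                         // == 3
//   index.IsPlausible(queue_index);                // == true
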
// Structure for holding the index into the queue.
class QueueIndex {
 public:
  // Returns an invalid queue element which uses a reserved value.
  static QueueIndex Invalid() { return QueueIndex(0xffffffff, 0); }
  // Returns a queue element pointing to 0.
  static QueueIndex Zero(uint32_t count) { return QueueIndex(0, count); }

  // Returns true if the index is valid.
  bool valid() const { return index_ != 0xffffffff; }

  // Returns the modulo base used to wrap indices to avoid overlapping with
  // the reserved number.
  // max_value is one more than the max value we can store.
  // count is the number of elements in the queue.
  static constexpr uint32_t MaxIndex(uint32_t max_value, uint32_t count) {
    return (max_value / count) * count;
  }
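
  // A quick worked example (illustrative only): with max_value = 0xffffffff
  // (the sentinel) and count = 6, MaxIndex is (4294967295 / 6) * 6 =
  // 4294967292, so queue indices stay in [0, 4294967292) and wrap before ever
  // producing the reserved 0xffffffff.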

  // Gets the next index.
  QueueIndex Increment() const { return IncrementBy(1u); }

  // Gets the nth next element.
  QueueIndex IncrementBy(uint32_t amount) const {
    uint32_t index = index_ + amount;
    uint32_t max_index = MaxIndex(sentinal_value(), count_);

    if (index < index_) {
      // We wrapped. We are shifting up by 0x100000000 - MaxIndex(...), which
      // is equivalent to subtracting MaxIndex since everything is modular
      // with a uint32_t.
      index -= max_index;
    }

    // Now, wrap the remainder.
    index = index % max_index;
    return QueueIndex(index, count_);
  }
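
  // Worked example (illustrative only) of the overflow branch above: with
  // count_ = 6, max_index is 4294967292, i.e. 0x100000000 - 4. Starting from
  // index_ = 4294967291 with amount = 10, index_ + amount wraps the uint32_t
  // around to 5; subtracting max_index (equivalent to adding 4 mod 2^32)
  // yields 9, which matches (4294967291 + 10) % 4294967292 = 9.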

  // Gets the nth previous element.
  QueueIndex DecrementBy(uint32_t amount) const {
    uint32_t index = index_ - amount;
    if (index > index_) {
      // We wrapped. We are shifting down by 0x100000000 - MaxIndex(...), which
      // is equivalent to adding MaxIndex since everything is modular with a
      // uint32_t.
      index += MaxIndex(sentinal_value(), count_);
    }
    return QueueIndex(index, count_);
  }

  // Returns true if the lowest 16 bits of the queue index from the Index could
  // plausibly match this queue index.
  bool IsPlausible(uint16_t queue_index) const {
    return valid() && (queue_index == static_cast<uint16_t>(index_ & 0xffff));
  }

  bool operator==(const QueueIndex other) const {
    return other.index_ == index_;
  }

  bool operator!=(const QueueIndex other) const {
    return other.index_ != index_;
  }

  // Returns the wrapped index into the queue.
  uint32_t Wrapped() const { return index_ % count_; }

  // Returns the raw index. This should be used very sparingly.
  uint32_t index() const { return index_; }

  // Returns an index pointing to 0 with the same count.
  QueueIndex Clear() const { return QueueIndex(0, count_); }

  // Returns a string representing the index.
  ::std::string DebugString() const;

 private:
  QueueIndex(uint32_t index, uint32_t count) : index_(index), count_(count) {}

  static constexpr uint32_t sentinal_value() { return 0xffffffffu; }

  friend struct AtomicQueueIndex;
  friend class Index;
  // For testing.
  friend class testing::QueueIndexTest;

  // Index and number of elements in the queue.
  uint32_t index_;
  // Count is stored here rather than passed in everywhere in the hopes that
  // the compiler completely optimizes out this class and this variable if it
  // isn't used.
  uint32_t count_;
};

// Atomic storage for setting and getting QueueIndex objects.
// Count is the number of messages in the queue.
struct AtomicQueueIndex {
 public:
  // Atomically reads the index without any ordering constraints.
  QueueIndex RelaxedLoad(uint32_t count) const {
    return QueueIndex(index_.load(::std::memory_order_relaxed), count);
  }

  // Acquire/release ordering here.
  QueueIndex Load(uint32_t count) const {
    return QueueIndex(index_.load(::std::memory_order_acquire), count);
  }
  inline void Store(QueueIndex value) {
    index_.store(value.index_, ::std::memory_order_release);
  }

  // Invalidates the element unconditionally.
  inline void Invalidate() { Store(QueueIndex::Invalid()); }

  // Invalidates the element without any ordering constraints.
  inline void RelaxedInvalidate() {
    index_.store(QueueIndex::Invalid().index_, ::std::memory_order_relaxed);
  }

  // Swaps expected for index atomically. Returns true on success, false
  // otherwise.
  inline bool CompareAndExchangeStrong(QueueIndex expected, QueueIndex index) {
    linux_code::ipc_lib::RunShmObservers run_observers(&index_, true);
    return index_.compare_exchange_strong(expected.index_, index.index_,
                                          ::std::memory_order_acq_rel);
  }

 private:
  ::std::atomic<uint32_t> index_;
};

// Structure holding the queue index and the index into the message list.
class Index {
 public:
  // Constructs an Index. queue_index is the QueueIndex of this message, and
  // message_index is the index into the messages structure.
  Index(QueueIndex queue_index, uint16_t message_index)
      : Index(queue_index.index_, message_index) {}
  Index(uint32_t queue_index, uint16_t message_index)
      : index_((queue_index & 0xffff) |
               (static_cast<uint32_t>(message_index) << 16)) {
    CHECK_LE(message_index, MaxMessages());
  }
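
  // Packing sketch (illustrative): message_index occupies the high 16 bits,
  // and the low 16 bits of the queue index occupy the low 16 bits. For
  // example, queue_index = 0x00012345 with message_index = 0x0007 packs to
  // index_ = 0x00072345.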

  // Index of this message in the message array.
  uint16_t message_index() const { return (index_ >> 16) & 0xffff; }

  // Lowest 16 bits of the queue index of this message in the queue.
  uint16_t queue_index() const { return index_ & 0xffff; }

  // Returns true if the provided queue index plausibly represents this Index.
  bool IsPlausible(QueueIndex queue_index) const {
    return queue_index.IsPlausible(this->queue_index());
  }

  // Returns an invalid Index.
  static Index Invalid() { return Index(sentinal_value()); }
  // Checks if this Index is valid or not.
  bool valid() const { return index_ != sentinal_value(); }

  // Returns the raw Index. This should only be used for debugging.
  uint32_t get() const { return index_; }

  // Returns the maximum number of messages we can store before overflowing.
  static constexpr uint16_t MaxMessages() { return 0xfffe; }

  bool operator==(const Index other) const { return other.index_ == index_; }
  bool operator!=(const Index other) const { return other.index_ != index_; }

  // Returns a string representing the index.
  ::std::string DebugString() const;

 private:
  Index(uint32_t index) : index_(index) {}

  friend class AtomicIndex;

  static constexpr uint32_t sentinal_value() { return 0xffffffffu; }

  // Note: a value of 0xffffffff is a sentinel to represent an invalid entry.
  // This works because we would need to have a queue index of 0x*ffff, *and*
  // have 0xffff messages in the message list. That constraint is easy to
  // enforce by limiting the max messages.
  uint32_t index_;
};

// Atomic storage for setting and getting Index objects.
class AtomicIndex {
 public:
  // Stores and loads atomically without ordering constraints.
  Index RelaxedLoad() const {
    return Index(index_.load(::std::memory_order_relaxed));
  }
  void RelaxedStore(Index index) {
    index_.store(index.index_, ::std::memory_order_relaxed);
  }

  // Invalidates the index atomically, but without any ordering constraints.
  void RelaxedInvalidate() { RelaxedStore(Index::Invalid()); }

  // Acquire/release ordering here.
  void Invalidate() { Store(Index::Invalid()); }
  void Store(Index index) {
    index_.store(index.index_, ::std::memory_order_release);
  }
  Index Load() const { return Index(index_.load(::std::memory_order_acquire)); }

  // Swaps expected for index atomically. Returns true on success, false
  // otherwise.
  bool CompareAndExchangeStrong(Index expected, Index index) {
    linux_code::ipc_lib::RunShmObservers run_observers(&index_, true);
    return index_.compare_exchange_strong(expected.index_, index.index_,
                                          ::std::memory_order_acq_rel);
  }

  // Like CompareAndExchangeStrong, but may fail spuriously. On failure,
  // *expected is updated to the value actually stored.
  bool CompareAndExchangeWeak(Index *expected, Index index) {
    return index_.compare_exchange_weak(expected->index_, index.index_,
                                        ::std::memory_order_acq_rel);
  }

 private:
  ::std::atomic<uint32_t> index_;
};
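
// Illustrative only: the weak exchange is suited to retry loops, where
// spurious failures are cheap to absorb. A minimal sketch, assuming a
// hypothetical shared `slot`:
//
//   AtomicIndex *slot = ...;
//   Index expected = slot->Load();
//   while (!slot->CompareAndExchangeWeak(&expected, Index::Invalid())) {
//     // expected was refreshed with the currently stored value; retry.
//   }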

}  // namespace ipc_lib
}  // namespace aos

#endif  // AOS_IPC_LIB_INDEX_H_