blob: a47121e31d6caf19d4df85161171e821e5f9e619 [file] [log] [blame]
Austin Schuh25356e22019-09-11 19:27:07 -07001#ifndef AOS_IPC_LIB_INDEX_H_
2#define AOS_IPC_LIB_INDEX_H_
3
4#include <sys/types.h>
5#include <atomic>
Austin Schuh20b2b082019-09-11 20:42:56 -07006#include <string>
Austin Schuh25356e22019-09-11 19:27:07 -07007
Brian Silverman177567e2020-08-12 19:51:33 -07008#include "glog/logging.h"
9
Austin Schuh25356e22019-09-11 19:27:07 -070010namespace aos {
11namespace ipc_lib {
12
13struct AtomicQueueIndex;
14class AtomicIndex;
15class Index;
16
17namespace testing {
18class QueueIndexTest;
19} // namespace testing
20
// There are 2 types of indices in the queue.  1 is the index into the overall
// queue.  If we have sent 1,000,005 messages, this is that number.
//
// The other is the index into the message list.  This is essentially a message
// pointer.  It has some of the lower bits of the queue index encoded into it
// as a safeguard to detect if someone re-used a message out from under us and
// we couldn't tell otherwise.  It is used to map a queue index to a message
// index.
//
// Each of these index types has an atomic version and a non-atomic.  The
// atomic version is a wrapper around a uint32_t to hang helper functions off
// of.  The non-atomic version contains all the logic.  These are all in the
// header file to encourage the compiler to inline aggressively.
//
// The user should very infrequently be manipulating the values of these
// directly.  Use the classes instead to do the heavy lifting.
// Forward declaration so the qualified friend declaration below is valid on
// its own (harmlessly redeclares the earlier forward declaration).
namespace testing {
class QueueIndexTest;
}  // namespace testing

// Structure for holding the index into the queue: the monotonic "how many
// messages have ever been sent" counter.  It wraps at a multiple of count
// (see MaxIndex) so Wrapped() stays consistent across the wrap, and reserves
// 0xffffffff as the invalid sentinel.
class QueueIndex {
 public:
  // Returns an invalid queue element which uses a reserved value.
  static QueueIndex Invalid() { return QueueIndex(0xffffffff, 0); }
  // Returns a queue element pointing to 0.
  static QueueIndex Zero(uint32_t count) { return QueueIndex(0, count); }

  // Returns true if the index is valid (not the reserved sentinel).
  bool valid() const { return index_ != 0xffffffff; }

  // Returns the modulo base used to wrap to avoid overlapping with the
  // reserved number.
  // max_value is one more than the max value we can store.
  // count is the number of elements in the queue.
  static constexpr uint32_t MaxIndex(uint32_t max_value, uint32_t count) {
    return (max_value / count) * count;
  }

  // Gets the next index.
  QueueIndex Increment() const { return IncrementBy(1u); }

  // Gets the nth next element.
  QueueIndex IncrementBy(uint32_t amount) const {
    uint32_t index = index_ + amount;
    uint32_t max_index = MaxIndex(sentinal_value(), count_);

    if (index < index_) {
      // We wrapped.  We are shifting up by 0x100000000 - MaxIndex(...).
      // Which is equivalent to subtracting MaxIndex since everything is
      // modular with a uint32_t.
      index -= max_index;
    }

    // Now, wrap the remainder.
    index = index % max_index;
    return QueueIndex(index, count_);
  }

  // Gets the nth previous element.
  QueueIndex DecrementBy(uint32_t amount) const {
    uint32_t index = index_ - amount;
    if (index > index_) {
      // We wrapped.  We are shifting down by 0x100000000 - MaxIndex(...).
      // Which is equivalent to adding MaxIndex since everything is modular
      // with a uint32_t.
      index += MaxIndex(sentinal_value(), count_);
    }
    return QueueIndex(index, count_);
  }

  // Returns true if the lowest 16 bits of the queue index from the Index could
  // plausibly match this queue index.
  bool IsPlausible(uint16_t queue_index) const {
    return valid() && (queue_index == static_cast<uint16_t>(index_ & 0xffff));
  }

  bool operator==(const QueueIndex other) const {
    return other.index_ == index_;
  }

  bool operator!=(const QueueIndex other) const {
    return other.index_ != index_;
  }

  // Returns the wrapped index into the queue.
  uint32_t Wrapped() const { return index_ % count_; }

  // Returns the raw index.  This should be used very sparingly.
  uint32_t index() const { return index_; }

  // Returns an index pointing to 0 with the same count as this one.
  QueueIndex Clear() const { return QueueIndex(0, count_); }

  // Returns a string representing the index.
  ::std::string DebugString() const;

 private:
  QueueIndex(uint32_t index, uint32_t count) : index_(index), count_(count) {}

  // Reserved value which marks an index as invalid (also one more than the
  // largest storable raw value).
  static constexpr uint32_t sentinal_value() { return 0xffffffffu; }

  friend struct AtomicQueueIndex;
  friend class Index;
  // For testing.
  friend class testing::QueueIndexTest;

  // Index and number of elements in the queue.
  uint32_t index_;
  // Count is stored here rather than passed in everywhere in the hopes that
  // the compiler completely optimizes out this class and this variable if it
  // isn't used.
  uint32_t count_;
};
130
131// Atomic storage for setting and getting QueueIndex objects.
132// Count is the number of messages in the queue.
133struct AtomicQueueIndex {
134 public:
135 // Atomically reads the index without any ordering constraints.
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700136 QueueIndex RelaxedLoad(uint32_t count) const {
Austin Schuh25356e22019-09-11 19:27:07 -0700137 return QueueIndex(index_.load(::std::memory_order_relaxed), count);
138 }
139
140 // Full bidirectional barriers here.
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700141 QueueIndex Load(uint32_t count) const {
Brian Silverman2035de72019-12-18 21:39:14 -0800142 return QueueIndex(index_.load(::std::memory_order_acquire), count);
143 }
144 inline void Store(QueueIndex value) {
145 index_.store(value.index_, ::std::memory_order_release);
146 }
Austin Schuh25356e22019-09-11 19:27:07 -0700147
148 // Invalidates the element unconditionally.
149 inline void Invalidate() { Store(QueueIndex::Invalid()); }
150
Brian Silverman177567e2020-08-12 19:51:33 -0700151 inline void RelaxedInvalidate() {
152 index_.store(QueueIndex::Invalid().index_, ::std::memory_order_relaxed);
153 }
154
Austin Schuh25356e22019-09-11 19:27:07 -0700155 // Swaps expected for index atomically. Returns true on success, false
156 // otherwise.
157 inline bool CompareAndExchangeStrong(QueueIndex expected, QueueIndex index) {
Brian Silverman2035de72019-12-18 21:39:14 -0800158 return index_.compare_exchange_strong(expected.index_, index.index_,
159 ::std::memory_order_acq_rel);
Austin Schuh25356e22019-09-11 19:27:07 -0700160 }
161
162 private:
163 ::std::atomic<uint32_t> index_;
164};
165
166// Structure holding the queue index and the index into the message list.
167class Index {
168 public:
169 // Constructs an Index. queue_index is the QueueIndex of this message, and
170 // message_index is the index into the messages structure.
171 Index(QueueIndex queue_index, uint16_t message_index)
172 : Index(queue_index.index_, message_index) {}
173 Index(uint32_t queue_index, uint16_t message_index)
174 : index_((queue_index & 0xffff) |
Brian Silverman177567e2020-08-12 19:51:33 -0700175 (static_cast<uint32_t>(message_index) << 16)) {
176 CHECK_LE(message_index, MaxMessages());
177 }
Austin Schuh25356e22019-09-11 19:27:07 -0700178
179 // Index of this message in the message array.
180 uint16_t message_index() const { return (index_ >> 16) & 0xffff; }
181
182 // Lowest 16 bits of the queue index of this message in the queue.
183 uint16_t queue_index() const { return index_ & 0xffff; }
184
185 // Returns true if the provided queue index plausibly represents this Index.
186 bool IsPlausible(QueueIndex queue_index) const {
187 return queue_index.IsPlausible(this->queue_index());
188 }
189
190 // Returns an invalid Index.
191 static Index Invalid() { return Index(sentinal_value()); }
192 // Checks if this Index is valid or not.
193 bool valid() const { return index_ != sentinal_value(); }
194
195 // Returns the raw Index. This should only be used for debug.
196 uint32_t get() const { return index_; }
197
198 // Returns the maximum number of messages we can store before overflowing.
199 static constexpr uint16_t MaxMessages() { return 0xfffe; }
200
201 bool operator==(const Index other) const { return other.index_ == index_; }
Brian Silverman177567e2020-08-12 19:51:33 -0700202 bool operator!=(const Index other) const { return other.index_ != index_; }
Austin Schuh25356e22019-09-11 19:27:07 -0700203
Austin Schuh20b2b082019-09-11 20:42:56 -0700204 // Returns a string representing the index.
205 ::std::string DebugString() const;
206
Austin Schuh25356e22019-09-11 19:27:07 -0700207 private:
Brian Silverman177567e2020-08-12 19:51:33 -0700208 Index(uint32_t index) : index_(index) {}
Austin Schuh25356e22019-09-11 19:27:07 -0700209
210 friend class AtomicIndex;
211
212 static constexpr uint32_t sentinal_value() { return 0xffffffffu; }
213
214 // Note: a value of 0xffffffff is a sentinal to represent an invalid entry.
215 // This works because we would need to have a queue index of 0x*ffff, *and*
216 // have 0xffff messages in the message list. That constraint is easy to
217 // enforce by limiting the max messages.
218 uint32_t index_;
219};
220
221// Atomic storage for setting and getting Index objects.
222class AtomicIndex {
223 public:
224 // Stores and loads atomically without ordering constraints.
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700225 Index RelaxedLoad() const {
Austin Schuh25356e22019-09-11 19:27:07 -0700226 return Index(index_.load(::std::memory_order_relaxed));
227 }
228 void RelaxedStore(Index index) {
229 index_.store(index.index_, ::std::memory_order_relaxed);
230 }
231
232 // Invalidates the index atomically, but without any ordering constraints.
233 void RelaxedInvalidate() { RelaxedStore(Index::Invalid()); }
234
Brian Silverman2035de72019-12-18 21:39:14 -0800235 // Full barriers here.
Austin Schuh20b2b082019-09-11 20:42:56 -0700236 void Invalidate() { Store(Index::Invalid()); }
Brian Silverman2035de72019-12-18 21:39:14 -0800237 void Store(Index index) {
238 index_.store(index.index_, ::std::memory_order_release);
239 }
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700240 Index Load() const { return Index(index_.load(::std::memory_order_acquire)); }
Austin Schuh25356e22019-09-11 19:27:07 -0700241
242 // Swaps expected for index atomically. Returns true on success, false
243 // otherwise.
Brian Silverman177567e2020-08-12 19:51:33 -0700244 bool CompareAndExchangeStrong(Index expected, Index index) {
Brian Silverman2035de72019-12-18 21:39:14 -0800245 return index_.compare_exchange_strong(expected.index_, index.index_,
246 ::std::memory_order_acq_rel);
Austin Schuh25356e22019-09-11 19:27:07 -0700247 }
248
Brian Silverman177567e2020-08-12 19:51:33 -0700249 bool CompareAndExchangeWeak(Index *expected, Index index) {
250 return index_.compare_exchange_weak(expected->index_, index.index_,
251 ::std::memory_order_acq_rel);
252 }
253
Austin Schuh25356e22019-09-11 19:27:07 -0700254 private:
255 ::std::atomic<uint32_t> index_;
256};
257
258} // namespace ipc_lib
259} // namespace aos
260
261#endif // AOS_IPC_LIB_INDEX_H_