#ifndef AOS_IPC_LIB_INDEX_H_
#define AOS_IPC_LIB_INDEX_H_

#include <stdint.h>

#include <atomic>
#include <limits>
#include <string>

#include "absl/log/check.h"
#include "absl/log/log.h"

#include "aos/ipc_lib/shm_observers.h"

#ifndef AOS_QUEUE_ATOMIC_SIZE
#if UINTPTR_MAX == 0xffffffff
#define AOS_QUEUE_ATOMIC_SIZE 32
/* 32-bit */
#elif UINTPTR_MAX == 0xffffffffffffffff
#define AOS_QUEUE_ATOMIC_SIZE 64
/* 64-bit */
#else
#error "Unknown pointer size"
#endif
#endif

namespace aos::ipc_lib {

namespace testing {
class QueueIndexTest;
}  // namespace testing

// There are two types of indices in the queue. One is the index into the
// overall queue. If we have sent 1,000,005 messages, this is that number.
//
// The other is the index into the message list. This is essentially a message
// pointer. It has some of the lower bits of the queue index encoded into it as
// a safeguard to detect if someone re-used a message out from under us and we
// couldn't tell otherwise. It is used to map a queue index to a message index.
//
// Each of these index types has an atomic version and a non-atomic one. The
// atomic version is a thin wrapper around an atomic integer to hang helper
// functions off of. The non-atomic version contains all the logic. These are
// all in the header file to encourage the compiler to inline aggressively.
//
// The user should very rarely need to manipulate the values of these directly.
// Use the classes instead to do the heavy lifting.
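//
// A minimal usage sketch (illustrative only, not taken from the real queue
// code) of how the pieces below fit together:
//
//   QueueIndex queue_index = QueueIndex::Zero(/*count=*/100);
//   queue_index = queue_index.Increment();  // Overall index 1.
//   uint32_t slot = queue_index.Wrapped();  // 1 % 100 == 1.
//
//   // Pack the queue index together with a slot in the message array.
//   Index index(queue_index, /*message_index=*/5);
//   CHECK(index.IsPlausible(queue_index));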

// Structure for holding the index into the queue.
class QueueIndex {
 public:
#if AOS_QUEUE_ATOMIC_SIZE == 64
  typedef uint32_t PackedIndexType;
#else
  typedef uint16_t PackedIndexType;
#endif

  // Returns an invalid queue element which uses a reserved value.
  static QueueIndex Invalid() { return QueueIndex(sentinal_value(), 0); }
  // Returns a queue element pointing to 0.
  static QueueIndex Zero(uint32_t count) { return QueueIndex(0, count); }

  // Returns true if the index is valid.
  bool valid() const { return index_ != sentinal_value(); }

  // Returns the modulo base used to wrap to avoid overlapping with the
  // reserved number.
  // max_value is one more than the max value we can store.
  // count is the number of elements in the queue.
  static constexpr uint32_t MaxIndex(uint32_t max_value, uint32_t count) {
    return (max_value / count) * count;
  }
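  // For example (illustrative numbers): with count == 100 this is
  // MaxIndex(0xffffffffu, 100) == (4294967295 / 100) * 100 == 4294967200, so
  // raw indices stay in [0, 4294967200) and never collide with the reserved
  // value 0xffffffff.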

  // Gets the next index.
  QueueIndex Increment() const { return IncrementBy(1u); }

  // Gets the nth next element.
  QueueIndex IncrementBy(uint32_t amount) const {
    uint32_t index = index_ + amount;
    uint32_t max_index = MaxIndex(sentinal_value(), count_);

    if (index < index_) {
      // We wrapped. We are shifting up by 0x100000000 - MaxIndex(...), which
      // is equivalent to subtracting MaxIndex since everything is modular with
      // a uint32_t.
      index -= max_index;
    }

    // Now, wrap the remainder.
    index = index % max_index;
    return QueueIndex(index, count_);
  }
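  // Worked example of the wrap handling above (illustrative numbers, count ==
  // 100 so max_index == 4294967200): starting from index_ == 4294967100 with
  // amount == 200, the uint32_t sum wraps to 4; subtracting max_index (mod
  // 2^32) gives 100, which matches (4294967100 + 200) % 4294967200 == 100.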

  // Gets the nth previous element.
  QueueIndex DecrementBy(uint32_t amount) const {
    uint32_t index = index_ - amount;
    if (index > index_) {
      // We wrapped. We are shifting down by 0x100000000 - MaxIndex(...),
      // which is equivalent to adding MaxIndex since everything is modular
      // with a uint32_t.
      index += MaxIndex(sentinal_value(), count_);
    }
    return QueueIndex(index, count_);
  }

  // Returns true if the lowest bits of the queue index from the Index could
  // plausibly match this queue index. The number of bits matched depends on
  // the size of atomics in use.
  bool IsPlausible(PackedIndexType queue_index) const {
    return valid() &&
           (queue_index ==
            static_cast<PackedIndexType>(
                index_ & std::numeric_limits<PackedIndexType>::max()));
  }
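  // For example (illustrative): with 32-bit atomics PackedIndexType is
  // uint16_t, so a valid QueueIndex whose raw index is 0x00012345 considers
  // the packed value 0x2345 plausible; only the low 16 bits take part in the
  // comparison.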

  bool operator==(const QueueIndex other) const {
    return other.index_ == index_;
  }

  bool operator!=(const QueueIndex other) const {
    return other.index_ != index_;
  }

  // Returns the wrapped index into the queue.
  uint32_t Wrapped() const { return index_ % count_; }

  // Returns the raw index. This should be used very sparingly.
  uint32_t index() const { return index_; }

  QueueIndex Clear() const { return QueueIndex(0, count_); }

  // Returns a string representing the index.
  ::std::string DebugString() const;

 private:
  QueueIndex(uint32_t index, uint32_t count) : index_(index), count_(count) {}

  static constexpr uint32_t sentinal_value() { return 0xffffffffu; }

  friend struct AtomicQueueIndex;
  friend class Index;
  // For testing.
  friend class testing::QueueIndexTest;

  // Index and number of elements in the queue.
  uint32_t index_;
  // Count is stored here rather than passed in everywhere in the hopes that
  // the compiler completely optimizes out this class and this variable if it
  // isn't used.
  uint32_t count_;
};

// Atomic storage for setting and getting QueueIndex objects.
// Count is the number of messages in the queue.
struct AtomicQueueIndex {
 public:
  // Atomically reads the index without any ordering constraints.
  QueueIndex RelaxedLoad(uint32_t count) const {
    return QueueIndex(index_.load(::std::memory_order_relaxed), count);
  }

  // Full bidirectional barriers here.
  QueueIndex Load(uint32_t count) const {
    return QueueIndex(index_.load(::std::memory_order_acquire), count);
  }
  inline void Store(QueueIndex value) {
    index_.store(value.index_, ::std::memory_order_release);
  }

  // Invalidates the element unconditionally.
  inline void Invalidate() { Store(QueueIndex::Invalid()); }

  inline void RelaxedInvalidate() {
    index_.store(QueueIndex::Invalid().index_, ::std::memory_order_relaxed);
  }

  // Swaps expected for index atomically. Returns true on success, false
  // otherwise.
  inline bool CompareAndExchangeStrong(QueueIndex expected, QueueIndex index) {
    linux_code::ipc_lib::RunShmObservers run_observers(&index_, true);
    return index_.compare_exchange_strong(expected.index_, index.index_,
                                          ::std::memory_order_acq_rel);
  }
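  // A typical retry loop built on top of this (illustrative sketch; the names
  // atomic_index and count are assumed here, not defined in this header):
  //
  //   QueueIndex expected = atomic_index->Load(count);
  //   while (!atomic_index->CompareAndExchangeStrong(expected,
  //                                                  expected.Increment())) {
  //     expected = atomic_index->Load(count);
  //   }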

 private:
  ::std::atomic<uint32_t> index_;
};

// Structure holding the queue index and the index into the message list.
class Index {
 public:
#if AOS_QUEUE_ATOMIC_SIZE == 64
  typedef uint64_t IndexType;
  typedef uint32_t MessageIndexType;
#else
  typedef uint32_t IndexType;
  typedef uint16_t MessageIndexType;
#endif
  typedef QueueIndex::PackedIndexType PackedIndexType;

  // Constructs an Index. queue_index is the QueueIndex of this message, and
  // message_index is the index into the messages structure.
  Index(QueueIndex queue_index, MessageIndexType message_index)
      : Index(queue_index.index_, message_index) {}
  Index(uint32_t queue_index, MessageIndexType message_index)
      : index_(static_cast<IndexType>(
                   queue_index & std::numeric_limits<PackedIndexType>::max()) |
               (static_cast<IndexType>(message_index)
                << std::numeric_limits<PackedIndexType>::digits)) {
    CHECK_LE(message_index, MaxMessages());
  }
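  // Bit layout example (illustrative): with 64-bit atomics PackedIndexType is
  // uint32_t (32 digits), so queue_index == 0x12345678 and message_index == 7
  // pack into index_ == 0x0000000712345678; message_index() and queue_index()
  // below simply undo this packing.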

  // Index of this message in the message array.
  MessageIndexType message_index() const {
    return (index_ >> std::numeric_limits<PackedIndexType>::digits) &
           std::numeric_limits<MessageIndexType>::max();
  }

  // Lowest bits of the queue index of this message in the queue. This will
  // either be 16 or 32 bits, depending on whether we have 32 or 64 bit atomics
  // under the covers.
  PackedIndexType queue_index() const {
    return index_ & std::numeric_limits<PackedIndexType>::max();
  }

  // Returns true if the provided queue index plausibly represents this Index.
  bool IsPlausible(QueueIndex queue_index) const {
    return queue_index.IsPlausible(this->queue_index());
  }

  // Returns an invalid Index.
  static Index Invalid() { return Index(sentinal_value()); }
  // Checks if this Index is valid or not.
  bool valid() const { return index_ != sentinal_value(); }

  // Returns the raw Index. This should only be used for debug.
  IndexType get() const { return index_; }

  // Returns the maximum number of messages we can store before overflowing.
  static constexpr MessageIndexType MaxMessages() {
    return std::numeric_limits<MessageIndexType>::max() - 1;
  }
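  // Concretely (derived from the typedefs above): 0xfffe with 16-bit message
  // indices (32-bit atomics) and 0xfffffffe with 32-bit message indices
  // (64-bit atomics), which keeps the all-ones value free for the sentinel.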

  bool operator==(const Index other) const { return other.index_ == index_; }
  bool operator!=(const Index other) const { return other.index_ != index_; }

  // Returns a string representing the index.
  std::string DebugString() const;

 private:
  Index(IndexType index) : index_(index) {}

  friend class AtomicIndex;

  static constexpr IndexType sentinal_value() {
    return std::numeric_limits<IndexType>::max();
  }

  // Note: a value of all 1 bits is a sentinel representing an invalid entry.
  // This works because producing it would require both a queue index whose
  // packed bits are all ones *and* an all-ones message index. That constraint
  // is easy to enforce by limiting the max messages.
  IndexType index_;
};

// Atomic storage for setting and getting Index objects.
class AtomicIndex {
 public:
  // Stores and loads atomically without ordering constraints.
  Index RelaxedLoad() const {
    return Index(index_.load(::std::memory_order_relaxed));
  }
  void RelaxedStore(Index index) {
    index_.store(index.index_, ::std::memory_order_relaxed);
  }

  // Invalidates the index atomically, but without any ordering constraints.
  void RelaxedInvalidate() { RelaxedStore(Index::Invalid()); }

  // Full barriers here.
  void Invalidate() { Store(Index::Invalid()); }
  void Store(Index index) {
    index_.store(index.index_, ::std::memory_order_release);
  }
  Index Load() const { return Index(index_.load(::std::memory_order_acquire)); }

  // Swaps expected for index atomically. Returns true on success, false
  // otherwise.
  bool CompareAndExchangeStrong(Index expected, Index index) {
    linux_code::ipc_lib::RunShmObservers run_observers(&index_, true);
    return index_.compare_exchange_strong(expected.index_, index.index_,
                                          ::std::memory_order_acq_rel);
  }

  bool CompareAndExchangeWeak(Index *expected, Index index) {
    return index_.compare_exchange_weak(expected->index_, index.index_,
                                        ::std::memory_order_acq_rel);
  }
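  // Illustrative retry pattern for the weak variant (the names atomic and
  // new_index are assumed here, not defined in this header):
  //
  //   Index expected = atomic->RelaxedLoad();
  //   while (!atomic->CompareAndExchangeWeak(&expected, new_index)) {
  //     // expected now holds the value actually observed; inspect it and
  //     // retry or bail out as appropriate.
  //   }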

 private:
  ::std::atomic<Index::IndexType> index_;
};

}  // namespace aos::ipc_lib

#endif  // AOS_IPC_LIB_INDEX_H_