Add a set of classes to manage queue indices
These classes handle wrapping and invalidating indices, and wrap the
atomic operations used to store and read them.
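
A rough usage sketch (illustrative only; count and slot here are
placeholders, not identifiers from this change):

  QueueIndex index = QueueIndex::Zero(count).IncrementBy(5);
  Index location(index, slot);      // packs the low queue index bits + slot
  if (location.IsPlausible(index)) {
    // The message in this slot still belongs to this queue index.
  }
  AtomicIndex shared;               // normally lives in shared memory
  shared.Store(location);
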
Change-Id: I8190efb1f36a9840cb545c8b4e8f0ec04488e88e
diff --git a/aos/ipc_lib/BUILD b/aos/ipc_lib/BUILD
index 85f6dcd..6947df1 100644
--- a/aos/ipc_lib/BUILD
+++ b/aos/ipc_lib/BUILD
@@ -161,3 +161,18 @@
"//aos/logging",
],
)
+
+cc_library(
+ name = "index",
+ hdrs = ["index.h"],
+)
+
+cc_test(
+ name = "index_test",
+ srcs = ["index_test.cc"],
+ deps = [
+ ":index",
+ "//aos/testing:googletest",
+ "//aos/testing:test_logging",
+ ],
+)
diff --git a/aos/ipc_lib/index.h b/aos/ipc_lib/index.h
new file mode 100644
index 0000000..69f5856
--- /dev/null
+++ b/aos/ipc_lib/index.h
@@ -0,0 +1,233 @@
+#ifndef AOS_IPC_LIB_INDEX_H_
+#define AOS_IPC_LIB_INDEX_H_
+
+#include <sys/types.h>
+#include <atomic>
+
+namespace aos {
+namespace ipc_lib {
+
+struct AtomicQueueIndex;
+class AtomicIndex;
+class Index;
+
+namespace testing {
+class QueueIndexTest;
+} // namespace testing
+
+// There are two types of indices in the queue. One is the index into the
+// overall queue. If we have sent 1,000,005 messages, this is that number.
+//
+// The other is the index into the message list. This is essentially a message
+// pointer. It has some of the lower bits of the queue index encoded into it as
+// a safeguard to detect if someone re-used a message out from under us and we
+// couldn't tell otherwise. It is used to map a queue index to a message index.
+//
+// Each of these index types has an atomic version and a non-atomic one. The
+// atomic version is a wrapper around a uint32_t to hang helper functions off
+// of. The non-atomic version contains all the logic. These are all in the
+// header file to encourage the compiler to inline aggressively.
+//
+// The user should very infrequently be manipulating the values of these
+// directly. Use the classes instead to do the heavy lifting.
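+//
+// As an illustration, an Index packs both pieces into a single uint32_t (see
+// the Index constructor below):
+//   bits 31..16: the index into the message list (message_index)
+//   bits 15..0:  the low 16 bits of the queue index (queue_index)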
+
+// Structure for holding the index into the queue.
+class QueueIndex {
+ public:
+ // Returns an invalid queue element which uses a reserved value.
+ static QueueIndex Invalid() { return QueueIndex(0xffffffff, 0); }
+ // Returns a queue element pointing to 0.
+ static QueueIndex Zero(uint32_t count) { return QueueIndex(0, count); }
+
+ // Returns true if the index is valid.
+ bool valid() const { return index_ != 0xffffffff; }
+
+  // Returns the modulo base used to wrap indices so they avoid overlapping
+  // with the reserved (invalid) value.
+  // max_value is one more than the largest value we can store.
+  // count is the number of elements in the queue.
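+  // For example, MaxIndex(15u, 4u) == 12u and MaxIndex(16u, 4u) == 16u.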
+ static constexpr uint32_t MaxIndex(uint32_t max_value, uint32_t count) {
+ return (max_value / count) * count;
+ }
+
+ // Gets the next index.
+ QueueIndex Increment() const {
+ return IncrementBy(1u);
+ }
+
+ // Gets the nth next element.
+ QueueIndex IncrementBy(uint32_t amount) const {
+ uint32_t index = index_ + amount;
+    uint32_t max_index = MaxIndex(sentinel_value(), count_);
+
+ if (index < index_) {
+ // We wrapped. We are shifting up by 0x100000000 - MaxIndex(...).
+ // Which is equivalent to subtracting MaxIndex since everything is modular
+ // with a uint32_t.
+ index -= max_index;
+ }
+
+ // Now, wrap the remainder.
+ index = index % max_index;
+ return QueueIndex(index, count_);
+ }
+
+ // Gets the nth previous element.
+ QueueIndex DecrementBy(uint32_t amount) const {
+ uint32_t index = index_ - amount;
+ if (index > index_) {
+ // We wrapped. We are shifting down by 0x100000000 - MaxIndex(...).
+ // Which is equivalent to adding MaxIndex since everything is modular with
+ // a uint32_t.
+      index += MaxIndex(sentinel_value(), count_);
+ }
+ return QueueIndex(index, count_);
+ }
+
+ // Returns true if the lowest 16 bits of the queue index from the Index could
+ // plausibly match this queue index.
+ bool IsPlausible(uint16_t queue_index) const {
+ return valid() && (queue_index == static_cast<uint16_t>(index_ & 0xffff));
+ }
+
+ bool operator==(const QueueIndex other) const {
+ return other.index_ == index_;
+ }
+
+ bool operator!=(const QueueIndex other) const {
+ return other.index_ != index_;
+ }
+
+ // Returns the wrapped index into the queue.
+ uint32_t Wrapped() const { return index_ % count_; }
+
+ // Returns the raw index. This should be used very sparingly.
+ uint32_t index() const { return index_; }
+
+ private:
+ QueueIndex(uint32_t index, uint32_t count) : index_(index), count_(count) {}
+
+  static constexpr uint32_t sentinel_value() { return 0xffffffffu; }
+
+ friend struct AtomicQueueIndex;
+ friend class Index;
+ // For testing.
+ friend class testing::QueueIndexTest;
+
+ // Index and number of elements in the queue.
+ uint32_t index_;
+ // Count is stored here rather than passed in everywhere in the hopes that the
+ // compiler completely optimizes out this class and this variable if it isn't
+ // used.
+ uint32_t count_;
+};
+
+// Atomic storage for setting and getting QueueIndex objects.
+// Count is the number of messages in the queue.
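+//
+// An illustrative (hypothetical) pattern for claiming the next queue index;
+// next_queue_index and count are placeholders, not part of this change:
+//
+//   QueueIndex expected = next_queue_index.Load(count);
+//   if (!next_queue_index.CompareAndExchangeStrong(expected,
+//                                                  expected.Increment())) {
+//     // Another writer claimed this index first; reload and retry.
+//   }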
+struct AtomicQueueIndex {
+ public:
+ // Atomically reads the index without any ordering constraints.
+ QueueIndex RelaxedLoad(uint32_t count) {
+ return QueueIndex(index_.load(::std::memory_order_relaxed), count);
+ }
+
+ // Full bidirectional barriers here.
+ QueueIndex Load(uint32_t count) { return QueueIndex(index_.load(), count); }
+ inline void Store(QueueIndex value) { index_.store(value.index_); }
+
+ // Invalidates the element unconditionally.
+ inline void Invalidate() { Store(QueueIndex::Invalid()); }
+
+ // Swaps expected for index atomically. Returns true on success, false
+ // otherwise.
+ inline bool CompareAndExchangeStrong(QueueIndex expected, QueueIndex index) {
+ return index_.compare_exchange_strong(expected.index_, index.index_);
+ }
+
+ private:
+ ::std::atomic<uint32_t> index_;
+};
+
+// Holds the lower 16 bits of the queue index and the index into the message
+// list, packed into a single uint32_t.
+class Index {
+ public:
+ // Constructs an Index. queue_index is the QueueIndex of this message, and
+ // message_index is the index into the messages structure.
+ Index(QueueIndex queue_index, uint16_t message_index)
+ : Index(queue_index.index_, message_index) {}
+ Index(uint32_t queue_index, uint16_t message_index)
+ : index_((queue_index & 0xffff) |
+ (static_cast<uint32_t>(message_index) << 16)) {}
+
+ // Index of this message in the message array.
+ uint16_t message_index() const { return (index_ >> 16) & 0xffff; }
+
+ // Lowest 16 bits of the queue index of this message in the queue.
+ uint16_t queue_index() const { return index_ & 0xffff; }
+
+ // Returns true if the provided queue index plausibly represents this Index.
+ bool IsPlausible(QueueIndex queue_index) const {
+ return queue_index.IsPlausible(this->queue_index());
+ }
+
+ // Returns an invalid Index.
+  static Index Invalid() { return Index(sentinel_value()); }
+ // Checks if this Index is valid or not.
+  bool valid() const { return index_ != sentinel_value(); }
+
+ // Returns the raw Index. This should only be used for debug.
+ uint32_t get() const { return index_; }
+
+  // Returns the maximum number of messages we can store. This is kept below
+  // 0xffff so that a valid Index can never equal the invalid sentinel.
+ static constexpr uint16_t MaxMessages() { return 0xfffe; }
+
+ bool operator==(const Index other) const { return other.index_ == index_; }
+
+ private:
+ Index(uint32_t index)
+ : index_(index) {}
+
+ friend class AtomicIndex;
+
+  static constexpr uint32_t sentinel_value() { return 0xffffffffu; }
+
+  // Note: a value of 0xffffffff is a sentinel representing an invalid entry.
+  // This works because it would require both a queue index ending in 0xffff
+  // *and* a message index of 0xffff, and the latter can never happen because
+  // we limit the maximum number of messages.
+ uint32_t index_;
+};
+
+// Atomic storage for setting and getting Index objects.
+class AtomicIndex {
+ public:
+ // Stores and loads atomically without ordering constraints.
+ Index RelaxedLoad() {
+ return Index(index_.load(::std::memory_order_relaxed));
+ }
+ void RelaxedStore(Index index) {
+ index_.store(index.index_, ::std::memory_order_relaxed);
+ }
+
+ // Invalidates the index atomically, but without any ordering constraints.
+ void RelaxedInvalidate() { RelaxedStore(Index::Invalid()); }
+
+ // Full bidirectional barriers here.
+ void Store(Index index) { index_.store(index.index_); }
+ Index Load() { return Index(index_.load()); }
+
+ // Swaps expected for index atomically. Returns true on success, false
+ // otherwise.
+ inline bool CompareAndExchangeStrong(Index expected, Index index) {
+ return index_.compare_exchange_strong(expected.index_, index.index_);
+ }
+
+ private:
+ ::std::atomic<uint32_t> index_;
+};
+
+} // namespace ipc_lib
+} // namespace aos
+
+#endif // AOS_IPC_LIB_INDEX_H_
diff --git a/aos/ipc_lib/index_test.cc b/aos/ipc_lib/index_test.cc
new file mode 100644
index 0000000..689ed24
--- /dev/null
+++ b/aos/ipc_lib/index_test.cc
@@ -0,0 +1,167 @@
+#include "aos/ipc_lib/index.h"
+
+#include "aos/testing/test_logging.h"
+#include "gtest/gtest.h"
+
+namespace aos {
+namespace ipc_lib {
+namespace testing {
+
+class QueueIndexTest : public ::testing::Test {
+ protected:
+ uint32_t GetIndex(const QueueIndex &index) {
+ printf("Index, count: %x, %x\n", index.index_, index.count_);
+ return index.index();
+ }
+
+ QueueIndex Make(uint32_t index, uint32_t count) {
+ return QueueIndex(index, count);
+ }
+};
+
+// Tests that an invalid index is invalid and a valid one is valid.
+TEST_F(QueueIndexTest, TestValid) {
+ QueueIndex invalid = QueueIndex::Invalid();
+
+ EXPECT_EQ(0xffffffff, GetIndex(invalid));
+ EXPECT_FALSE(invalid.valid());
+
+ QueueIndex valid = Make(5, 7);
+
+ EXPECT_TRUE(valid.valid());
+}
+
+// Tests that the max index function returns the max index as expected.
+TEST_F(QueueIndexTest, TestMaxIndex) {
+ EXPECT_EQ(QueueIndex::MaxIndex(12u, 4u), 12u);
+ EXPECT_EQ(QueueIndex::MaxIndex(13u, 4u), 12u);
+ EXPECT_EQ(QueueIndex::MaxIndex(14u, 4u), 12u);
+ EXPECT_EQ(QueueIndex::MaxIndex(15u, 4u), 12u);
+ EXPECT_EQ(QueueIndex::MaxIndex(16u, 4u), 16u);
+}
+
+// Tests that incrementing an index works as expected.
+TEST_F(QueueIndexTest, TestIncrement) {
+ QueueIndex zero = Make(0, 3);
+
+ EXPECT_EQ(GetIndex(zero), 0u);
+
+ QueueIndex one = zero.Increment();
+ EXPECT_EQ(GetIndex(one), 1u);
+
+  // Try it with a count of 3. 0xffffffff is evenly divisible by 3, so the
+  // modulo base is 0xffffffff itself.
+ {
+ QueueIndex two_below = Make(QueueIndex::MaxIndex(0xffffffffu, 3u) - 2, 3);
+ EXPECT_EQ(GetIndex(two_below), 0xfffffffdu);
+
+ QueueIndex one_below = two_below.Increment();
+ EXPECT_EQ(GetIndex(one_below), 0xfffffffeu);
+
+ QueueIndex wrapped = one_below.Increment();
+ EXPECT_EQ(GetIndex(wrapped), 0);
+
+ EXPECT_EQ(wrapped, zero);
+ }
+
+  // Now try it with a count of 4, which does not divide 0xffffffff evenly.
+ {
+ QueueIndex two_below = Make(QueueIndex::MaxIndex(0xffffffffu, 4u) - 2, 4);
+ EXPECT_EQ(GetIndex(two_below), 0xfffffffau);
+
+ QueueIndex one_below = two_below.Increment();
+ EXPECT_EQ(GetIndex(one_below), 0xfffffffbu);
+
+ QueueIndex wrapped = one_below.Increment();
+ EXPECT_EQ(GetIndex(wrapped), 0);
+ }
+}
+
+// Tests that decrementing an index and then incrementing it again works as
+// expected.
+TEST_F(QueueIndexTest, TestDecrement) {
+ {
+ QueueIndex zero = Make(0, 3);
+ EXPECT_EQ(GetIndex(zero), 0x00000000u);
+
+ QueueIndex negative10 = zero.DecrementBy(10);
+ EXPECT_EQ(GetIndex(negative10), 0xffffffff - 10);
+
+ EXPECT_EQ(zero, negative10.IncrementBy(10));
+ }
+ {
+ QueueIndex zero = Make(0, 4);
+ EXPECT_EQ(GetIndex(zero), 0x00000000u);
+
+ QueueIndex negative10 = zero.DecrementBy(10);
+ EXPECT_EQ(GetIndex(negative10), 0xfffffffc - 10);
+
+ EXPECT_EQ(zero, negative10.IncrementBy(10));
+ }
+
+ {
+ QueueIndex five = Make(5, 3);
+ EXPECT_EQ(GetIndex(five), 5u);
+
+ QueueIndex negative10 = five.DecrementBy(10);
+ EXPECT_EQ(GetIndex(negative10), 0xffffffff - 5);
+
+ EXPECT_EQ(five, negative10.IncrementBy(10));
+ }
+ {
+ QueueIndex five = Make(5, 4);
+ EXPECT_EQ(GetIndex(five), 5u);
+
+ QueueIndex negative10 = five.DecrementBy(10);
+ EXPECT_EQ(GetIndex(negative10), 0xfffffffc - 5);
+
+ EXPECT_EQ(five, negative10.IncrementBy(10));
+ }
+}
+
+// Tests that an invalid index is invalid, and a valid one is valid.
+TEST(IndexTest, TestInvalid) {
+ EXPECT_FALSE(Index::Invalid().valid());
+
+ EXPECT_TRUE(Index(0, 0).valid());
+}
+
+// Tests that we get back the two indices as expected.
+TEST(IndexTest, TestRecoverIndices) {
+ QueueIndex five = QueueIndex::Zero(100).IncrementBy(5);
+ Index index(five, 11);
+ EXPECT_EQ(index.queue_index(), 5);
+ EXPECT_EQ(index.message_index(), 11);
+}
+
+// Tests that IsPlausible behaves as expected.
+TEST(IndexTest, TestPlausible) {
+ QueueIndex five = QueueIndex::Zero(100).IncrementBy(5);
+ QueueIndex ffff = QueueIndex::Zero(100).IncrementBy(0xffff);
+
+  // Tests that IsPlausible still matches five even after the queue index
+  // grows past 16 bits, since the Index only stores the low 16 bits.
+ for (int i = 0; i < 100; ++i) {
+ Index index(five, i);
+ EXPECT_EQ(index.queue_index(), 5);
+
+ EXPECT_TRUE(index.IsPlausible(five));
+
+ EXPECT_EQ(index.message_index(), i);
+
+ five = five.IncrementBy(0x10000);
+ }
+
+ // Tests that a queue index with a value of 0xffff doesn't match an invalid
+ // index.
+ for (int i = 0; i < 100; ++i) {
+ Index index(ffff, i);
+ EXPECT_EQ(index.queue_index(), 0xffff);
+
+ EXPECT_TRUE(index.IsPlausible(ffff));
+ EXPECT_FALSE(index.IsPlausible(QueueIndex::Invalid()));
+
+ EXPECT_EQ(index.message_index(), i);
+ }
+}
+
+} // namespace testing
+} // namespace ipc_lib
+} // namespace aos