#if !AOS_DEBUG
#undef NDEBUG
#define NDEBUG
#endif

#include "aos/ipc_lib/queue.h"

#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

#include <algorithm>
#include <memory>

#include "aos/ipc_lib/core_lib.h"
#include "aos/type_traits/type_traits.h"

namespace aos {
namespace {

namespace chrono = ::std::chrono;

static_assert(shm_ok<RawQueue>::value,
              "RawQueue instances go into shared memory");

const bool kReadDebug = false;
const bool kWriteDebug = false;
const bool kRefDebug = false;
const bool kFetchDebug = false;
const bool kReadIndexDebug = false;

// The number of extra messages the pool associated with each queue will be
// able to hold (for readers who are slow about freeing them or who leak one
// when they get killed).
const int kExtraMessages = 20;

}  // namespace

constexpr Options<RawQueue>::Option RawQueue::kPeek;
constexpr Options<RawQueue>::Option RawQueue::kFromEnd;
constexpr Options<RawQueue>::Option RawQueue::kNonBlock;
constexpr Options<RawQueue>::Option RawQueue::kBlock;
constexpr Options<RawQueue>::Option RawQueue::kOverride;

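// A hedged illustration of how these options get used (operator| combining
// and the public ReadMessage() wrapper live in queue.h, so their exact shape
// is an assumption here, not something defined in this file):
//   queue->ReadMessage(RawQueue::kPeek | RawQueue::kNonBlock);
// would return the oldest message without removing it, or NULL immediately if
// the queue is empty, based on the handling in DoReadMessage() below.
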
// This is what goes in memory immediately before each queue message. It is
// always allocated aligned to 8 bytes, and its size has to maintain that
// alignment for the message that follows immediately.
struct RawQueue::MessageHeader {
  MessageHeader *next;

  // Gets the message header immediately preceding msg.
  static MessageHeader *Get(const void *msg) {
    return reinterpret_cast<MessageHeader *>(__builtin_assume_aligned(
        static_cast<uint8_t *>(const_cast<void *>(msg)) - sizeof(MessageHeader),
        alignof(MessageHeader)));
  }

  int32_t ref_count() const {
    return __atomic_load_n(&ref_count_, __ATOMIC_RELAXED);
  }
  void set_ref_count(int32_t val) {
    __atomic_store_n(&ref_count_, val, __ATOMIC_RELAXED);
  }

  void ref_count_sub() { __atomic_sub_fetch(&ref_count_, 1, __ATOMIC_RELAXED); }
  void ref_count_add() { __atomic_add_fetch(&ref_count_, 1, __ATOMIC_RELAXED); }

 private:
  // This gets accessed with atomic instructions without any
  // locks held by various member functions.
  int32_t ref_count_;

  // Padding to keep the total size at 8 bytes with 4-byte pointers, or to
  // bump it up to 16 bytes when a pointer is 8 bytes by itself.
#if __SIZEOF_POINTER__ == 8
#ifdef __clang__
  // Clang is smart enough to realize this is unused, but GCC doesn't like the
  // attribute here...
  __attribute__((unused))
#endif
  char padding[4];
#elif __SIZEOF_POINTER__ == 4
  // No padding needed to get 8 byte total size.
#else
#error Unknown pointer size.
#endif
};

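// index_add1() advances a ring-buffer index by one slot, wrapping back to 0
// when it reaches data_length_. For example, with data_length_ == 5,
// index_add1(3) == 4 and index_add1(4) == 0.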
inline int RawQueue::index_add1(int index) {
  // Doing it this way instead of with % is more efficient on ARM.
  int r = index + 1;
  assert(index <= data_length_);
  if (r == data_length_) {
    return 0;
  } else {
    return r;
  }
}

void RawQueue::DecrementMessageReferenceCount(const void *msg) {
  MessageHeader *header = MessageHeader::Get(msg);
  header->ref_count_sub();
  if (kRefDebug) {
    printf("%p ref dec count: %p count=%d\n", this, msg, header->ref_count());
  }

  // The only way it should ever be 0 is if we were the last one to decrement,
  // in which case nobody else should have it around to re-increment it or
  // anything in the middle, so this check is safe to do non-atomically with
  // the decrement.
  if (header->ref_count() == 0) {
    DoFreeMessage(msg);
  } else {
    assert(header->ref_count() > 0);
  }
}

inline void RawQueue::IncrementMessageReferenceCount(const void *msg) const {
  MessageHeader *const header = MessageHeader::Get(msg);
  header->ref_count_add();
  if (kRefDebug) {
    printf("%p ref inc count: %p\n", this, msg);
  }
}

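// Called when a message's reference count hits 0. If a recycle queue is
// configured, the freed message is handed to it and a fresh message from the
// recycle queue's pool takes its place on our free list; otherwise the
// message's header goes straight back onto free_messages_ via a lock-free
// push.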
inline void RawQueue::DoFreeMessage(const void *msg) {
  MessageHeader *header = MessageHeader::Get(msg);
  if (kRefDebug) {
    printf("%p ref free to %p: %p\n", this, recycle_, msg);
  }

  if (__builtin_expect(recycle_ != nullptr, 0)) {
    void *const new_msg = recycle_->GetMessage();
    if (new_msg == nullptr) {
      fprintf(stderr,
              "queue: couldn't get a message"
              " for recycle queue %p\n",
              recycle_);
    } else {
      header->ref_count_add();
      if (!recycle_->WriteMessage(const_cast<void *>(msg), kOverride)) {
        fprintf(stderr,
                "queue: %p->WriteMessage(%p, kOverride) failed."
                " aborting\n",
                recycle_, msg);
        printf("see stderr\n");
        abort();
      }
      msg = new_msg;
      header = MessageHeader::Get(new_msg);
    }
  }

  // This works around GCC bug 60272 (fixed in 4.8.3).
  // new_next should just get replaced with header->next (and the body of the
  // loop should become empty).
  // The bug is that the store to new_next after the compare/exchange is
  // unconditional, but it should only happen when the exchange fails, which
  // could mean overwriting what somebody else who preempted us right then
  // changed it to.
  // TODO(brians): Get rid of this workaround once we get a new enough GCC.
  MessageHeader *new_next = __atomic_load_n(&free_messages_, __ATOMIC_RELAXED);
  do {
    header->next = new_next;
  } while (__builtin_expect(
      !__atomic_compare_exchange_n(&free_messages_, &new_next, header, true,
                                   __ATOMIC_RELEASE, __ATOMIC_RELAXED),
      0));
}

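// Pops a message off the free_messages_ list with a compare-and-exchange loop
// (the same lock-free stack that DoFreeMessage() pushes onto) and returns a
// pointer to the message body that follows the header, with the reference
// count reset to 1.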
void *RawQueue::GetMessage() {
  MessageHeader *header = __atomic_load_n(&free_messages_, __ATOMIC_RELAXED);
  do {
    if (__builtin_expect(header == nullptr, 0)) {
      LOG(FATAL) << "overused pool of queue " << this << " (" << name_ << ")";
    }
  } while (__builtin_expect(
      !__atomic_compare_exchange_n(&free_messages_, &header, header->next, true,
                                   __ATOMIC_ACQ_REL, __ATOMIC_RELAXED),
      0));
  void *msg = reinterpret_cast<uint8_t *>(header + 1);
  // It might be uninitialized, 0 from a previous use, or 1 from previously
  // being recycled.
  header->set_ref_count(1);
  if (kRefDebug) {
    printf("%p ref alloc: %p\n", this, msg);
  }
  return msg;
}

RawQueue::RawQueue(const char *name, size_t length, int hash, int queue_length)
    : readable_(&data_lock_), writable_(&data_lock_) {
  static_assert(shm_ok<RawQueue::MessageHeader>::value,
                "the whole point is to stick it in shared memory");
  static_assert((sizeof(RawQueue::MessageHeader) % 8) == 0,
                "need to revalidate size/alignment assumptions");

  CHECK_GE(queue_length, 1) << ": queue length " << queue_length << " of "
                            << name << " needs to be at least 1";

  const size_t name_size = strlen(name) + 1;
  char *temp = static_cast<char *>(shm_malloc(name_size));
  memcpy(temp, name, name_size);
  name_ = temp;
  length_ = length;
  hash_ = hash;
  queue_length_ = queue_length;

  next_ = NULL;
  recycle_ = NULL;

  if (kFetchDebug) {
    printf("initializing name=%s, length=%zu, hash=%d, queue_length=%d\n",
           name, length, hash, queue_length);
  }

  data_length_ = queue_length + 1;
  data_ = static_cast<void **>(shm_malloc(sizeof(void *) * data_length_));
  data_start_ = 0;
  data_end_ = 0;
  messages_ = 0;

  msg_length_ = length + sizeof(MessageHeader);

  // Create all of the messages for the free list and stick them on.
  {
    MessageHeader *previous = nullptr;
    for (int i = 0; i < queue_length + kExtraMessages; ++i) {
      MessageHeader *const message =
          static_cast<MessageHeader *>(shm_malloc(msg_length_));
      free_messages_ = message;
      message->next = previous;
      previous = message;
    }
  }

  readable_waiting_ = false;

  if (kFetchDebug) {
    printf("made queue %s\n", name);
  }
}

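// A minimal usage sketch of the Fetch/write/read flow (illustrative only: the
// queue name, hash values, and message layout are hypothetical, and
// WriteMessage()/ReadMessage()/FreeMessage() are the public wrappers assumed
// to be declared in queue.h around the Do*() implementations below):
//   RawQueue *queue = RawQueue::Fetch("example", sizeof(ExampleMessage),
//                                     /*hash=*/0x1234, /*queue_length=*/10);
//   void *message = queue->GetMessage();
//   // ... fill in the message ...
//   if (!queue->WriteMessage(message, RawQueue::kNonBlock)) {
//     // The queue was full and we chose not to block or override.
//   }
//   const void *read = queue->ReadMessage(RawQueue::kBlock);
//   // ... use it ...
//   queue->FreeMessage(read);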
RawQueue *RawQueue::Fetch(const char *name, size_t length, int hash,
                          int queue_length) {
  if (kFetchDebug) {
    printf("fetching queue %s\n", name);
  }
  if (mutex_lock(&global_core->mem_struct->queues.lock) != 0) {
    LOG(FATAL) << "mutex_lock(" << &global_core->mem_struct->queues.lock
               << ") failed";
  }
  RawQueue *current =
      static_cast<RawQueue *>(global_core->mem_struct->queues.pointer);
  if (current != NULL) {
    while (true) {
      // If we found a matching queue.
      if (strcmp(current->name_, name) == 0 && current->length_ == length &&
          current->hash_ == hash && current->queue_length_ == queue_length) {
        mutex_unlock(&global_core->mem_struct->queues.lock);
        return current;
      } else {
        if (kFetchDebug) {
          printf("rejected queue %s strcmp=%d target=%s\n", current->name_,
                 strcmp(current->name_, name), name);
        }
      }
      // If this is the last one.
      if (current->next_ == NULL) break;
      current = current->next_;
    }
  }

  RawQueue *r = new (shm_malloc(sizeof(RawQueue)))
      RawQueue(name, length, hash, queue_length);
  if (current == NULL) {  // if we don't already have one
    global_core->mem_struct->queues.pointer = r;
  } else {
    current->next_ = r;
  }

  mutex_unlock(&global_core->mem_struct->queues.lock);
  return r;
}

RawQueue *RawQueue::Fetch(const char *name, size_t length, int hash,
                          int queue_length, int recycle_hash,
                          int recycle_length, RawQueue **recycle) {
  RawQueue *r = Fetch(name, length, hash, queue_length);
  r->recycle_ = Fetch(name, length, recycle_hash, recycle_length);
  if (r == r->recycle_) {
    fprintf(stderr, "queue: r->recycle_(=%p) == r(=%p)\n", r->recycle_, r);
    printf("see stderr\n");
    r->recycle_ = NULL;
    abort();
  }
  *recycle = r->recycle_;
  return r;
}

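// Appends msg to the queue under data_lock_. When the queue is full, the
// behavior depends on options: kNonBlock drops the message (releasing the
// caller's reference) and returns false, kOverride frees the oldest message
// to make room, and kBlock waits on writable_ until a reader frees up a slot.
// Readers that registered interest (readable_waiting_) get a broadcast after
// the lock is released.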
bool RawQueue::DoWriteMessage(void *msg, Options<RawQueue> options) {
  if (kWriteDebug) {
    printf("queue: %p->WriteMessage(%p, %x), len: %zu\n", this, msg,
           options.printable(), msg_length_);
  }

  bool signal_readable;

  {
    IPCMutexLocker locker(&data_lock_);
    CHECK(!locker.owner_died());

    int new_end;
    while (true) {
      new_end = index_add1(data_end_);
      // If there is room in the queue right now.
      if (new_end != data_start_) break;
      if (options & kNonBlock) {
        if (kWriteDebug) {
          printf("queue: not blocking on %p. returning false\n", this);
        }
        DecrementMessageReferenceCount(msg);
        return false;
      } else if (options & kOverride) {
        if (kWriteDebug) {
          printf("queue: overriding on %p\n", this);
        }
        // Avoid leaking the message that we're going to overwrite.
        DecrementMessageReferenceCount(data_[data_start_]);
        data_start_ = index_add1(data_start_);
      } else {  // kBlock
        assert(options & kBlock);
        if (kWriteDebug) {
          printf("queue: going to wait for writable_ of %p\n", this);
        }
        CHECK(!writable_.Wait());
      }
    }
    data_[data_end_] = msg;
    ++messages_;
    data_end_ = new_end;

    signal_readable = readable_waiting_;
    readable_waiting_ = false;
  }

  if (signal_readable) {
    if (kWriteDebug) {
      printf("queue: broadcasting to readable_ of %p\n", this);
    }
    readable_.Broadcast();
  } else if (kWriteDebug) {
    printf("queue: skipping broadcast to readable_ of %p\n", this);
  }

  if (kWriteDebug) {
    printf("queue: write returning true on queue %p\n", this);
  }
  return true;
}

inline void RawQueue::ReadCommonEnd() {
  if (is_writable()) {
    if (kReadDebug) {
      printf("queue: %ssignalling writable_ of %p\n",
             writable_start_ ? "not " : "", this);
    }
    if (!writable_start_) writable_.Broadcast();
  }
}

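// Shared entry path for both read variants: waits (subject to options and
// timeout) until there is a message the caller hasn't already seen, and
// remembers whether the queue was writable before the read so ReadCommonEnd()
// knows whether to wake writers afterwards.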
bool RawQueue::ReadCommonStart(Options<RawQueue> options, int *index,
                               chrono::nanoseconds timeout) {
  while (data_start_ == data_end_ || ((index != NULL) && messages_ <= *index)) {
    if (options & kNonBlock) {
      if (kReadDebug) {
        printf("queue: not going to block waiting on %p\n", this);
      }
      return false;
    } else {  // kBlock
      assert(options & kBlock);
      if (kReadDebug) {
        printf("queue: going to wait for readable_ of %p\n", this);
      }
      readable_waiting_ = true;
      // Wait for a message to become readable.
      while (true) {
        Condition::WaitResult wait_result = readable_.WaitTimed(timeout);
        if (wait_result == Condition::WaitResult::kOk) {
          break;
        }
        CHECK(wait_result != Condition::WaitResult::kOwnerDied);
        if (wait_result == Condition::WaitResult::kTimeout) {
          return false;
        }
      }

      if (kReadDebug) {
        printf("queue: done waiting for readable_ of %p\n", this);
      }
    }
  }
  // We have to check down here because we might have unlocked the mutex while
  // Wait()ing above, so this value might have changed.
  writable_start_ = is_writable();
  if (kReadDebug) {
    printf("queue: %p->read(%p) start=%d end=%d writable_start=%d\n", this,
           index, data_start_, data_end_, writable_start_);
  }
  return true;
}

inline int RawQueue::LastMessageIndex() const {
  int pos = data_end_ - 1;
  if (pos < 0) {  // If it wrapped around.
    pos = data_length_ - 1;
  }
  return pos;
}

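// Index-less read: with kFromEnd the newest message is returned (discarding
// everything older unless kPeek is also set); otherwise the oldest message is
// returned and, unless kPeek is set, removed from the queue.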
const void *RawQueue::DoReadMessage(Options<RawQueue> options) {
  // TODO(brians): Test this function.
  if (kReadDebug) {
    printf("queue: %p->ReadMessage(%x)\n", this, options.printable());
  }
  void *msg = NULL;

  IPCMutexLocker locker(&data_lock_);
  CHECK(!locker.owner_died());

  if (!ReadCommonStart(options, nullptr, chrono::nanoseconds(0))) {
    if (kReadDebug) {
      printf("queue: %p common returned false\n", this);
    }
    return NULL;
  }

  if (options & kFromEnd) {
    if (options & kPeek) {
      if (kReadDebug) {
        printf("queue: %p shortcutting c2: %d\n", this, LastMessageIndex());
      }
      msg = data_[LastMessageIndex()];
      IncrementMessageReferenceCount(msg);
    } else {
      while (true) {
        if (kReadDebug) {
          printf("queue: %p start of c2\n", this);
        }
        // This loop pulls each message out of the buffer.
        const int pos = data_start_;
        data_start_ = index_add1(data_start_);
        // If this is the last one.
        if (data_start_ == data_end_) {
          if (kReadDebug) {
            printf("queue: %p reading from c2: %d\n", this, pos);
          }
          msg = data_[pos];
          break;
        }
        // This message is not going to be in the queue any more.
        DecrementMessageReferenceCount(data_[pos]);
      }
    }
  } else {
    if (kReadDebug) {
      printf("queue: %p reading from d2: %d\n", this, data_start_);
    }
    msg = data_[data_start_];
    if (options & kPeek) {
      IncrementMessageReferenceCount(msg);
    } else {
      data_start_ = index_add1(data_start_);
    }
  }
  ReadCommonEnd();
  if (kReadDebug) {
    printf("queue: %p read returning %p\n", this, msg);
  }
  return msg;
}

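// Indexed read: *index is the number of messages this caller has already
// consumed. The next unread message is returned (the newest one with
// kFromEnd), and *index is advanced unless kPeek is set. A reader that has
// fallen further behind than the queue can hold gets skipped ahead to the
// oldest message still available.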
const void *RawQueue::DoReadMessageIndex(Options<RawQueue> options, int *index,
                                         chrono::nanoseconds timeout) {
  if (kReadDebug) {
    printf("queue: %p->ReadMessageIndex(%x, %p(*=%d))\n", this,
           options.printable(), index, *index);
  }
  void *msg = NULL;

  IPCMutexLocker locker(&data_lock_);
  CHECK(!locker.owner_died());

  if (!ReadCommonStart(options, index, timeout)) {
    if (kReadDebug) {
      printf("queue: %p common returned false\n", this);
    }
    return NULL;
  }

  // TODO(parker): Handle integer wrap on the index.

  if (options & kFromEnd) {
    if (kReadDebug) {
      printf("queue: %p reading from c1: %d\n", this, LastMessageIndex());
    }
    msg = data_[LastMessageIndex()];

    // We'd skip this if we had kPeek, but kPeek | kFromEnd isn't valid for
    // reading with an index.
    *index = messages_;
  } else {
    // Where we're going to start reading.
    int my_start;

    const int unread_messages = messages_ - *index;
    assert(unread_messages > 0);
    int current_messages = data_end_ - data_start_;
    if (current_messages < 0) current_messages += data_length_;
    if (kReadIndexDebug) {
      printf("queue: %p start=%d end=%d current=%d\n", this, data_start_,
             data_end_, current_messages);
    }
    assert(current_messages > 0);
    // If we're behind the available messages.
    if (unread_messages > current_messages) {
      // Catch index up to the last available message.
      *index = messages_ - current_messages;
      // And that's the one we're going to read.
      my_start = data_start_;
      if (kReadIndexDebug) {
        printf("queue: %p jumping ahead to message %d (have %d) (at %d)\n",
               this, *index, messages_, data_start_);
      }
    } else {
      // Just start reading at the first available message that we haven't yet
      // read.
      my_start = data_end_ - unread_messages;
      if (kReadIndexDebug) {
        printf("queue: %p original read from %d\n", this, my_start);
      }
      if (data_start_ < data_end_) {
        assert(my_start >= 0);
      }
      if (my_start < 0) my_start += data_length_;
    }

    if (kReadDebug) {
      printf("queue: %p reading from d1: %d\n", this, my_start);
    }
    // We have to be either after the start or before the end, even if the
    // queue is wrapped around (should be both if it's not).
    assert((my_start >= data_start_) || (my_start < data_end_));
    // More sanity checking.
    assert((my_start >= 0) && (my_start < data_length_));
    msg = data_[my_start];
    if (!(options & kPeek)) ++(*index);
  }
  IncrementMessageReferenceCount(msg);

  ReadCommonEnd();
  return msg;
}

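// Counts the messages currently on the free list. The list is walked without
// any synchronization, so the result is only exact when nothing else is
// allocating or freeing messages concurrently.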
int RawQueue::FreeMessages() const {
  int r = 0;
  MessageHeader *header = free_messages_;
  while (header != nullptr) {
    ++r;
    header = header->next;
  }
  return r;
}

}  // namespace aos