#if !AOS_DEBUG
#define NDEBUG
#endif

#include "aos/linux_code/ipc_lib/queue.h"

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <assert.h>

#include <memory>
#include <algorithm>

#include "aos/common/logging/logging.h"
#include "aos/common/type_traits.h"
#include "aos/linux_code/ipc_lib/core_lib.h"

namespace aos {
namespace {

static_assert(shm_ok<RawQueue>::value,
              "RawQueue instances go into shared memory");

const bool kReadDebug = false;
const bool kWriteDebug = false;
const bool kRefDebug = false;
const bool kFetchDebug = false;
const bool kReadIndexDebug = false;

// The number of extra messages the pool associated with each queue will be able
// to hold (for readers who are slow about freeing them or who leak one when
// they get killed).
const int kExtraMessages = 20;

}  // namespace

const int RawQueue::kPeek;
const int RawQueue::kFromEnd;
const int RawQueue::kNonBlock;
const int RawQueue::kBlock;
const int RawQueue::kOverride;

// This is the header that gets put immediately before each queue message in
// memory. It is always allocated aligned to 8 bytes and its size has to
// maintain that alignment for the message that follows immediately.
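// Layout of a single allocation from the pool (msg_length_ bytes total):
//   [MessageHeader][message payload of length_ bytes]
// MessageHeader::Get() walks backwards from a payload pointer to its header,
// and GetMessage() hands out the address just past the header.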
struct RawQueue::MessageHeader {
  MessageHeader *next;

  // Gets the message header immediately preceding msg.
  static MessageHeader *Get(const void *msg) {
    return reinterpret_cast<MessageHeader *>(__builtin_assume_aligned(
        static_cast<uint8_t *>(const_cast<void *>(msg)) - sizeof(MessageHeader),
        alignof(MessageHeader)));
  }

  int32_t ref_count() const {
    return __atomic_load_n(&ref_count_, __ATOMIC_RELAXED);
  }
  void set_ref_count(int32_t val) {
    __atomic_store_n(&ref_count_, val, __ATOMIC_RELAXED);
  }

  void ref_count_sub() {
    // TODO(brians): Take the #ifdef out once clang can handle the
    // __atomic_*_fetch variants which could be more efficient.
#ifdef __clang__
    __atomic_fetch_sub(&ref_count_, 1, __ATOMIC_RELAXED);
#else
    __atomic_sub_fetch(&ref_count_, 1, __ATOMIC_RELAXED);
#endif
  }
  void ref_count_add() {
#ifdef __clang__
    __atomic_fetch_add(&ref_count_, 1, __ATOMIC_RELAXED);
#else
    __atomic_add_fetch(&ref_count_, 1, __ATOMIC_RELAXED);
#endif
  }

 private:
  // This gets accessed with atomic instructions without any
  // locks held by various member functions.
  int32_t ref_count_;

  // Padding to make the total size 8 bytes if we have 4-byte pointers or bump
  // it to 16 if a pointer is 8 bytes by itself.
#if __SIZEOF_POINTER__ == 8
#ifdef __clang__
  // Clang is smart enough to realize this is unused, but GCC doesn't like the
  // attribute here...
  __attribute__((unused))
#endif
  char padding[4];
#elif __SIZEOF_POINTER__ == 4
  // No padding needed to get 8 byte total size.
#else
#error Unknown pointer size.
#endif
};

inline int RawQueue::index_add1(int index) {
  // Doing it this way instead of with % is more efficient on ARM.
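  // For example, with data_length_ == 5 this maps 0->1, 1->2, ..., 4->0.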
  int r = index + 1;
  assert(index <= data_length_);
  if (r == data_length_) {
    return 0;
  } else {
    return r;
  }
}

void RawQueue::DecrementMessageReferenceCount(const void *msg) {
  MessageHeader *header = MessageHeader::Get(msg);
  header->ref_count_sub();
  if (kRefDebug) {
    printf("%p ref dec count: %p count=%d\n", this, msg, header->ref_count());
  }

  // The only way it should ever be 0 is if we were the last one to decrement,
  // in which case nobody else should have it around to re-increment it or
  // anything in the middle, so this check is safe to do without being atomic
  // with the decrement.
  if (header->ref_count() == 0) {
    DoFreeMessage(msg);
  } else {
    assert(header->ref_count() > 0);
  }
}

inline void RawQueue::IncrementMessageReferenceCount(const void *msg) const {
  MessageHeader *const header = MessageHeader::Get(msg);
  header->ref_count_add();
  if (kRefDebug) {
    printf("%p ref inc count: %p\n", this, msg);
  }
}

inline void RawQueue::DoFreeMessage(const void *msg) {
  MessageHeader *header = MessageHeader::Get(msg);
  if (kRefDebug) {
    printf("%p ref free to %p: %p\n", this, recycle_, msg);
  }

  if (__builtin_expect(recycle_ != nullptr, 0)) {
    void *const new_msg = recycle_->GetMessage();
    if (new_msg == nullptr) {
      fprintf(stderr, "queue: couldn't get a message"
              " for recycle queue %p\n", recycle_);
    } else {
      header->ref_count_add();
      if (!recycle_->WriteMessage(const_cast<void *>(msg), kOverride)) {
        fprintf(stderr, "queue: %p->WriteMessage(%p, kOverride) failed."
                " aborting\n", recycle_, msg);
        printf("see stderr\n");
        abort();
      }
      msg = new_msg;
      header = MessageHeader::Get(new_msg);
    }
  }

  // This works around GCC bug 60272 (fixed in 4.8.3).
  // new_next should just get replaced with header->next (and the body of the
  // loop should become empty).
  // The bug is that the store to new_next after the compare/exchange is
  // unconditional, but it should only happen if the compare/exchange fails,
  // which could mean overwriting what somebody else who preempted us right
  // then changed it to.
  // TODO(brians): Get rid of this workaround once we get a new enough GCC.
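  // What follows is a lock-free push of header onto the free_messages_ list.
  // Without the workaround it would look roughly like this (a sketch only, not
  // what actually gets compiled here):
  //   header->next = __atomic_load_n(&free_messages_, __ATOMIC_RELAXED);
  //   while (!__atomic_compare_exchange_n(&free_messages_, &header->next,
  //                                       header, true, __ATOMIC_RELEASE,
  //                                       __ATOMIC_RELAXED)) {}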
  MessageHeader *new_next = __atomic_load_n(&free_messages_, __ATOMIC_RELAXED);
  do {
    header->next = new_next;
  } while (__builtin_expect(
      !__atomic_compare_exchange_n(&free_messages_, &new_next, header, true,
                                   __ATOMIC_RELEASE, __ATOMIC_RELAXED),
      0));
}

void *RawQueue::GetMessage() {
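  // Lock-free pop from the free_messages_ list: on a failed compare/exchange,
  // header gets reloaded with the current head and we retry until we win.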
  MessageHeader *header = __atomic_load_n(&free_messages_, __ATOMIC_RELAXED);
  do {
    if (__builtin_expect(header == nullptr, 0)) {
      LOG(FATAL, "overused pool of queue %p\n", this);
    }
  } while (__builtin_expect(
      !__atomic_compare_exchange_n(&free_messages_, &header, header->next, true,
                                   __ATOMIC_ACQ_REL, __ATOMIC_RELAXED),
      0));
  void *msg = reinterpret_cast<uint8_t *>(header + 1);
  // It might be uninitialized, 0 from a previous use, or 1 from previously
  // being recycled.
  header->set_ref_count(1);
  if (kRefDebug) {
    printf("%p ref alloc: %p\n", this, msg);
  }
  return msg;
}

RawQueue::RawQueue(const char *name, size_t length, int hash, int queue_length)
    : readable_(&data_lock_), writable_(&data_lock_) {
  static_assert(shm_ok<RawQueue::MessageHeader>::value,
                "the whole point is to stick it in shared memory");
  static_assert((sizeof(RawQueue::MessageHeader) % 8) == 0,
204 "need to revalidate size/alignent assumptions");
205
Brian Silverman227ad482014-03-23 11:21:32 -0700206 if (queue_length < 1) {
207 LOG(FATAL, "queue length %d needs to be at least 1\n", queue_length);
208 }
209
Brian Silvermana6d1b562013-09-01 14:39:39 -0700210 const size_t name_size = strlen(name) + 1;
211 char *temp = static_cast<char *>(shm_malloc(name_size));
212 memcpy(temp, name, name_size);
213 name_ = temp;
214 length_ = length;
215 hash_ = hash;
216 queue_length_ = queue_length;
217
218 next_ = NULL;
219 recycle_ = NULL;
220
221 if (kFetchDebug) {
222 printf("initializing name=%s, length=%zd, hash=%d, queue_length=%d\n",
223 name, length, hash, queue_length);
224 }
225
226 data_length_ = queue_length + 1;
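  // The extra slot means the ring buffer can hold queue_length messages while
  // still being able to tell a full queue (index_add1(data_end_) ==
  // data_start_) apart from an empty one (data_end_ == data_start_).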
  data_ = static_cast<void **>(shm_malloc(sizeof(void *) * data_length_));
  data_start_ = 0;
  data_end_ = 0;
  messages_ = 0;

  msg_length_ = length + sizeof(MessageHeader);

  // Create all of the messages for the free list and stick them on.
  {
    MessageHeader *previous = nullptr;
    for (int i = 0; i < queue_length + kExtraMessages; ++i) {
      MessageHeader *const message =
          static_cast<MessageHeader *>(shm_malloc(msg_length_));
      free_messages_ = message;
      message->next = previous;
      previous = message;
    }
  }

  readable_waiting_ = false;

  if (kFetchDebug) {
    printf("made queue %s\n", name);
  }
}

RawQueue *RawQueue::Fetch(const char *name, size_t length, int hash,
                          int queue_length) {
  if (kFetchDebug) {
    printf("fetching queue %s\n", name);
  }
  if (mutex_lock(&global_core->mem_struct->queues.lock) != 0) {
    LOG(FATAL, "mutex_lock(%p) failed\n",
        &global_core->mem_struct->queues.lock);
  }
  RawQueue *current = static_cast<RawQueue *>(
      global_core->mem_struct->queues.pointer);
  if (current != NULL) {
    while (true) {
      // If we found a matching queue.
      if (strcmp(current->name_, name) == 0 && current->length_ == length &&
          current->hash_ == hash && current->queue_length_ == queue_length) {
        mutex_unlock(&global_core->mem_struct->queues.lock);
        return current;
      } else {
        if (kFetchDebug) {
          printf("rejected queue %s strcmp=%d target=%s\n", current->name_,
                 strcmp(current->name_, name), name);
        }
      }
      // If this is the last one.
      if (current->next_ == NULL) break;
      current = current->next_;
    }
  }

  RawQueue *r = new (shm_malloc(sizeof(RawQueue)))
      RawQueue(name, length, hash, queue_length);
  if (current == NULL) {  // if we don't already have one
    global_core->mem_struct->queues.pointer = r;
  } else {
    current->next_ = r;
  }

  mutex_unlock(&global_core->mem_struct->queues.lock);
  return r;
}

RawQueue *RawQueue::Fetch(const char *name, size_t length, int hash,
                          int queue_length,
                          int recycle_hash, int recycle_length, RawQueue **recycle) {
  RawQueue *r = Fetch(name, length, hash, queue_length);
  r->recycle_ = Fetch(name, length, recycle_hash, recycle_length);
  if (r == r->recycle_) {
    fprintf(stderr, "queue: r->recycle_(=%p) == r(=%p)\n", r->recycle_, r);
    printf("see stderr\n");
    r->recycle_ = NULL;
    abort();
  }
  *recycle = r->recycle_;
  return r;
}
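
// A rough usage sketch (illustrative only: the queue name, sizes, and the
// MyMessage type are made up, and error handling plus releasing the read
// reference are omitted):
//   RawQueue *queue = RawQueue::Fetch("/test_queue", sizeof(MyMessage), 1, 10);
//   void *msg = queue->GetMessage();
//   // ... fill in the sizeof(MyMessage) bytes at msg ...
//   queue->WriteMessage(msg, RawQueue::kBlock);
//   const void *read = queue->ReadMessage(RawQueue::kNonBlock);  // may be NULL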

bool RawQueue::WriteMessage(void *msg, int options) {
  if (kWriteDebug) {
    printf("queue: %p->WriteMessage(%p, %x)\n", this, msg, options);
  }
  {
    MutexLocker locker(&data_lock_);
    bool writable_waited = false;

    int new_end;
    while (true) {
      new_end = index_add1(data_end_);
      // If there is room in the queue right now.
      if (new_end != data_start_) break;
      if (options & kNonBlock) {
        if (kWriteDebug) {
          printf("queue: not blocking on %p. returning false\n", this);
        }
        DecrementMessageReferenceCount(msg);
        return false;
      } else if (options & kOverride) {
        if (kWriteDebug) {
          printf("queue: overriding on %p\n", this);
        }
        // Avoid leaking the message that we're going to overwrite.
        DecrementMessageReferenceCount(data_[data_start_]);
        data_start_ = index_add1(data_start_);
      } else {  // kBlock
        if (kWriteDebug) {
          printf("queue: going to wait for writable_ of %p\n", this);
        }
        writable_.Wait();
        writable_waited = true;
      }
    }
    data_[data_end_] = msg;
    ++messages_;
    data_end_ = new_end;

    if (readable_waiting_) {
      if (kWriteDebug) {
        printf("queue: broadcasting to readable_ of %p\n", this);
      }
      readable_waiting_ = false;
      readable_.Broadcast();
    } else if (kWriteDebug) {
      printf("queue: skipping broadcast to readable_ of %p\n", this);
    }

    // If we got a signal on writable_ here and it's still writable, then we
    // need to signal the next person in line (if any).
    if (writable_waited && is_writable()) {
      if (kWriteDebug) {
        printf("queue: resignalling writable_ of %p\n", this);
      }
      writable_.Signal();
    }
  }
  if (kWriteDebug) {
    printf("queue: write returning true on queue %p\n", this);
  }
  return true;
}

inline void RawQueue::ReadCommonEnd() {
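  // Wake up a waiting writer if this read is what made the queue writable
  // again (i.e. it was full when the read started).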
  if (is_writable()) {
    if (kReadDebug) {
      printf("queue: %ssignalling writable_ of %p\n",
             writable_start_ ? "not " : "", this);
    }
    if (!writable_start_) writable_.Signal();
  }
}

bool RawQueue::ReadCommonStart(int options, int *index) {
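  // Remember whether the queue was writable before this read so that
  // ReadCommonEnd() knows whether to signal writable_, then (unless kNonBlock
  // is set) wait until there is a message to read (one newer than *index if
  // index is non-NULL).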
  writable_start_ = is_writable();
  while (data_start_ == data_end_ || ((index != NULL) && messages_ <= *index)) {
    if (options & kNonBlock) {
      if (kReadDebug) {
        printf("queue: not going to block waiting on %p\n", this);
      }
      return false;
    } else {  // kBlock
      if (kReadDebug) {
        printf("queue: going to wait for readable_ of %p\n", this);
      }
      readable_waiting_ = true;
      // Wait for a message to become readable.
      readable_.Wait();
      if (kReadDebug) {
        printf("queue: done waiting for readable_ of %p\n", this);
      }
    }
  }
  if (kReadDebug) {
    printf("queue: %p->read(%p) start=%d end=%d\n", this, index, data_start_,
           data_end_);
  }
  return true;
}

inline int RawQueue::LastMessageIndex() const {
  int pos = data_end_ - 1;
  if (pos < 0) {  // If it wrapped around.
    pos = data_length_ - 1;
  }
  return pos;
}

const void *RawQueue::ReadMessage(int options) {
  // TODO(brians): Test this function.
  if (kReadDebug) {
    printf("queue: %p->ReadMessage(%x)\n", this, options);
  }
  void *msg = NULL;

  MutexLocker locker(&data_lock_);

  if (!ReadCommonStart(options, nullptr)) {
    if (kReadDebug) {
      printf("queue: %p common returned false\n", this);
    }
    return NULL;
  }

  if (options & kFromEnd) {
    if (options & kPeek) {
      if (kReadDebug) {
        printf("queue: %p shortcutting c2: %d\n", this, LastMessageIndex());
      }
      msg = data_[LastMessageIndex()];
      IncrementMessageReferenceCount(msg);
    } else {
      while (true) {
        if (kReadDebug) {
          printf("queue: %p start of c2\n", this);
        }
        // This loop pulls each message out of the buffer.
        const int pos = data_start_;
        data_start_ = index_add1(data_start_);
        // If this is the last one.
        if (data_start_ == data_end_) {
          if (kReadDebug) {
            printf("queue: %p reading from c2: %d\n", this, pos);
          }
          msg = data_[pos];
          break;
        }
        // This message is not going to be in the queue any more.
        DecrementMessageReferenceCount(data_[pos]);
      }
    }
  } else {
    if (kReadDebug) {
      printf("queue: %p reading from d2: %d\n", this, data_start_);
    }
    msg = data_[data_start_];
    if (options & kPeek) {
      IncrementMessageReferenceCount(msg);
    } else {
      data_start_ = index_add1(data_start_);
    }
  }
  ReadCommonEnd();
  if (kReadDebug) {
    printf("queue: %p read returning %p\n", this, msg);
  }
  return msg;
}

const void *RawQueue::ReadMessageIndex(int options, int *index) {
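  // *index is the number of messages this caller has already seen; messages_
  // counts every message ever written to the queue, so the difference is how
  // far behind this reader is.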
  if (kReadDebug) {
    printf("queue: %p->ReadMessageIndex(%x, %p(*=%d))\n",
           this, options, index, *index);
  }
  void *msg = NULL;

  MutexLocker locker(&data_lock_);

  if (!ReadCommonStart(options, index)) {
    if (kReadDebug) {
      printf("queue: %p common returned false\n", this);
    }
    return NULL;
  }

  // TODO(parker): Handle integer wrap on the index.

  if (options & kFromEnd) {
    if (kReadDebug) {
      printf("queue: %p reading from c1: %d\n", this, LastMessageIndex());
    }
    msg = data_[LastMessageIndex()];
    if (!(options & kPeek)) *index = messages_;
  } else {
    // Where we're going to start reading.
    int my_start;

    const int unread_messages = messages_ - *index;
    assert(unread_messages > 0);
    int current_messages = data_end_ - data_start_;
    if (current_messages < 0) current_messages += data_length_;
    if (kReadIndexDebug) {
      printf("queue: %p start=%d end=%d current=%d\n",
             this, data_start_, data_end_, current_messages);
    }
    assert(current_messages > 0);
    // If we're behind the available messages.
    if (unread_messages > current_messages) {
      // Catch index up to the oldest message that's still available.
      *index = messages_ - current_messages;
      // And that's the one we're going to read.
      my_start = data_start_;
      if (kReadIndexDebug) {
        printf("queue: %p jumping ahead to message %d (have %d) (at %d)\n",
               this, *index, messages_, data_start_);
      }
    } else {
      // Just start reading at the first available message that we haven't yet
      // read.
      my_start = data_end_ - unread_messages;
      if (kReadIndexDebug) {
        printf("queue: %p original read from %d\n", this, my_start);
      }
      if (data_start_ < data_end_) {
        assert(my_start >= 0);
      }
      if (my_start < 0) my_start += data_length_;
    }

    if (kReadDebug) {
      printf("queue: %p reading from d1: %d\n", this, my_start);
    }
    // We have to be either after the start or before the end, even if the
    // queue is wrapped around (should be both if it's not).
    assert((my_start >= data_start_) || (my_start < data_end_));
    // More sanity checking.
    assert((my_start >= 0) && (my_start < data_length_));
    msg = data_[my_start];
    if (!(options & kPeek)) ++(*index);
  }
  IncrementMessageReferenceCount(msg);

  ReadCommonEnd();
  return msg;
}

int RawQueue::FreeMessages() const {
  int r = 0;
  MessageHeader *header = free_messages_;
  while (header != nullptr) {
    ++r;
    header = header->next;
  }
  return r;
}

bool RawQueue::IsDebug() {
#if AOS_DEBUG
  return true;
#else
  return false;
#endif
}

}  // namespace aos