#if !AOS_DEBUG
#undef NDEBUG
#define NDEBUG
#endif

#include "aos/linux_code/ipc_lib/queue.h"

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <assert.h>

#include <memory>
#include <algorithm>

#include "aos/common/type_traits.h"
#include "aos/linux_code/ipc_lib/core_lib.h"

namespace aos {
namespace {

static_assert(shm_ok<RawQueue>::value,
              "RawQueue instances go into shared memory");

const bool kReadDebug = false;
const bool kWriteDebug = false;
const bool kRefDebug = false;
const bool kFetchDebug = false;
const bool kReadIndexDebug = false;

// The number of extra messages the pool associated with each queue will be
// able to hold (for readers who are slow about freeing them or who leak one
// when they get killed).
const int kExtraMessages = 20;

}  // namespace

constexpr Options<RawQueue>::Option RawQueue::kPeek;
constexpr Options<RawQueue>::Option RawQueue::kFromEnd;
constexpr Options<RawQueue>::Option RawQueue::kNonBlock;
constexpr Options<RawQueue>::Option RawQueue::kBlock;
constexpr Options<RawQueue>::Option RawQueue::kOverride;

// This is the header that gets put in shared memory immediately before each
// queue message. It is always allocated aligned to 8 bytes and its size has to
// maintain that alignment for the message that follows immediately.
struct RawQueue::MessageHeader {
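  // Next message on this queue's lock-free free list (free_messages_). Only
  // meaningful while the message is not checked out via GetMessage().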
  MessageHeader *next;

  // Gets the message header immediately preceding msg.
  static MessageHeader *Get(const void *msg) {
    return reinterpret_cast<MessageHeader *>(__builtin_assume_aligned(
        static_cast<uint8_t *>(const_cast<void *>(msg)) - sizeof(MessageHeader),
        alignof(MessageHeader)));
  }

  int32_t ref_count() const {
    return __atomic_load_n(&ref_count_, __ATOMIC_RELAXED);
  }
  void set_ref_count(int32_t val) {
    __atomic_store_n(&ref_count_, val, __ATOMIC_RELAXED);
  }

  void ref_count_sub() {
    __atomic_sub_fetch(&ref_count_, 1, __ATOMIC_RELAXED);
  }
  void ref_count_add() {
    __atomic_add_fetch(&ref_count_, 1, __ATOMIC_RELAXED);
  }

 private:
  // This gets accessed with atomic instructions without any
  // locks held by various member functions.
  int32_t ref_count_;

  // Padding to keep the total size a multiple of 8: with 4-byte pointers the
  // struct is already 8 bytes, but with 8-byte pointers it has to get bumped
  // up to 16.
#if __SIZEOF_POINTER__ == 8
#ifdef __clang__
  // Clang is smart enough to realize this is unused, but GCC doesn't like the
  // attribute here...
  __attribute__((unused))
#endif
  char padding[4];
#elif __SIZEOF_POINTER__ == 4
  // No padding needed to get 8 byte total size.
#else
#error Unknown pointer size.
#endif
};

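// Returns the ring-buffer index that comes after index, wrapping back to 0
// when it hits data_length_.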
inline int RawQueue::index_add1(int index) {
  // Doing it this way instead of with % is more efficient on ARM.
  int r = index + 1;
  assert(index <= data_length_);
  if (r == data_length_) {
    return 0;
  } else {
    return r;
  }
}

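// Drops one reference to msg and, if that was the last reference, hands the
// message to DoFreeMessage() so it goes back on the free list (possibly via
// the recycle queue).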
void RawQueue::DecrementMessageReferenceCount(const void *msg) {
  MessageHeader *header = MessageHeader::Get(msg);
  header->ref_count_sub();
  if (kRefDebug) {
    printf("%p ref dec count: %p count=%d\n", this, msg, header->ref_count());
  }

  // The only way it should ever be 0 is if we were the last one to decrement,
  // in which case nobody else should have it around to re-increment it or
  // anything in the middle, so this is safe to do not atomically with the
  // decrement.
  if (header->ref_count() == 0) {
    DoFreeMessage(msg);
  } else {
    assert(header->ref_count() > 0);
  }
}

inline void RawQueue::IncrementMessageReferenceCount(const void *msg) const {
  MessageHeader *const header = MessageHeader::Get(msg);
  header->ref_count_add();
  if (kRefDebug) {
    printf("%p ref inc count: %p\n", this, msg);
  }
}

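// Actually frees msg by pushing its header onto free_messages_ with a
// lock-free stack push. If recycle_ is set, msg is first handed to the recycle
// queue (with an extra reference so it survives the write) and a fresh message
// obtained from the recycle queue goes onto free_messages_ in its place.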
inline void RawQueue::DoFreeMessage(const void *msg) {
  MessageHeader *header = MessageHeader::Get(msg);
  if (kRefDebug) {
    printf("%p ref free to %p: %p\n", this, recycle_, msg);
  }

  if (__builtin_expect(recycle_ != nullptr, 0)) {
    void *const new_msg = recycle_->GetMessage();
    if (new_msg == nullptr) {
      fprintf(stderr, "queue: couldn't get a message"
              " for recycle queue %p\n", recycle_);
    } else {
      header->ref_count_add();
      if (!recycle_->WriteMessage(const_cast<void *>(msg), kOverride)) {
        fprintf(stderr, "queue: %p->WriteMessage(%p, kOverride) failed."
                " aborting\n", recycle_, msg);
        printf("see stderr\n");
        abort();
      }
      msg = new_msg;
      header = MessageHeader::Get(new_msg);
    }
  }

  // This works around GCC bug 60272 (fixed in 4.8.3).
  // new_next should just get replaced with header->next (and the body of the
  // loop should become empty).
  // The bug is that the store to new_next after the compare/exchange is
  // unconditional but it should only be if it fails, which could mean
  // overwriting what somebody else who preempted us right then changed it to.
  // TODO(brians): Get rid of this workaround once we get a new enough GCC.
  MessageHeader *new_next = __atomic_load_n(&free_messages_, __ATOMIC_RELAXED);
  do {
    header->next = new_next;
  } while (__builtin_expect(
      !__atomic_compare_exchange_n(&free_messages_, &new_next, header, true,
                                   __ATOMIC_RELEASE, __ATOMIC_RELAXED),
      0));
}

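// Pops a message off the lock-free free list, resets its reference count to
// 1, and returns a pointer to the payload area right after its MessageHeader.
// LOG(FATAL)s if the pool has been exhausted.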
void *RawQueue::GetMessage() {
  MessageHeader *header = __atomic_load_n(&free_messages_, __ATOMIC_RELAXED);
  do {
    if (__builtin_expect(header == nullptr, 0)) {
      LOG(FATAL, "overused pool of queue %p (%s)\n", this, name_);
    }
  } while (__builtin_expect(
      !__atomic_compare_exchange_n(&free_messages_, &header, header->next, true,
                                   __ATOMIC_ACQ_REL, __ATOMIC_RELAXED),
      0));
  void *msg = reinterpret_cast<uint8_t *>(header + 1);
  // It might be uninitialized, 0 from a previous use, or 1 from previously
  // being recycled.
  header->set_ref_count(1);
  if (kRefDebug) {
    printf("%p ref alloc: %p\n", this, msg);
  }
  return msg;
}

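// Sets up a queue in shared memory: copies the name, allocates the circular
// buffer of message pointers, and preallocates queue_length + kExtraMessages
// messages onto the free list.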
RawQueue::RawQueue(const char *name, size_t length, int hash, int queue_length)
    : readable_(&data_lock_), writable_(&data_lock_) {
  static_assert(shm_ok<RawQueue::MessageHeader>::value,
                "the whole point is to stick it in shared memory");
  static_assert((sizeof(RawQueue::MessageHeader) % 8) == 0,
                "need to revalidate size/alignment assumptions");

  if (queue_length < 1) {
    LOG(FATAL, "queue length %d of %s needs to be at least 1\n", queue_length,
        name);
  }

  const size_t name_size = strlen(name) + 1;
  char *temp = static_cast<char *>(shm_malloc(name_size));
  memcpy(temp, name, name_size);
  name_ = temp;
  length_ = length;
  hash_ = hash;
  queue_length_ = queue_length;

  next_ = NULL;
  recycle_ = NULL;

  if (kFetchDebug) {
    printf("initializing name=%s, length=%zd, hash=%d, queue_length=%d\n",
           name, length, hash, queue_length);
  }

  data_length_ = queue_length + 1;
  data_ = static_cast<void **>(shm_malloc(sizeof(void *) * data_length_));
  data_start_ = 0;
  data_end_ = 0;
  messages_ = 0;

  msg_length_ = length + sizeof(MessageHeader);

  // Create all of the messages for the free list and stick them on.
  {
    MessageHeader *previous = nullptr;
    for (int i = 0; i < queue_length + kExtraMessages; ++i) {
      MessageHeader *const message =
          static_cast<MessageHeader *>(shm_malloc(msg_length_));
      free_messages_ = message;
      message->next = previous;
      previous = message;
    }
  }

  readable_waiting_ = false;

  if (kFetchDebug) {
    printf("made queue %s\n", name);
  }
}

RawQueue *RawQueue::Fetch(const char *name, size_t length, int hash,
                          int queue_length) {
  if (kFetchDebug) {
    printf("fetching queue %s\n", name);
  }
  if (mutex_lock(&global_core->mem_struct->queues.lock) != 0) {
    LOG(FATAL, "mutex_lock(%p) failed\n",
        &global_core->mem_struct->queues.lock);
  }
  RawQueue *current = static_cast<RawQueue *>(
      global_core->mem_struct->queues.pointer);
  if (current != NULL) {
    while (true) {
      // If we found a matching queue.
      if (strcmp(current->name_, name) == 0 && current->length_ == length &&
          current->hash_ == hash && current->queue_length_ == queue_length) {
        mutex_unlock(&global_core->mem_struct->queues.lock);
        return current;
      } else {
        if (kFetchDebug) {
          printf("rejected queue %s strcmp=%d target=%s\n", current->name_,
                 strcmp(current->name_, name), name);
        }
      }
      // If this is the last one.
      if (current->next_ == NULL) break;
      current = current->next_;
    }
  }

  RawQueue *r = new (shm_malloc(sizeof(RawQueue)))
      RawQueue(name, length, hash, queue_length);
  if (current == NULL) {  // if we don't already have one
    global_core->mem_struct->queues.pointer = r;
  } else {
    current->next_ = r;
  }

  mutex_unlock(&global_core->mem_struct->queues.lock);
  return r;
}

RawQueue *RawQueue::Fetch(const char *name, size_t length, int hash,
                          int queue_length, int recycle_hash,
                          int recycle_length, RawQueue **recycle) {
  RawQueue *r = Fetch(name, length, hash, queue_length);
  r->recycle_ = Fetch(name, length, recycle_hash, recycle_length);
  if (r == r->recycle_) {
    fprintf(stderr, "queue: r->recycle_(=%p) == r(=%p)\n", r->recycle_, r);
    printf("see stderr\n");
    r->recycle_ = NULL;
    abort();
  }
  *recycle = r->recycle_;
  return r;
}

bool RawQueue::DoWriteMessage(void *msg, Options<RawQueue> options) {
  if (kWriteDebug) {
    printf("queue: %p->WriteMessage(%p, %x)\n", this, msg, options.printable());
  }

  bool signal_readable;

  {
    IPCMutexLocker locker(&data_lock_);
    CHECK(!locker.owner_died());

    int new_end;
    while (true) {
      new_end = index_add1(data_end_);
      // If there is room in the queue right now.
      if (new_end != data_start_) break;
      if (options & kNonBlock) {
        if (kWriteDebug) {
          printf("queue: not blocking on %p. returning false\n", this);
        }
        DecrementMessageReferenceCount(msg);
        return false;
      } else if (options & kOverride) {
        if (kWriteDebug) {
          printf("queue: overriding on %p\n", this);
        }
        // Avoid leaking the message that we're going to overwrite.
        DecrementMessageReferenceCount(data_[data_start_]);
        data_start_ = index_add1(data_start_);
      } else {  // kBlock
        assert(options & kBlock);
        if (kWriteDebug) {
          printf("queue: going to wait for writable_ of %p\n", this);
        }
        CHECK(!writable_.Wait());
      }
    }
    data_[data_end_] = msg;
    ++messages_;
    data_end_ = new_end;

    signal_readable = readable_waiting_;
    readable_waiting_ = false;
  }

  if (signal_readable) {
    if (kWriteDebug) {
      printf("queue: broadcasting to readable_ of %p\n", this);
    }
    readable_.Broadcast();
  } else if (kWriteDebug) {
    printf("queue: skipping broadcast to readable_ of %p\n", this);
  }

  if (kWriteDebug) {
    printf("queue: write returning true on queue %p\n", this);
  }
  return true;
}

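// Shared cleanup for the read paths: wakes up writers if the queue is writable
// now but was not writable when the read started.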
inline void RawQueue::ReadCommonEnd() {
  if (is_writable()) {
    if (kReadDebug) {
      printf("queue: %ssignalling writable_ of %p\n",
             writable_start_ ? "not " : "", this);
    }
    if (!writable_start_) writable_.Broadcast();
  }
}

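// Shared setup for the read paths: blocks (or returns false with kNonBlock)
// until there is an unread message available (one past *index if index is
// non-null), and records whether the queue was writable at that point in
// writable_start_.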
bool RawQueue::ReadCommonStart(Options<RawQueue> options, int *index) {
  while (data_start_ == data_end_ || ((index != NULL) && messages_ <= *index)) {
    if (options & kNonBlock) {
      if (kReadDebug) {
        printf("queue: not going to block waiting on %p\n", this);
      }
      return false;
    } else {  // kBlock
      assert(options & kBlock);
      if (kReadDebug) {
        printf("queue: going to wait for readable_ of %p\n", this);
      }
      readable_waiting_ = true;
      // Wait for a message to become readable.
      CHECK(!readable_.Wait());
      if (kReadDebug) {
        printf("queue: done waiting for readable_ of %p\n", this);
      }
    }
  }
  // We have to check down here because we might have unlocked the mutex while
  // Wait()ing above so this value might have changed.
  writable_start_ = is_writable();
  if (kReadDebug) {
    printf("queue: %p->read(%p) start=%d end=%d writable_start=%d\n",
           this, index, data_start_, data_end_, writable_start_);
  }
  return true;
}

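// Returns the index into data_ of the most recently written message.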
inline int RawQueue::LastMessageIndex() const {
  int pos = data_end_ - 1;
  if (pos < 0) {  // If it wrapped around.
    pos = data_length_ - 1;
  }
  return pos;
}

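// Reads the next message (or the newest one with kFromEnd) and returns a
// reference that the caller has to release. kPeek leaves the message in the
// queue; kFromEnd without kPeek discards all of the older messages on the way
// to the newest one.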
const void *RawQueue::DoReadMessage(Options<RawQueue> options) {
  // TODO(brians): Test this function.
  if (kReadDebug) {
    printf("queue: %p->ReadMessage(%x)\n", this, options.printable());
  }
  void *msg = NULL;

  IPCMutexLocker locker(&data_lock_);
  CHECK(!locker.owner_died());

  if (!ReadCommonStart(options, nullptr)) {
    if (kReadDebug) {
      printf("queue: %p common returned false\n", this);
    }
    return NULL;
  }

  if (options & kFromEnd) {
    if (options & kPeek) {
      if (kReadDebug) {
        printf("queue: %p shortcutting c2: %d\n", this, LastMessageIndex());
      }
      msg = data_[LastMessageIndex()];
      IncrementMessageReferenceCount(msg);
    } else {
      while (true) {
        if (kReadDebug) {
          printf("queue: %p start of c2\n", this);
        }
        // This loop pulls each message out of the buffer.
        const int pos = data_start_;
        data_start_ = index_add1(data_start_);
        // If this is the last one.
        if (data_start_ == data_end_) {
          if (kReadDebug) {
            printf("queue: %p reading from c2: %d\n", this, pos);
          }
          msg = data_[pos];
          break;
        }
        // This message is not going to be in the queue any more.
        DecrementMessageReferenceCount(data_[pos]);
      }
    }
  } else {
    if (kReadDebug) {
      printf("queue: %p reading from d2: %d\n", this, data_start_);
    }
    msg = data_[data_start_];
    if (options & kPeek) {
      IncrementMessageReferenceCount(msg);
    } else {
      data_start_ = index_add1(data_start_);
    }
  }
  ReadCommonEnd();
  if (kReadDebug) {
    printf("queue: %p read returning %p\n", this, msg);
  }
  return msg;
}

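// Like DoReadMessage(), except *index is the cumulative count of messages the
// caller has already seen. Callers that have fallen behind get jumped ahead to
// the oldest message still in the queue, and *index is advanced unless kPeek
// is set.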
const void *RawQueue::DoReadMessageIndex(Options<RawQueue> options,
                                         int *index) {
  if (kReadDebug) {
    printf("queue: %p->ReadMessageIndex(%x, %p(*=%d))\n",
           this, options.printable(), index, *index);
  }
  void *msg = NULL;

  IPCMutexLocker locker(&data_lock_);
  CHECK(!locker.owner_died());

  if (!ReadCommonStart(options, index)) {
    if (kReadDebug) {
      printf("queue: %p common returned false\n", this);
    }
    return NULL;
  }

  // TODO(parker): Handle integer wrap on the index.

  if (options & kFromEnd) {
    if (kReadDebug) {
      printf("queue: %p reading from c1: %d\n", this, LastMessageIndex());
    }
    msg = data_[LastMessageIndex()];

    // We'd skip this if we had kPeek, but kPeek | kFromEnd isn't valid for
    // reading with an index.
    *index = messages_;
  } else {
    // Where we're going to start reading.
    int my_start;

    const int unread_messages = messages_ - *index;
    assert(unread_messages > 0);
    int current_messages = data_end_ - data_start_;
    if (current_messages < 0) current_messages += data_length_;
    if (kReadIndexDebug) {
      printf("queue: %p start=%d end=%d current=%d\n",
             this, data_start_, data_end_, current_messages);
    }
    assert(current_messages > 0);
    // If we're behind the available messages.
    if (unread_messages > current_messages) {
      // Catch index up to the last available message.
      *index = messages_ - current_messages;
      // And that's the one we're going to read.
      my_start = data_start_;
      if (kReadIndexDebug) {
        printf("queue: %p jumping ahead to message %d (have %d) (at %d)\n",
               this, *index, messages_, data_start_);
      }
    } else {
      // Just start reading at the first available message that we haven't yet
      // read.
      my_start = data_end_ - unread_messages;
      if (kReadIndexDebug) {
        printf("queue: %p original read from %d\n", this, my_start);
      }
      if (data_start_ < data_end_) {
        assert(my_start >= 0);
      }
      if (my_start < 0) my_start += data_length_;
    }

    if (kReadDebug) {
      printf("queue: %p reading from d1: %d\n", this, my_start);
    }
    // We have to be either after the start or before the end, even if the
    // queue is wrapped around (should be both if it's not).
    assert((my_start >= data_start_) || (my_start < data_end_));
    // More sanity checking.
    assert((my_start >= 0) && (my_start < data_length_));
    msg = data_[my_start];
    if (!(options & kPeek)) ++(*index);
  }
  IncrementMessageReferenceCount(msg);

  ReadCommonEnd();
  return msg;
}

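// Counts the messages currently on the free list by walking it. The walk is
// not synchronized with concurrent pushes and pops, so the result is only a
// snapshot when other tasks are using the queue.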
int RawQueue::FreeMessages() const {
  int r = 0;
  MessageHeader *header = free_messages_;
  while (header != nullptr) {
    ++r;
    header = header->next;
  }
  return r;
}

}  // namespace aos