#if !AOS_DEBUG
#undef NDEBUG
#define NDEBUG
#endif

#include "aos/ipc_lib/queue.h"

#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

#include <algorithm>
#include <memory>

#include "aos/ipc_lib/core_lib.h"
#include "aos/type_traits/type_traits.h"

namespace aos {
namespace {

namespace chrono = ::std::chrono;

static_assert(shm_ok<RawQueue>::value,
              "RawQueue instances go into shared memory");

const bool kReadDebug = false;
const bool kWriteDebug = false;
const bool kRefDebug = false;
const bool kFetchDebug = false;
const bool kReadIndexDebug = false;

// The number of extra messages the pool associated with each queue will be able
// to hold (for readers who are slow about freeing them or who leak one when
// they get killed).
const int kExtraMessages = 20;

}  // namespace

constexpr Options<RawQueue>::Option RawQueue::kPeek;
constexpr Options<RawQueue>::Option RawQueue::kFromEnd;
constexpr Options<RawQueue>::Option RawQueue::kNonBlock;
constexpr Options<RawQueue>::Option RawQueue::kBlock;
constexpr Options<RawQueue>::Option RawQueue::kOverride;

// This is what gets stored in memory immediately before each queue message. It
// is always allocated aligned to 8 bytes and its size has to maintain that
// alignment for the message that follows.
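// The layout in shared memory is [MessageHeader][message payload]: Get()
// recovers the header from a payload pointer and GetMessage() hands out
// header + 1.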
struct RawQueue::MessageHeader {
  MessageHeader *next;

  // Gets the message header immediately preceding msg.
  static MessageHeader *Get(const void *msg) {
    return reinterpret_cast<MessageHeader *>(__builtin_assume_aligned(
        static_cast<uint8_t *>(const_cast<void *>(msg)) - sizeof(MessageHeader),
        alignof(MessageHeader)));
  }

  int32_t ref_count() const {
    return __atomic_load_n(&ref_count_, __ATOMIC_RELAXED);
  }
  void set_ref_count(int32_t val) {
    __atomic_store_n(&ref_count_, val, __ATOMIC_RELAXED);
  }

  void ref_count_sub() { __atomic_sub_fetch(&ref_count_, 1, __ATOMIC_RELAXED); }
  void ref_count_add() { __atomic_add_fetch(&ref_count_, 1, __ATOMIC_RELAXED); }

 private:
  // This gets accessed by various member functions with atomic instructions,
  // without any locks held.
  int32_t ref_count_;

// Padding to make the total size 8 bytes if we have 4-byte pointers or bump
// it to 16 if a pointer is 8 bytes by itself.
#if __SIZEOF_POINTER__ == 8
#ifdef __clang__
  // Clang is smart enough to realize this is unused, but GCC doesn't like the
  // attribute here...
  __attribute__((unused))
#endif
  char padding[4];
#elif __SIZEOF_POINTER__ == 4
// No padding needed to get 8 byte total size.
#else
#error Unknown pointer size.
#endif
};

inline int RawQueue::index_add1(int index) {
  // Doing it this way instead of with % is more efficient on ARM.
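  // For example, with data_length_ == 4 the successive results for indices
  // 0, 1, 2, 3 are 1, 2, 3, 0.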
  int r = index + 1;
  assert(index <= data_length_);
  if (r == data_length_) {
    return 0;
  } else {
    return r;
  }
}

void RawQueue::DecrementMessageReferenceCount(const void *msg) {
  MessageHeader *header = MessageHeader::Get(msg);
  header->ref_count_sub();
  if (kRefDebug) {
    printf("%p ref dec count: %p count=%d\n", this, msg, header->ref_count());
  }

  // The only way it should ever be 0 is if we were the last one to decrement,
  // in which case nobody else should have it around to re-increment it or
  // anything in the middle, so this is safe to do not atomically with the
  // decrement.
  if (header->ref_count() == 0) {
    DoFreeMessage(msg);
  } else {
    assert(header->ref_count() > 0);
  }
}

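// Takes an additional reference to msg so the same message can be handed to
// another reader (kPeek reads and indexed reads) without being freed out from
// under it.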
inline void RawQueue::IncrementMessageReferenceCount(const void *msg) const {
  MessageHeader *const header = MessageHeader::Get(msg);
  header->ref_count_add();
  if (kRefDebug) {
    printf("%p ref inc count: %p\n", this, msg);
  }
}

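// Returns msg to the free list. If there is a recycle queue, the freed message
// is written into it (with kOverride) and a replacement message taken from the
// recycle queue's pool is what actually goes back onto our free list.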
inline void RawQueue::DoFreeMessage(const void *msg) {
  MessageHeader *header = MessageHeader::Get(msg);
  if (kRefDebug) {
    printf("%p ref free to %p: %p\n", this, recycle_, msg);
  }

  if (__builtin_expect(recycle_ != nullptr, 0)) {
    void *const new_msg = recycle_->GetMessage();
    if (new_msg == nullptr) {
      fprintf(stderr,
              "queue: couldn't get a message"
              " for recycle queue %p\n",
              recycle_);
    } else {
      header->ref_count_add();
      if (!recycle_->WriteMessage(const_cast<void *>(msg), kOverride)) {
        fprintf(stderr,
                "queue: %p->WriteMessage(%p, kOverride) failed."
                " aborting\n",
                recycle_, msg);
        printf("see stderr\n");
        abort();
      }
      msg = new_msg;
      header = MessageHeader::Get(new_msg);
    }
  }

  // This works around GCC bug 60272 (fixed in 4.8.3).
  // new_next should just get replaced with header->next (and the body of the
  // loop should become empty).
  // The bug is that the store to new_next after the compare/exchange is
  // unconditional but it should only be if it fails, which could mean
  // overwriting what somebody else who preempted us right then changed it to.
  // TODO(brians): Get rid of this workaround once we get a new enough GCC.
  MessageHeader *new_next = __atomic_load_n(&free_messages_, __ATOMIC_RELAXED);
  do {
    header->next = new_next;
  } while (__builtin_expect(
      !__atomic_compare_exchange_n(&free_messages_, &new_next, header, true,
                                   __ATOMIC_RELEASE, __ATOMIC_RELAXED),
      0));
}

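// Pops a message off the lock-free free list, sets its reference count to 1,
// and returns a pointer to the payload just past the header. Dies
// (AOS_LOG(FATAL)) if the pool is exhausted.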
void *RawQueue::GetMessage() {
  MessageHeader *header = __atomic_load_n(&free_messages_, __ATOMIC_RELAXED);
  do {
    if (__builtin_expect(header == nullptr, 0)) {
      AOS_LOG(FATAL, "overused pool of queue %p (%s)\n", this, name_);
    }
  } while (__builtin_expect(
      !__atomic_compare_exchange_n(&free_messages_, &header, header->next, true,
                                   __ATOMIC_ACQ_REL, __ATOMIC_RELAXED),
      0));
  void *msg = reinterpret_cast<uint8_t *>(header + 1);
  // It might be uninitialized, 0 from a previous use, or 1 from previously
  // being recycled.
  header->set_ref_count(1);
  if (kRefDebug) {
    printf("%p ref alloc: %p\n", this, msg);
  }
  return msg;
}

RawQueue::RawQueue(const char *name, size_t length, int hash, int queue_length)
    : readable_(&data_lock_), writable_(&data_lock_) {
  static_assert(shm_ok<RawQueue::MessageHeader>::value,
                "the whole point is to stick it in shared memory");
  static_assert((sizeof(RawQueue::MessageHeader) % 8) == 0,
                "need to revalidate size/alignment assumptions");

  if (queue_length < 1) {
    AOS_LOG(FATAL, "queue length %d of %s needs to be at least 1\n",
            queue_length, name);
  }

  const size_t name_size = strlen(name) + 1;
  char *temp = static_cast<char *>(shm_malloc(name_size));
  memcpy(temp, name, name_size);
  name_ = temp;
  length_ = length;
  hash_ = hash;
  queue_length_ = queue_length;

  next_ = NULL;
  recycle_ = NULL;

  if (kFetchDebug) {
    printf("initializing name=%s, length=%zd, hash=%d, queue_length=%d\n", name,
           length, hash, queue_length);
  }

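  // One more slot than requested so that data_start_ == data_end_
  // unambiguously means "empty" while a full ring still leaves the two indices
  // distinct.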
  data_length_ = queue_length + 1;
  data_ = static_cast<void **>(shm_malloc(sizeof(void *) * data_length_));
  data_start_ = 0;
  data_end_ = 0;
  messages_ = 0;

  msg_length_ = length + sizeof(MessageHeader);

  // Create all of the messages for the free list and stick them on.
  {
    MessageHeader *previous = nullptr;
    for (int i = 0; i < queue_length + kExtraMessages; ++i) {
      MessageHeader *const message =
          static_cast<MessageHeader *>(shm_malloc(msg_length_));
      free_messages_ = message;
      message->next = previous;
      previous = message;
    }
  }

  readable_waiting_ = false;

  if (kFetchDebug) {
    printf("made queue %s\n", name);
  }
}

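// Queues live in a singly-linked list in shared memory, protected by
// queues.lock. Fetch returns the existing queue whose name, length, hash, and
// queue_length all match, or appends a newly constructed one to the list.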
RawQueue *RawQueue::Fetch(const char *name, size_t length, int hash,
                          int queue_length) {
  if (kFetchDebug) {
    printf("fetching queue %s\n", name);
  }
  if (mutex_lock(&global_core->mem_struct->queues.lock) != 0) {
    AOS_LOG(FATAL, "mutex_lock(%p) failed\n",
            &global_core->mem_struct->queues.lock);
  }
  RawQueue *current =
      static_cast<RawQueue *>(global_core->mem_struct->queues.pointer);
  if (current != NULL) {
    while (true) {
      // If we found a matching queue.
      if (strcmp(current->name_, name) == 0 && current->length_ == length &&
          current->hash_ == hash && current->queue_length_ == queue_length) {
        mutex_unlock(&global_core->mem_struct->queues.lock);
        return current;
      } else {
        if (kFetchDebug) {
          printf("rejected queue %s strcmp=%d target=%s\n", current->name_,
                 strcmp(current->name_, name), name);
        }
      }
      // If this is the last one.
      if (current->next_ == NULL) break;
      current = current->next_;
    }
  }

  RawQueue *r = new (shm_malloc(sizeof(RawQueue)))
      RawQueue(name, length, hash, queue_length);
  if (current == NULL) {  // if we don't already have one
    global_core->mem_struct->queues.pointer = r;
  } else {
    current->next_ = r;
  }

  mutex_unlock(&global_core->mem_struct->queues.lock);
  return r;
}

RawQueue *RawQueue::Fetch(const char *name, size_t length, int hash,
                          int queue_length, int recycle_hash,
                          int recycle_length, RawQueue **recycle) {
  RawQueue *r = Fetch(name, length, hash, queue_length);
  r->recycle_ = Fetch(name, length, recycle_hash, recycle_length);
  if (r == r->recycle_) {
    fprintf(stderr, "queue: r->recycle_(=%p) == r(=%p)\n", r->recycle_, r);
    printf("see stderr\n");
    r->recycle_ = NULL;
    abort();
  }
  *recycle = r->recycle_;
  return r;
}

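// Appends msg to the ring buffer. Depending on options this fails immediately
// (kNonBlock), drops the oldest message (kOverride), or waits for space
// (kBlock). Readers are only woken if one was actually waiting when the write
// happened.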
bool RawQueue::DoWriteMessage(void *msg, Options<RawQueue> options) {
  if (kWriteDebug) {
    printf("queue: %p->WriteMessage(%p, %x)\n", this, msg, options.printable());
  }

  bool signal_readable;

  {
    IPCMutexLocker locker(&data_lock_);
    AOS_CHECK(!locker.owner_died());

    int new_end;
    while (true) {
      new_end = index_add1(data_end_);
      // If there is room in the queue right now.
      if (new_end != data_start_) break;
      if (options & kNonBlock) {
        if (kWriteDebug) {
          printf("queue: not blocking on %p. returning false\n", this);
        }
        DecrementMessageReferenceCount(msg);
        return false;
      } else if (options & kOverride) {
        if (kWriteDebug) {
          printf("queue: overriding on %p\n", this);
        }
        // Avoid leaking the message that we're going to overwrite.
        DecrementMessageReferenceCount(data_[data_start_]);
        data_start_ = index_add1(data_start_);
      } else {  // kBlock
        assert(options & kBlock);
        if (kWriteDebug) {
          printf("queue: going to wait for writable_ of %p\n", this);
        }
        AOS_CHECK(!writable_.Wait());
      }
    }
    data_[data_end_] = msg;
    ++messages_;
    data_end_ = new_end;

    signal_readable = readable_waiting_;
    readable_waiting_ = false;
  }

  if (signal_readable) {
    if (kWriteDebug) {
      printf("queue: broadcasting to readable_ of %p\n", this);
    }
    readable_.Broadcast();
  } else if (kWriteDebug) {
    printf("queue: skipping broadcast to readable_ of %p\n", this);
  }

  if (kWriteDebug) {
    printf("queue: write returning true on queue %p\n", this);
  }
  return true;
}

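// Wakes up writers after a read, but only if the queue was full when the read
// started (writable_start_ is false) and has room now.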
inline void RawQueue::ReadCommonEnd() {
  if (is_writable()) {
    if (kReadDebug) {
      printf("queue: %ssignalling writable_ of %p\n",
             writable_start_ ? "not " : "", this);
    }
    if (!writable_start_) writable_.Broadcast();
  }
}

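// Called with data_lock_ held. Waits (subject to options and timeout) until
// there is something for this reader: a non-empty queue and, for indexed
// reads, a messages_ count beyond *index. Returns false on kNonBlock with
// nothing available or on timeout.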
bool RawQueue::ReadCommonStart(Options<RawQueue> options, int *index,
                               chrono::nanoseconds timeout) {
  while (data_start_ == data_end_ || ((index != NULL) && messages_ <= *index)) {
    if (options & kNonBlock) {
      if (kReadDebug) {
        printf("queue: not going to block waiting on %p\n", this);
      }
      return false;
    } else {  // kBlock
      assert(options & kBlock);
      if (kReadDebug) {
        printf("queue: going to wait for readable_ of %p\n", this);
      }
      readable_waiting_ = true;
      // Wait for a message to become readable.
      while (true) {
        Condition::WaitResult wait_result = readable_.WaitTimed(timeout);
        if (wait_result == Condition::WaitResult::kOk) {
          break;
        }
        AOS_CHECK(wait_result != Condition::WaitResult::kOwnerDied);
        if (wait_result == Condition::WaitResult::kTimeout) {
          return false;
        }
      }

      if (kReadDebug) {
        printf("queue: done waiting for readable_ of %p\n", this);
      }
    }
  }
  // We have to check down here because we might have unlocked the mutex while
  // Wait()ing above so this value might have changed.
  writable_start_ = is_writable();
  if (kReadDebug) {
    printf("queue: %p->read(%p) start=%d end=%d writable_start=%d\n", this,
           index, data_start_, data_end_, writable_start_);
  }
  return true;
}

inline int RawQueue::LastMessageIndex() const {
  int pos = data_end_ - 1;
  if (pos < 0) {  // If it wrapped around.
    pos = data_length_ - 1;
  }
  return pos;
}

const void *RawQueue::DoReadMessage(Options<RawQueue> options) {
  // TODO(brians): Test this function.
  if (kReadDebug) {
    printf("queue: %p->ReadMessage(%x)\n", this, options.printable());
  }
  void *msg = NULL;

  IPCMutexLocker locker(&data_lock_);
  AOS_CHECK(!locker.owner_died());

  if (!ReadCommonStart(options, nullptr, chrono::nanoseconds(0))) {
    if (kReadDebug) {
      printf("queue: %p common returned false\n", this);
    }
    return NULL;
  }

  if (options & kFromEnd) {
    if (options & kPeek) {
      if (kReadDebug) {
        printf("queue: %p shortcutting c2: %d\n", this, LastMessageIndex());
      }
      msg = data_[LastMessageIndex()];
      IncrementMessageReferenceCount(msg);
    } else {
      while (true) {
        if (kReadDebug) {
          printf("queue: %p start of c2\n", this);
        }
        // This loop pulls each message out of the buffer.
        const int pos = data_start_;
        data_start_ = index_add1(data_start_);
        // If this is the last one.
        if (data_start_ == data_end_) {
          if (kReadDebug) {
            printf("queue: %p reading from c2: %d\n", this, pos);
          }
          msg = data_[pos];
          break;
        }
        // This message is not going to be in the queue any more.
        DecrementMessageReferenceCount(data_[pos]);
      }
    }
  } else {
    if (kReadDebug) {
      printf("queue: %p reading from d2: %d\n", this, data_start_);
    }
    msg = data_[data_start_];
    if (options & kPeek) {
      IncrementMessageReferenceCount(msg);
    } else {
      data_start_ = index_add1(data_start_);
    }
  }
  ReadCommonEnd();
  if (kReadDebug) {
    printf("queue: %p read returning %p\n", this, msg);
  }
  return msg;
}

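// *index is the cumulative count of messages this reader has consumed;
// messages_ only ever increases, so messages_ - *index is how far behind the
// reader is, and a reader that falls more than a full queue behind gets
// skipped ahead to the oldest message still in the buffer.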
const void *RawQueue::DoReadMessageIndex(Options<RawQueue> options, int *index,
                                         chrono::nanoseconds timeout) {
  if (kReadDebug) {
    printf("queue: %p->ReadMessageIndex(%x, %p(*=%d))\n", this,
           options.printable(), index, *index);
  }
  void *msg = NULL;

  IPCMutexLocker locker(&data_lock_);
  AOS_CHECK(!locker.owner_died());

  if (!ReadCommonStart(options, index, timeout)) {
    if (kReadDebug) {
      printf("queue: %p common returned false\n", this);
    }
    return NULL;
  }

  // TODO(parker): Handle integer wrap on the index.

  if (options & kFromEnd) {
    if (kReadDebug) {
      printf("queue: %p reading from c1: %d\n", this, LastMessageIndex());
    }
    msg = data_[LastMessageIndex()];

    // We'd skip this if we had kPeek, but kPeek | kFromEnd isn't valid for
    // reading with an index.
    *index = messages_;
  } else {
    // Where we're going to start reading.
    int my_start;

    const int unread_messages = messages_ - *index;
    assert(unread_messages > 0);
    int current_messages = data_end_ - data_start_;
    if (current_messages < 0) current_messages += data_length_;
    if (kReadIndexDebug) {
      printf("queue: %p start=%d end=%d current=%d\n", this, data_start_,
             data_end_, current_messages);
    }
    assert(current_messages > 0);
    // If we're behind the available messages.
    if (unread_messages > current_messages) {
      // Catch index up to the last available message.
      *index = messages_ - current_messages;
      // And that's the one we're going to read.
      my_start = data_start_;
      if (kReadIndexDebug) {
        printf("queue: %p jumping ahead to message %d (have %d) (at %d)\n",
               this, *index, messages_, data_start_);
      }
    } else {
      // Just start reading at the first available message that we haven't yet
      // read.
      my_start = data_end_ - unread_messages;
      if (kReadIndexDebug) {
        printf("queue: %p original read from %d\n", this, my_start);
      }
      if (data_start_ < data_end_) {
        assert(my_start >= 0);
      }
      if (my_start < 0) my_start += data_length_;
    }

    if (kReadDebug) {
      printf("queue: %p reading from d1: %d\n", this, my_start);
    }
    // We have to be either after the start or before the end, even if the queue
    // is wrapped around (should be both if it's not).
    assert((my_start >= data_start_) || (my_start < data_end_));
    // More sanity checking.
    assert((my_start >= 0) && (my_start < data_length_));
    msg = data_[my_start];
    if (!(options & kPeek)) ++(*index);
  }
  IncrementMessageReferenceCount(msg);

  ReadCommonEnd();
  return msg;
}

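// Walks the free list to count available messages. The list can be modified
// concurrently by other processes, so the result is only a snapshot.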
int RawQueue::FreeMessages() const {
  int r = 0;
  MessageHeader *header = free_messages_;
  while (header != nullptr) {
    ++r;
    header = header->next;
  }
  return r;
}

}  // namespace aos