blob: 57e2a5e3d040b904509b49f70c9aed3f34400311 [file] [log] [blame]
Brian Silvermana6d1b562013-09-01 14:39:39 -07001#include "aos/common/queue.h"
2
3#include <stdio.h>
4#include <string.h>
5#include <errno.h>
6#include <assert.h>
7
8#include <memory>
9
10#include "aos/common/logging/logging.h"
11#include "aos/common/type_traits.h"
12
13namespace aos {
14
namespace {

static_assert(shm_ok<Queue>::value, "Queue instances go into shared memory");

// Compile-time switches for verbose debugging printfs on the various code
// paths (reading, writing, reference counting, and queue lookup).
const bool kReadDebug = false;
const bool kWriteDebug = false;
const bool kRefDebug = false;
const bool kFetchDebug = false;

// The number of extra messages the pool associated with each queue will be able
// to hold (for readers who are slow about freeing them).
const int kExtraMessages = 20;

}  // namespace
29
30struct Queue::MessageHeader {
31 int ref_count;
32 int index; // in pool_
33 static MessageHeader *Get(const void *msg) {
34 return reinterpret_cast<MessageHeader *>(
35 static_cast<uint8_t *>(const_cast<void *>(msg)) -
36 sizeof(MessageHeader));
37 }
38 void Swap(MessageHeader *other) {
39 MessageHeader temp;
40 memcpy(&temp, other, sizeof(temp));
41 memcpy(other, this, sizeof(*other));
42 memcpy(this, &temp, sizeof(*this));
43 }
44};
// Headers are stored directly in front of their messages in shared memory, so
// they have to be shm-safe too.
static_assert(shm_ok<Queue::MessageHeader>::value, "the whole point"
              " is to stick it in shared memory");
47
48// TODO(brians) maybe do this with atomic integer instructions so it doesn't
49// have to lock/unlock pool_lock_
50void Queue::DecrementMessageReferenceCount(const void *msg) {
51 MutexLocker locker(&pool_lock_);
52 MessageHeader *header = MessageHeader::Get(msg);
53 --header->ref_count;
54 assert(header->ref_count >= 0);
55 if (kRefDebug) {
56 printf("ref_dec_count: %p count=%d\n", msg, header->ref_count);
57 }
58 if (header->ref_count == 0) {
59 DoFreeMessage(msg);
60 }
61}
62
63Queue::Queue(const char *name, size_t length, int hash, int queue_length) {
64 const size_t name_size = strlen(name) + 1;
65 char *temp = static_cast<char *>(shm_malloc(name_size));
66 memcpy(temp, name, name_size);
67 name_ = temp;
68 length_ = length;
69 hash_ = hash;
70 queue_length_ = queue_length;
71
72 next_ = NULL;
73 recycle_ = NULL;
74
75 if (kFetchDebug) {
76 printf("initializing name=%s, length=%zd, hash=%d, queue_length=%d\n",
77 name, length, hash, queue_length);
78 }
79
80 data_length_ = queue_length + 1;
81 if (data_length_ < 2) { // TODO(brians) when could this happen?
82 data_length_ = 2;
83 }
84 data_ = static_cast<void **>(shm_malloc(sizeof(void *) * data_length_));
85 data_start_ = 0;
86 data_end_ = 0;
87 messages_ = 0;
88
89 mem_length_ = queue_length + kExtraMessages;
90 pool_length_ = 0;
91 messages_used_ = 0;
92 msg_length_ = length + sizeof(MessageHeader);
93 pool_ = static_cast<MessageHeader **>(
94 shm_malloc(sizeof(MessageHeader *) * mem_length_));
95
96 if (kFetchDebug) {
97 printf("made queue %s\n", name);
98 }
99}
100Queue *Queue::Fetch(const char *name, size_t length, int hash,
101 int queue_length) {
102 if (kFetchDebug) {
103 printf("fetching queue %s\n", name);
104 }
105 if (mutex_lock(&global_core->mem_struct->queues.alloc_lock) != 0) {
106 return NULL;
107 }
108 Queue *current = static_cast<Queue *>(
109 global_core->mem_struct->queues.queue_list);
110 Queue *last = NULL;
111 while (current != NULL) {
112 // if we found a matching queue
113 if (strcmp(current->name_, name) == 0 && current->length_ == length &&
114 current->hash_ == hash && current->queue_length_ == queue_length) {
115 mutex_unlock(&global_core->mem_struct->queues.alloc_lock);
116 return current;
117 } else {
118 if (kFetchDebug) {
119 printf("rejected queue %s strcmp=%d target=%s\n", current->name_,
120 strcmp(current->name_, name), name);
121 }
122 }
123 current = current->next_;
124 }
125
126 void *temp = shm_malloc(sizeof(Queue));
127 current = new (temp) Queue(name, length, hash, queue_length);
128 if (last == NULL) { // if we don't have one to tack the new one on to
129 global_core->mem_struct->queues.queue_list = current;
130 } else {
131 last->next_ = current;
132 }
133
134 mutex_unlock(&global_core->mem_struct->queues.alloc_lock);
135 return current;
136}
137Queue *Queue::Fetch(const char *name, size_t length, int hash,
138 int queue_length,
139 int recycle_hash, int recycle_length, Queue **recycle) {
140 Queue *r = Fetch(name, length, hash, queue_length);
141 r->recycle_ = Fetch(name, length, recycle_hash, recycle_length);
142 if (r == r->recycle_) {
143 fprintf(stderr, "queue: r->recycle_(=%p) == r(=%p)\n", r->recycle_, r);
144 printf("see stderr\n");
145 abort();
146 }
147 *recycle = r->recycle_;
148 return r;
149}
150
// Returns msg's storage to this queue's pool once its reference count hits 0.
// If this queue has a recycle queue, the freed message is instead handed to
// the recycle queue, and a fresh message from the recycle queue's pool is
// swapped into this pool in its place.
// Called with pool_lock_ held (see DecrementMessageReferenceCount).
void Queue::DoFreeMessage(const void *msg) {
  MessageHeader *header = MessageHeader::Get(msg);
  // The pool slot recorded in the header must point back at the header;
  // anything else means the pool bookkeeping is corrupted.
  if (pool_[header->index] != header) {  // if something's messed up
    fprintf(stderr, "queue: something is very very wrong with queue %p."
            " pool_(=%p)[header->index(=%d)] != header(=%p)\n",
            this, pool_, header->index, header);
    printf("queue: see stderr\n");
    abort();
  }
  if (kRefDebug) {
    printf("ref free: %p\n", msg);
  }
  --messages_used_;

  if (recycle_ != NULL) {
    void *const new_msg = recycle_->GetMessage();
    if (new_msg == NULL) {
      // Best-effort: if the recycle queue can't supply a replacement, just
      // fall through and free msg into our own pool below.
      fprintf(stderr, "queue: couldn't get a message"
              " for recycle queue %p\n", recycle_);
    } else {
      // Take a message from recycle_ and switch its
      // header with the one being freed, which effectively
      // switches which queue each message belongs to.
      MessageHeader *const new_header = MessageHeader::Get(new_msg);
      // also switch the messages between the pools
      pool_[header->index] = new_header;
      {
        MutexLocker locker(&recycle_->pool_lock_);
        recycle_->pool_[new_header->index] = header;
        // swap the information in both headers
        header->Swap(new_header);
        // don't unlock the other pool until all of its messages are valid
      }
      // use the header for new_msg which is now for this pool
      header = new_header;
      // Publish the old message onto the recycle queue; kOverride keeps this
      // from blocking if the recycle queue is full.
      if (!recycle_->WriteMessage(const_cast<void *>(msg), kOverride)) {
        fprintf(stderr, "queue: %p->WriteMessage(%p, kOverride) failed."
                " aborting\n", recycle_, msg);
        printf("see stderr\n");
        abort();
      }
      // From here on, the message being returned to OUR pool is new_msg.
      msg = new_msg;
    }
  }

  // where the one we're freeing was
  int index = header->index;
  header->index = -1;
  // Keep the pool compact: in-use headers occupy [0, messages_used_).
  if (index != messages_used_) {  // if we're not freeing the one on the end
    // put the last one where the one we're freeing was
    header = pool_[index] = pool_[messages_used_];
    // put the one we're freeing at the end
    pool_[messages_used_] = MessageHeader::Get(msg);
    // update the former last one's index
    header->index = index;
  }
}
208
209bool Queue::WriteMessage(void *msg, int options) {
210 if (kWriteDebug) {
211 printf("queue: %p->WriteMessage(%p, %d)\n", this, msg, options);
212 }
213 if (msg == NULL || msg < reinterpret_cast<void *>(global_core->mem_struct) ||
214 msg > static_cast<void *>((
215 reinterpret_cast<uintptr_t>(global_core->mem_struct) +
216 global_core->size))) {
217 fprintf(stderr, "queue: attempt to write bad message %p to %p. aborting\n",
218 msg, this);
219 printf("see stderr\n");
220 abort();
221 }
222 {
223 MutexLocker locker(&data_lock_);
224 int new_end = (data_end_ + 1) % data_length_;
225 while (new_end == data_start_) {
226 if (options & kNonBlock) {
227 if (kWriteDebug) {
228 printf("queue: not blocking on %p. returning -1\n", this);
229 }
230 return false;
231 } else if (options & kOverride) {
232 if (kWriteDebug) {
233 printf("queue: overriding on %p\n", this);
234 }
235 // avoid leaking the message that we're going to overwrite
236 DecrementMessageReferenceCount(data_[data_start_]);
237 data_start_ = (data_start_ + 1) % data_length_;
238 } else { // kBlock
239 if (kWriteDebug) {
240 printf("queue: going to wait for writable_ of %p\n", this);
241 }
242 writable_.Wait(&data_lock_);
243 }
244 new_end = (data_end_ + 1) % data_length_;
245 }
246 data_[data_end_] = msg;
247 ++messages_;
248 data_end_ = new_end;
249 }
250 if (kWriteDebug) {
251 printf("queue: setting readable of %p\n", this);
252 }
253 readable_.Signal();
254 if (kWriteDebug) {
255 printf("queue: write returning true on queue %p\n", this);
256 }
257 return true;
258}
259
260void Queue::ReadCommonEnd(bool read) {
261 if (read) {
262 writable_.Signal();
263 }
264}
// Waits until there is something to read: the ring is non-empty and, when
// index is non-NULL, *index is behind the total number of messages written.
// Returns false (without waiting) if kNonBlock is set and nothing is ready.
// Expects data_lock_ to be held by the caller; it is temporarily released
// while blocking on readable_.
bool Queue::ReadCommonStart(int options, int *index) {
  while (data_start_ == data_end_ || ((index != NULL) && messages_ <= *index)) {
    if (options & kNonBlock) {
      if (kReadDebug) {
        printf("queue: not going to block waiting on %p\n", this);
      }
      return false;
    } else {  // kBlock
      if (kReadDebug) {
        printf("queue: going to wait for readable of %p\n", this);
      }
      // Drop the lock while blocked so writers can make progress.
      // NOTE(review): the Unlock and Wait are separate steps, so a Signal in
      // between could be missed — confirm readable_'s semantics cover this.
      data_lock_.Unlock();
      // wait for a message to become readable
      readable_.Wait();
      if (kReadDebug) {
        printf("queue: done waiting for readable of %p\n", this);
      }
      // Re-acquire before re-checking the loop condition.
      data_lock_.Lock();
    }
  }
  if (kReadDebug) {
    printf("queue: %p->read start=%d end=%d\n", this, data_start_, data_end_);
  }
  return true;
}
290void *Queue::ReadPeek(int options, int start) {
291 void *ret;
292 if (options & kFromEnd) {
293 int pos = data_end_ - 1;
294 if (pos < 0) { // if it needs to wrap
295 pos = data_length_ - 1;
296 }
297 if (kReadDebug) {
298 printf("queue: reading from line %d: %d\n", __LINE__, pos);
299 }
300 ret = data_[pos];
301 } else {
302 if (kReadDebug) {
303 printf("queue: reading from line %d: %d\n", __LINE__, start);
304 }
305 ret = data_[start];
306 }
307 MessageHeader *const header = MessageHeader::Get(ret);
308 ++header->ref_count;
309 if (kRefDebug) {
310 printf("ref inc count: %p\n", ret);
311 }
312 return ret;
313}
// Reads one message from the queue according to options (kPeek, kFromEnd,
// kNonBlock/kBlock). Returns NULL if nothing is available in kNonBlock mode.
// The non-peek paths pop the message, so the queue's reference transfers to
// the caller, who must eventually DecrementMessageReferenceCount it.
const void *Queue::ReadMessage(int options) {
  if (kReadDebug) {
    printf("queue: %p->ReadMessage(%d)\n", this, options);
  }
  void *msg = NULL;
  MutexLocker locker(&data_lock_);
  if (!ReadCommonStart(options, NULL)) {
    if (kReadDebug) {
      printf("queue: common returned false for %p\n", this);
    }
    return NULL;
  }
  if (options & kPeek) {
    // Peek: hand out the oldest message without removing it.
    msg = ReadPeek(options, data_start_);
  } else {
    if (options & kFromEnd) {
      // kFromEnd: drain everything up to the newest message, dropping the
      // queue's reference to each skipped one, and return the newest.
      while (1) {
        if (kReadDebug) {
          printf("queue: start of c2 of %p\n", this);
        }
        // This loop pulls each message out of the buffer.
        const int pos = data_start_;
        data_start_ = (data_start_ + 1) % data_length_;
        // if this is the last one
        if (data_start_ == data_end_) {
          if (kReadDebug) {
            printf("queue: reading from c2: %d\n", pos);
          }
          msg = data_[pos];
          break;
        }
        // it's not going to be in the queue any more
        DecrementMessageReferenceCount(data_[pos]);
      }
    } else {
      // Normal read: pop the oldest message.
      if (kReadDebug) {
        printf("queue: reading from d2: %d\n", data_start_);
      }
      msg = data_[data_start_];
      data_start_ = (data_start_ + 1) % data_length_;
    }
  }
  // Only signal writers if we actually freed up ring space (i.e. not a peek).
  ReadCommonEnd(!(options & kPeek));
  if (kReadDebug) {
    printf("queue: read returning %p\n", msg);
  }
  return msg;
}
// Reads the message at the caller-tracked position *index without consuming
// anything off the queue (every returned message gets its own reference).
// *index counts total messages ever written; it is advanced past the message
// that was read, or jumped forward if the reader has fallen behind the ring.
const void *Queue::ReadMessageIndex(int options, int *index) {
  if (kReadDebug) {
    printf("queue: %p->ReadMessageIndex(%d, %p(*=%d))\n",
           this, options, index, *index);
  }
  void *msg = NULL;
  {
    MutexLocker locker(&data_lock_);
    if (!ReadCommonStart(options, index)) {
      if (kReadDebug) {
        printf("queue: common returned false for %p\n", this);
      }
      return NULL;
    }
    // TODO(parker): Handle integer wrap on the index.
    // Map the caller's absolute index to a ring-buffer position: offset is
    // how many messages behind the newest the caller is.
    const int offset = messages_ - *index;
    int my_start = data_end_ - offset;
    if (offset >= data_length_) {  // if we're behind the available messages
      // catch index up to the last available message
      *index += data_start_ - my_start;
      // and that's the one we're going to read
      my_start = data_start_;
    }
    if (my_start < 0) {  // if we want to read off the end of the buffer
      // unwrap where we're going to read from
      my_start += data_length_;
    }
    if (options & kPeek) {
      // ReadPeek takes the extra reference itself.
      msg = ReadPeek(options, my_start);
    } else {
      if (options & kFromEnd) {
        // Skip straight to the newest message and move *index past it.
        if (kReadDebug) {
          printf("queue: start of c1 of %p\n", this);
        }
        int pos = data_end_ - 1;
        if (pos < 0) {  // if it wrapped
          pos = data_length_ - 1;  // unwrap it
        }
        if (kReadDebug) {
          printf("queue: reading from c1: %d\n", pos);
        }
        msg = data_[pos];
        *index = messages_;
      } else {
        // Read the message at the caller's position and advance by one.
        if (kReadDebug) {
          printf("queue: reading from d1: %d\n", my_start);
        }
        msg = data_[my_start];
        ++(*index);
      }
      // The queue keeps its reference; give the caller one of their own.
      MessageHeader *const header = MessageHeader::Get(msg);
      ++header->ref_count;
      if (kRefDebug) {
        printf("ref_inc_count: %p\n", msg);
      }
    }
  }
  // this function never consumes one off the queue
  ReadCommonEnd(false);
  return msg;
}
423
424void *Queue::GetMessage() {
425 MutexLocker locker(&pool_lock_);
426 MessageHeader *header;
427 if (pool_length_ - messages_used_ > 0) {
428 header = pool_[messages_used_];
429 } else {
430 if (pool_length_ >= mem_length_) {
431 LOG(FATAL, "overused pool %p from queue %p\n", pool, queue);
432 }
433 header = pool_[pool_length_] =
434 static_cast<MessageHeader *>(shm_malloc(msg_length_));
435 ++pool_length_;
436 }
437 void *msg = reinterpret_cast<uint8_t *>(header) + sizeof(MessageHeader);
438 header->ref_count = 1;
439 if (kRefDebug) {
440 printf("ref alloc: %p\n", msg);
441 }
442 header->index = messages_used_;
443 ++messages_used_;
444 return msg;
445}
446
447} // namespace aos