/*
 * Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <stdlib.h>
#include <string.h>
#include "pico/util/queue.h"

void queue_init_with_spinlock(queue_t *q, uint element_size, uint element_count, uint spinlock_num) {
    lock_init(&q->core, spinlock_num);
    // Allocate one spare slot so a full queue (level == element_count) can be
    // distinguished from an empty one (wptr == rptr) without a separate counter.
    q->data = (uint8_t *)calloc(element_count + 1, element_size);
    q->element_count = (uint16_t)element_count;
    q->element_size = (uint16_t)element_size;
    q->wptr = 0;
    q->rptr = 0;
}

void queue_free(queue_t *q) {
    free(q->data);
}

static inline void *element_ptr(queue_t *q, uint index) {
    assert(index <= q->element_count); // valid indices are 0..element_count (element_count + 1 slots)
    return q->data + index * q->element_size;
}

static inline uint16_t inc_index(queue_t *q, uint16_t index) {
    if (++index > q->element_count) { // > because we have element_count + 1 elements
        index = 0;
    }

#if PICO_QUEUE_MAX_LEVEL
    // Track the high-water mark of the queue level for diagnostics.
    uint16_t level = queue_get_level_unsafe(q);
    if (level > q->max_level) {
        q->max_level = level;
    }
#endif

    return index;
}
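
/*
 * Worked example of the index arithmetic (illustrative, not part of the
 * original source): with element_count == 3 the buffer has 4 slots, indices
 * 0..3. The level is wptr - rptr taken modulo element_count + 1, so wptr == 1
 * and rptr == 3 gives a level of 1 - 3 + 4 == 2. The queue is empty when
 * wptr == rptr and full when the level reaches element_count == 3; the spare
 * slot is what keeps those two states distinguishable.
 */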

static bool queue_add_internal(queue_t *q, const void *data, bool block) {
    do {
        uint32_t save = spin_lock_blocking(q->core.spin_lock);
        if (queue_get_level_unsafe(q) != q->element_count) {
            // Space available: copy the element in and advance the write pointer,
            // then notify any waiter blocked on an empty queue.
            memcpy(element_ptr(q, q->wptr), data, q->element_size);
            q->wptr = inc_index(q, q->wptr);
            lock_internal_spin_unlock_with_notify(&q->core, save);
            return true;
        }
        if (block) {
            // Queue full: release the lock, wait to be notified, then retry.
            lock_internal_spin_unlock_with_wait(&q->core, save);
        } else {
            spin_unlock(q->core.spin_lock, save);
            return false;
        }
    } while (true);
}
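
/*
 * Usage sketch (hypothetical, not part of this file): blocking in an
 * interrupt handler can deadlock if the consumer runs on the interrupted
 * core, so a producer in IRQ context would use queue_try_add() and handle
 * overflow itself. The names rx_queue, read_hw_fifo and dropped_count are
 * illustrative only.
 *
 *   void my_irq_handler(void) {
 *       uint8_t byte = read_hw_fifo();        // hypothetical device read
 *       if (!queue_try_add(&rx_queue, &byte)) {
 *           dropped_count++;                  // queue full: drop and count
 *       }
 *   }
 */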

static bool queue_remove_internal(queue_t *q, void *data, bool block) {
    do {
        uint32_t save = spin_lock_blocking(q->core.spin_lock);
        if (queue_get_level_unsafe(q) != 0) {
            // Data available: copy the element out and advance the read pointer,
            // then notify any waiter blocked on a full queue.
            memcpy(data, element_ptr(q, q->rptr), q->element_size);
            q->rptr = inc_index(q, q->rptr);
            lock_internal_spin_unlock_with_notify(&q->core, save);
            return true;
        }
        if (block) {
            // Queue empty: release the lock, wait to be notified, then retry.
            lock_internal_spin_unlock_with_wait(&q->core, save);
        } else {
            spin_unlock(q->core.spin_lock, save);
            return false;
        }
    } while (true);
}

static bool queue_peek_internal(queue_t *q, void *data, bool block) {
    do {
        uint32_t save = spin_lock_blocking(q->core.spin_lock);
        if (queue_get_level_unsafe(q) != 0) {
            // Copy the head element out without advancing rptr, so it stays queued.
            memcpy(data, element_ptr(q, q->rptr), q->element_size);
            lock_internal_spin_unlock_with_notify(&q->core, save);
            return true;
        }
        if (block) {
            lock_internal_spin_unlock_with_wait(&q->core, save);
        } else {
            spin_unlock(q->core.spin_lock, save);
            return false;
        }
    } while (true);
}

bool queue_try_add(queue_t *q, const void *data) {
    return queue_add_internal(q, data, false);
}

bool queue_try_remove(queue_t *q, void *data) {
    return queue_remove_internal(q, data, false);
}

bool queue_try_peek(queue_t *q, void *data) {
    return queue_peek_internal(q, data, false);
}

void queue_add_blocking(queue_t *q, const void *data) {
    queue_add_internal(q, data, true);
}

void queue_remove_blocking(queue_t *q, void *data) {
    queue_remove_internal(q, data, true);
}

void queue_peek_blocking(queue_t *q, void *data) {
    queue_peek_internal(q, data, true);
}
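
/*
 * End-to-end usage sketch (hypothetical, assumes pico/multicore.h): a
 * single-producer/single-consumer pipe between the two RP2040 cores. The
 * names sample_queue and core1_entry are illustrative only; queue_init() is
 * the header's wrapper that picks a striped spin lock automatically.
 *
 *   #include "pico/multicore.h"
 *   #include "pico/util/queue.h"
 *
 *   static queue_t sample_queue;
 *
 *   static void core1_entry(void) {
 *       for (uint32_t v = 0;; v++) {
 *           queue_add_blocking(&sample_queue, &v); // blocks while the queue is full
 *       }
 *   }
 *
 *   int main(void) {
 *       queue_init(&sample_queue, sizeof(uint32_t), 8);
 *       multicore_launch_core1(core1_entry);
 *       uint32_t v;
 *       while (true) {
 *           queue_remove_blocking(&sample_queue, &v); // blocks while empty
 *       }
 *   }
 */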