/*
 * Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include "hardware/irq.h"
#include "hardware/regs/m0plus.h"
#include "hardware/platform_defs.h"
#include "hardware/structs/scb.h"
#include "hardware/claim.h"

#include "pico/mutex.h"
#include "pico/assert.h"

extern void __unhandled_user_irq(void);

static uint8_t user_irq_claimed[NUM_CORES];

static inline irq_handler_t *get_vtable(void) {
    return (irq_handler_t *) scb_hw->vtor;
}

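// Note: the Cortex-M0+ executes only Thumb code, so any function pointer
// written to the vector table must have bit 0 set. These helpers convert
// between a chain slot's even data address and the odd, callable address
// that is stored in the vtable.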
static inline void *add_thumb_bit(void *addr) {
    return (void *) (((uintptr_t) addr) | 0x1);
}

static inline void *remove_thumb_bit(void *addr) {
    return (void *) (((uintptr_t) addr) & ~(uintptr_t) 0x1);
}

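// The __dmb() below ensures the vtable write is visible before the IRQ spin
// lock is released, so code on the other core that takes the lock afterwards
// is guaranteed to observe the updated handler.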
static void set_raw_irq_handler_and_unlock(uint num, irq_handler_t handler, uint32_t save) {
    // update the vtable entry (the value may be unchanged in some cases, but writing it
    // unconditionally keeps the code compact)
    get_vtable()[16 + num] = handler;
    __dmb();
    spin_unlock(spin_lock_instance(PICO_SPINLOCK_ID_IRQ), save);
}

void irq_set_enabled(uint num, bool enabled) {
    check_irq_param(num);
    irq_set_mask_enabled(1u << num, enabled);
}

bool irq_is_enabled(uint num) {
    check_irq_param(num);
    return 0 != ((1u << num) & *((io_rw_32 *) (PPB_BASE + M0PLUS_NVIC_ISER_OFFSET)));
}

void irq_set_mask_enabled(uint32_t mask, bool enabled) {
    if (enabled) {
        // Clear pending before enable
        // (if the IRQ is actually asserted, it will immediately re-pend)
        *((io_rw_32 *) (PPB_BASE + M0PLUS_NVIC_ICPR_OFFSET)) = mask;
        *((io_rw_32 *) (PPB_BASE + M0PLUS_NVIC_ISER_OFFSET)) = mask;
    } else {
        *((io_rw_32 *) (PPB_BASE + M0PLUS_NVIC_ICER_OFFSET)) = mask;
    }
}
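
// Example usage (an illustrative sketch, not part of this file): enabling
// both RP2040 DMA IRQ lines with a single NVIC write:
//
//     irq_set_mask_enabled((1u << DMA_IRQ_0) | (1u << DMA_IRQ_1), true);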

void irq_set_pending(uint num) {
    check_irq_param(num);
    *((io_rw_32 *) (PPB_BASE + M0PLUS_NVIC_ISPR_OFFSET)) = 1u << num;
}

#if !PICO_DISABLE_SHARED_IRQ_HANDLERS
// limited by 8-bit relative links (and reality)
static_assert(PICO_MAX_SHARED_IRQ_HANDLERS >= 1 && PICO_MAX_SHARED_IRQ_HANDLERS < 0x7f, "");

// note these are not real functions; they are code fragments (i.e. don't call them)
extern void irq_handler_chain_first_slot(void);
extern void irq_handler_chain_remove_tail(void);

extern struct irq_handler_chain_slot {
    // The first three halfwords are executable code: the raw vtable handler points at a slot,
    // and inst3 jumps to the next slot in the chain (or to the end-of-chain handler).
    uint16_t inst1;
    uint16_t inst2;
    uint16_t inst3;
    union {
        // When a handler is removed while executing, it needs an extra instruction, which
        // overwrites the link and the priority; this is OK because no one else is modifying
        // the chain (it is effectively core local), and any user code which might still need
        // this link must disable the IRQ in question before updating, which means we aren't executing!
        struct {
            int8_t link;
            uint8_t priority;
        };
        uint16_t inst4;
    };
    irq_handler_t handler;
} irq_handler_chain_slots[PICO_MAX_SHARED_IRQ_HANDLERS];


static int8_t irq_handler_chain_free_slot_head;

static inline bool is_shared_irq_raw_handler(irq_handler_t raw_handler) {
    return (uintptr_t)raw_handler - (uintptr_t)irq_handler_chain_slots < sizeof(irq_handler_chain_slots);
}

bool irq_has_shared_handler(uint irq_num) {
    check_irq_param(irq_num);
    irq_handler_t handler = irq_get_vtable_handler(irq_num);
    return handler && is_shared_irq_raw_handler(handler);
}

#else
#define is_shared_irq_raw_handler(h) false
bool irq_has_shared_handler(uint irq_num) {
    return false;
}
#endif

irq_handler_t irq_get_vtable_handler(uint num) {
    check_irq_param(num);
    return get_vtable()[16 + num];
}

void irq_set_exclusive_handler(uint num, irq_handler_t handler) {
    check_irq_param(num);
#if !PICO_NO_RAM_VECTOR_TABLE
    spin_lock_t *lock = spin_lock_instance(PICO_SPINLOCK_ID_IRQ);
    uint32_t save = spin_lock_blocking(lock);
    __unused irq_handler_t current = irq_get_vtable_handler(num);
    hard_assert(current == __unhandled_user_irq || current == handler);
    set_raw_irq_handler_and_unlock(num, handler, save);
#else
    panic_unsupported();
#endif
}

irq_handler_t irq_get_exclusive_handler(uint num) {
    check_irq_param(num);
#if !PICO_NO_RAM_VECTOR_TABLE
    spin_lock_t *lock = spin_lock_instance(PICO_SPINLOCK_ID_IRQ);
    uint32_t save = spin_lock_blocking(lock);
    irq_handler_t current = irq_get_vtable_handler(num);
    spin_unlock(lock, save);
    if (current == __unhandled_user_irq || is_shared_irq_raw_handler(current)) {
        return NULL;
    }
    return current;
#else
    panic_unsupported();
#endif
}

#if !PICO_DISABLE_SHARED_IRQ_HANDLERS
static uint16_t make_branch(uint16_t *from, void *to) {
    uint32_t ui_from = (uint32_t)from;
    uint32_t ui_to = (uint32_t)to;
    int32_t delta = (int32_t)(ui_to - ui_from - 4);
    assert(delta >= -2048 && delta <= 2046 && !(delta & 1));
    return (uint16_t)(0xe000 | ((delta >> 1) & 0x7ff));
}
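
// Worked example (illustrative numbers): a branch from 0x20000000 to
// 0x20000010 has delta = 0x10 - 4 = 12, giving 0xe000 | (12 >> 1) = 0xe006,
// i.e. a Thumb unconditional B whose signed 11-bit halfword offset is taken
// relative to the instruction address + 4 (range -2048..+2046 bytes).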

static void insert_branch_and_link(uint16_t *from, void *to) {
    uint32_t ui_from = (uint32_t)from;
    uint32_t ui_to = (uint32_t)to;
    uint32_t delta = (ui_to - ui_from - 4) / 2;
    assert(!(delta >> 11u));
    from[0] = (uint16_t)(0xf000 | ((delta >> 11u) & 0x7ffu));
    from[1] = (uint16_t)(0xf800 | (delta & 0x7ffu));
}
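
// This writes the two-halfword Thumb-1 BL pair (0xf000 | hi, 0xf800 | lo);
// the assert restricts it to forward targets within 4 KB, which is all the
// chain code needs.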

static inline void *resolve_branch(uint16_t *inst) {
    assert(0x1c == (*inst) >> 11u);
    int32_t i_addr = (*inst) << 21u;
    i_addr /= (int32_t)(1u << 21u);
    return inst + 2 + i_addr;
}
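
// resolve_branch inverts make_branch: shifting the 11-bit immediate to the top
// of an int32_t and dividing by 1 << 21 sign-extends it, and the uint16_t*
// arithmetic (inst + 2 + offset-in-halfwords) recreates the PC+4-relative target.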

// GCC produces horrible code for subtraction of pointers here, and it was bugging me
static inline int8_t slot_diff(struct irq_handler_chain_slot *to, struct irq_handler_chain_slot *from) {
    static_assert(sizeof(struct irq_handler_chain_slot) == 12, "");
    int32_t result = 0xaaaa;
    // return (to - from);
    // note this implementation has limited range, but is fine for plenty more than the -128..127 result range
    asm (".syntax unified\n"
         "subs %1, %2\n"
         "adcs %1, %1\n" // * 2 (the carry adds 1 when the difference is non-negative, for rounding)
         "muls %0, %1\n"
         "lsrs %0, 20\n"
         : "+l" (result), "+l" (to)
         : "l" (from)
         :
         );
    return (int8_t)result;
}
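
// The multiply-and-shift above approximates a divide by sizeof(slot) == 12:
// for a byte difference of 12n the asm computes ((24n + carry) * 0xaaaa) >> 20,
// and 24 * 0xaaaa == 1048560 is close enough to 2^20 that the result truncates
// to n for the small slot counts used here; e.g. n == 1 gives
// (25 * 0xaaaa) >> 20 == 1. (A sketch of the arithmetic, not a general divider.)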

static inline int8_t get_slot_index(struct irq_handler_chain_slot *slot) {
    return slot_diff(slot, irq_handler_chain_slots);
}
#endif

void irq_add_shared_handler(uint num, irq_handler_t handler, uint8_t order_priority) {
    check_irq_param(num);
#if PICO_NO_RAM_VECTOR_TABLE
    panic_unsupported();
#elif PICO_DISABLE_SHARED_IRQ_HANDLERS
    irq_set_exclusive_handler(num, handler);
#else
    spin_lock_t *lock = spin_lock_instance(PICO_SPINLOCK_ID_IRQ);
    uint32_t save = spin_lock_blocking(lock);
    hard_assert(irq_handler_chain_free_slot_head >= 0); // we must have a free slot
    struct irq_handler_chain_slot *slot = &irq_handler_chain_slots[irq_handler_chain_free_slot_head];
    int8_t slot_index = irq_handler_chain_free_slot_head;
    irq_handler_chain_free_slot_head = slot->link;
    irq_handler_t vtable_handler = get_vtable()[16 + num];
    if (!is_shared_irq_raw_handler(vtable_handler)) {
        // start new chain
        hard_assert(vtable_handler == __unhandled_user_irq);
        struct irq_handler_chain_slot slot_data = {
                .inst1 = 0xa100, // add r1, pc, #0
                .inst2 = make_branch(&slot->inst2, irq_handler_chain_first_slot), // b irq_handler_chain_first_slot
                .inst3 = 0xbd00, // pop {pc}
                .link = -1,
                .priority = order_priority,
                .handler = handler
        };
        *slot = slot_data;
        vtable_handler = (irq_handler_t)add_thumb_bit(slot);
    } else {
        assert(!((((uintptr_t)vtable_handler) - ((uintptr_t)irq_handler_chain_slots) - 1) % sizeof(struct irq_handler_chain_slot)));
        struct irq_handler_chain_slot *prev_slot = NULL;
        struct irq_handler_chain_slot *existing_vtable_slot = remove_thumb_bit(vtable_handler);
        struct irq_handler_chain_slot *cur_slot = existing_vtable_slot;
        while (cur_slot->priority > order_priority) {
            prev_slot = cur_slot;
            if (cur_slot->link < 0) break;
            cur_slot = &irq_handler_chain_slots[cur_slot->link];
        }
        if (prev_slot) {
            // insert into chain
            struct irq_handler_chain_slot slot_data = {
                    .inst1 = 0x4801, // ldr r0, [pc, #4]
                    .inst2 = 0x4780, // blx r0
                    .inst3 = prev_slot->link >= 0 ?
                            make_branch(&slot->inst3, resolve_branch(&prev_slot->inst3)) : // b next_slot
                            0xbd00, // pop {pc}
                    .link = prev_slot->link,
                    .priority = order_priority,
                    .handler = handler
            };
            // update code and data links
            prev_slot->inst3 = make_branch(&prev_slot->inst3, slot);
            prev_slot->link = slot_index;
            *slot = slot_data;
        } else {
            // update with new chain head
            struct irq_handler_chain_slot slot_data = {
                    .inst1 = 0xa100, // add r1, pc, #0
                    .inst2 = make_branch(&slot->inst2, irq_handler_chain_first_slot), // b irq_handler_chain_first_slot
                    .inst3 = make_branch(&slot->inst3, existing_vtable_slot), // b existing_slot
                    .link = get_slot_index(existing_vtable_slot),
                    .priority = order_priority,
                    .handler = handler
            };
            *slot = slot_data;
            // fixup previous head slot
            existing_vtable_slot->inst1 = 0x4801; // ldr r0, [pc, #4]
            existing_vtable_slot->inst2 = 0x4780; // blx r0
            vtable_handler = (irq_handler_t)add_thumb_bit(slot);
        }
    }
    set_raw_irq_handler_and_unlock(num, vtable_handler, save);
#endif
}
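
// Example usage (an illustrative sketch; driver_a_isr and driver_b_isr are
// hypothetical handlers of type irq_handler_t): two drivers sharing DMA_IRQ_0,
// with driver_b_isr called first because of its higher order priority:
//
//     irq_add_shared_handler(DMA_IRQ_0, driver_a_isr,
//                            PICO_SHARED_IRQ_HANDLER_DEFAULT_ORDER_PRIORITY);
//     irq_add_shared_handler(DMA_IRQ_0, driver_b_isr,
//                            PICO_SHARED_IRQ_HANDLER_HIGHEST_ORDER_PRIORITY);
//     irq_set_enabled(DMA_IRQ_0, true);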

void irq_remove_handler(uint num, irq_handler_t handler) {
#if !PICO_NO_RAM_VECTOR_TABLE
    spin_lock_t *lock = spin_lock_instance(PICO_SPINLOCK_ID_IRQ);
    uint32_t save = spin_lock_blocking(lock);
    irq_handler_t vtable_handler = get_vtable()[16 + num];
    if (vtable_handler != __unhandled_user_irq && vtable_handler != handler) {
#if !PICO_DISABLE_SHARED_IRQ_HANDLERS
        if (is_shared_irq_raw_handler(vtable_handler)) {
            // This is a bit tricky, as an executing IRQ handler doesn't take a lock.

            // First thing to do is to disable the IRQ in question; that takes care of calls from user code.
            // Note that an IRQ handler chain is local to our own core, so we don't need to worry about the other core.
            bool was_enabled = irq_is_enabled(num);
            irq_set_enabled(num, false);
            __dmb();

            // It is possible we are being called while an IRQ for this chain is already in progress.
            // The issue we have here is that we must not free a slot that is currently being executed, because
            // inst3 is still to be executed, and inst3 might get overwritten if the slot is re-used.

            // By disallowing other exceptions from removing an IRQ handler (which seems fair)
            // we now only have to worry about removing a slot from a chain that is currently executing.

            // Note we expect that the slot we are deleting is the one that is executing.
            // In particular, bad things happen if the caller were to delete a handler in the chain
            // before it. This is not an allowed use case though, and I can't imagine anyone wanting to in practice.
            // Sadly this is not something we can detect.

            uint exception = __get_current_exception();
            hard_assert(!exception || exception == num + 16);

            struct irq_handler_chain_slot *prev_slot = NULL;
            struct irq_handler_chain_slot *existing_vtable_slot = remove_thumb_bit(vtable_handler);
            struct irq_handler_chain_slot *to_free_slot = existing_vtable_slot;
            while (to_free_slot->handler != handler) {
                prev_slot = to_free_slot;
                if (to_free_slot->link < 0) break;
                to_free_slot = &irq_handler_chain_slots[to_free_slot->link];
            }
            if (to_free_slot->handler == handler) {
                int8_t next_slot_index = to_free_slot->link;
                if (next_slot_index >= 0) {
                    // There is another slot in the chain, so copy that over us, so that our inst3 points at something valid.
                    // Note this only matters in the exception case anyway, and in that case we will skip the next handler;
                    // however its IRQ cause should immediately cause re-entry of the IRQ, and the only side
                    // effect is potentially a brief out-of-priority-order execution of the handlers.
                    struct irq_handler_chain_slot *next_slot = &irq_handler_chain_slots[next_slot_index];
                    to_free_slot->handler = next_slot->handler;
                    to_free_slot->priority = next_slot->priority;
                    to_free_slot->link = next_slot->link;
                    to_free_slot->inst3 = next_slot->link >= 0 ?
                            make_branch(&to_free_slot->inst3, resolve_branch(&next_slot->inst3)) : // b next_slot->next_slot
                            0xbd00; // pop {pc}

                    // add old next slot back to free list
                    next_slot->link = irq_handler_chain_free_slot_head;
                    irq_handler_chain_free_slot_head = next_slot_index;
                } else {
                    // Slot being removed is at the end of the chain
                    if (!exception) {
                        // case when we're not in exception: we physically unlink now
                        if (prev_slot) {
                            // chain is not empty
                            prev_slot->link = -1;
                            prev_slot->inst3 = 0xbd00; // pop {pc}
                        } else {
                            // chain is now empty
                            vtable_handler = __unhandled_user_irq;
                        }
                        // add slot back to free list
                        to_free_slot->link = irq_handler_chain_free_slot_head;
                        irq_handler_chain_free_slot_head = get_slot_index(to_free_slot);
                    } else {
                        // since we are the last slot, we know that our inst3 hasn't executed yet, so we change
                        // it to a bl to irq_handler_chain_remove_tail, which will remove the slot.
                        // NOTE THAT THIS TRASHES PRIORITY AND LINK SINCE THIS IS A 4 BYTE INSTRUCTION
                        // BUT THEY ARE NOT NEEDED NOW
                        insert_branch_and_link(&to_free_slot->inst3, irq_handler_chain_remove_tail);
                    }
                }
            } else {
                assert(false); // not found
            }
            irq_set_enabled(num, was_enabled);
        }
#else
        assert(false); // not found
#endif
    } else {
        vtable_handler = __unhandled_user_irq;
    }
    set_raw_irq_handler_and_unlock(num, vtable_handler, save);
#else
    panic_unsupported();
#endif
}

void irq_set_priority(uint num, uint8_t hardware_priority) {
    check_irq_param(num);

    // note that only 32 bit writes are supported
    io_rw_32 *p = (io_rw_32 *)((PPB_BASE + M0PLUS_NVIC_IPR0_OFFSET) + (num & ~3u));
    *p = (*p & ~(0xffu << (8 * (num & 3u)))) | (((uint32_t) hardware_priority) << (8 * (num & 3u)));
}
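
// Note: the RP2040's M0+ NVIC implements only the top two bits of the 8-bit
// priority field, so the distinct levels are 0x00, 0x40, 0x80 and 0xc0 (lower
// value = higher priority); e.g. irq_set_priority(UART0_IRQ, 0xc0) demotes
// UART0 below the default of PICO_DEFAULT_IRQ_PRIORITY (0x80).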

uint irq_get_priority(uint num) {
    check_irq_param(num);

    // note that only 32 bit reads are supported
    io_rw_32 *p = (io_rw_32 *)((PPB_BASE + M0PLUS_NVIC_IPR0_OFFSET) + (num & ~3u));
    return (uint8_t)(*p >> (8 * (num & 3u)));
}

#if !PICO_DISABLE_SHARED_IRQ_HANDLERS
// used by irq_handler_chain.S to remove the last link in a handler chain after it executes
// note this must be called only with the last slot in a chain (and during the exception)
void irq_add_tail_to_free_list(struct irq_handler_chain_slot *slot) {
    irq_handler_t slot_handler = (irq_handler_t) add_thumb_bit(slot);
    assert(is_shared_irq_raw_handler(slot_handler));

    uint exception = __get_current_exception();
    assert(exception);
    spin_lock_t *lock = spin_lock_instance(PICO_SPINLOCK_ID_IRQ);
    uint32_t save = spin_lock_blocking(lock);
    int8_t slot_index = get_slot_index(slot);
    if (slot_handler == get_vtable()[exception]) {
        get_vtable()[exception] = __unhandled_user_irq;
    } else {
        bool __unused found = false;
        // need to find who points at the slot and update it
        for (uint i = 0; i < count_of(irq_handler_chain_slots); i++) {
            if (irq_handler_chain_slots[i].link == slot_index) {
                irq_handler_chain_slots[i].link = -1;
                irq_handler_chain_slots[i].inst3 = 0xbd00; // pop {pc}
                found = true;
                break;
            }
        }
        assert(found);
    }
    // add slot to free list
    slot->link = irq_handler_chain_free_slot_head;
    irq_handler_chain_free_slot_head = slot_index;
    spin_unlock(lock, save);
}
#endif

void irq_init_priorities(void) {
#if PICO_DEFAULT_IRQ_PRIORITY != 0
    static_assert(!(NUM_IRQS & 3), "");
    uint32_t prio4 = (PICO_DEFAULT_IRQ_PRIORITY & 0xff) * 0x1010101u;
    io_rw_32 *p = (io_rw_32 *)(PPB_BASE + M0PLUS_NVIC_IPR0_OFFSET);
    for (uint i = 0; i < NUM_IRQS / 4; i++) {
        *p++ = prio4;
    }
#endif
}
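
// Multiplying the 8-bit priority by 0x1010101u replicates it into all four
// byte lanes of a 32-bit IPR register (e.g. 0x80 * 0x1010101u == 0x80808080),
// so each word write above initializes the priorities of four IRQs at once.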

#define FIRST_USER_IRQ (NUM_IRQS - NUM_USER_IRQS)

static uint get_user_irq_claim_index(uint irq_num) {
    invalid_params_if(IRQ, irq_num < FIRST_USER_IRQ || irq_num >= NUM_IRQS);
    // we count backwards from the last IRQ, to match the existing hard-coded uses of user IRQs
    // in the SDK, which were previously using 31
    static_assert(NUM_IRQS - FIRST_USER_IRQ <= 8, ""); // we only use a single byte's worth of claim bits today
    return NUM_IRQS - irq_num - 1u;
}
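
// For example, on RP2040 (NUM_IRQS == 32, NUM_USER_IRQS == 6) user IRQ 31 maps
// to claim index 0 and user IRQ 26 (the first user IRQ) to claim index 5.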

void user_irq_claim(uint irq_num) {
    hw_claim_or_assert(&user_irq_claimed[get_core_num()], get_user_irq_claim_index(irq_num), "User IRQ is already claimed");
}

void user_irq_unclaim(uint irq_num) {
    hw_claim_clear(&user_irq_claimed[get_core_num()], get_user_irq_claim_index(irq_num));
}

int user_irq_claim_unused(bool required) {
    int bit = hw_claim_unused_from_range(&user_irq_claimed[get_core_num()], required, 0, NUM_USER_IRQS - 1, "No user IRQs are available");
    if (bit >= 0) bit = (int)NUM_IRQS - bit - 1;
    return bit;
}
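
// Example usage (an illustrative sketch; my_soft_isr is a hypothetical
// irq_handler_t): claiming a spare user IRQ as a software-triggered handler
// on the current core:
//
//     int irq = user_irq_claim_unused(true); // panics if none are free
//     irq_set_exclusive_handler((uint)irq, my_soft_isr);
//     irq_set_enabled((uint)irq, true);
//     irq_set_pending((uint)irq); // fire it from software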

bool user_irq_is_claimed(uint irq_num) {
    return hw_is_claimed(&user_irq_claimed[get_core_num()], get_user_irq_claim_index(irq_num));
}