/*
 * Copyright (c) 2012-2015 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <assert.h>
#include <bits.h>
#include <err.h>
#include <sys/types.h>
#include <debug.h>
#include <dev/interrupt/arm_gic.h>
#include <inttypes.h>
#include <reg.h>
#include <kernel/thread.h>
#include <kernel/debug.h>
#include <kernel/vm.h>
#include <lk/init.h>
#include <lk/macros.h>
#include <platform/interrupts.h>
#include <arch/ops.h>
#include <platform/gic.h>
#include <string.h>
#include <trace.h>
#if WITH_LIB_SM
#include <lib/sm.h>
#include <lib/sm/sm_err.h>
#endif

#include "arm_gic_common.h"

#if GIC_VERSION > 2
#include "gic_v3.h"
#endif

#define LOCAL_TRACE 0

#if ARCH_ARM
#define iframe arm_iframe
#define IFRAME_PC(frame) ((frame)->pc)
#endif
#if ARCH_ARM64
#define iframe arm64_iframe_short
#define IFRAME_PC(frame) ((frame)->elr)
#endif

void platform_fiq(struct iframe *frame);
static status_t arm_gic_set_secure_locked(u_int irq, bool secure);
static void gic_set_enable(uint vector, bool enable);
static void arm_gic_init_hw(void);

static spin_lock_t gicd_lock;
#if WITH_LIB_SM
#define GICD_LOCK_FLAGS SPIN_LOCK_FLAG_IRQ_FIQ
#else
#define GICD_LOCK_FLAGS SPIN_LOCK_FLAG_INTERRUPTS
#endif
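
/*
 * GIC interrupt ID layout: IDs 0-15 are SGIs (software-generated
 * interrupts), 16-31 are PPIs (private peripheral interrupts, banked
 * per CPU), and 32 and up are SPIs (shared peripheral interrupts).
 */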
#define GIC_MAX_PER_CPU_INT 32
#define GIC_MAX_SGI_INT 16

#if ARM_GIC_USE_DOORBELL_NS_IRQ
#ifndef GIC_MAX_DEFERRED_ACTIVE_IRQS
#define GIC_MAX_DEFERRED_ACTIVE_IRQS 32
#endif
static bool doorbell_enabled;
#endif

struct arm_gic arm_gics[NUM_ARM_GICS];

static bool arm_gic_check_init(int irq)
{
    /* check if we have a vaddr for gicd, both gicv2 and gicv3/4 use this */
    if (!arm_gics[0].gicd_vaddr) {
        TRACEF("change to interrupt %d ignored before init\n", irq);
        return false;
    }
    return true;
}
96
97 #if WITH_LIB_SM
98 static bool arm_gic_non_secure_interrupts_frozen;
99
arm_gic_interrupt_change_allowed(int irq)100 static bool arm_gic_interrupt_change_allowed(int irq)
101 {
102 if (!arm_gic_non_secure_interrupts_frozen)
103 return arm_gic_check_init(irq);
104
105 TRACEF("change to interrupt %d ignored after booting ns\n", irq);
106 return false;
107 }
108 #else
arm_gic_interrupt_change_allowed(int irq)109 static bool arm_gic_interrupt_change_allowed(int irq)
110 {
111 return arm_gic_check_init(irq);
112 }
113 #endif
114
115 struct int_handler_struct {
116 int_handler handler;
117 void *arg;
118 };
119
120 #ifndef WITH_GIC_COMPACT_TABLE
121 /* Handler and argument storage, per interrupt. */
122 static struct int_handler_struct int_handler_table_per_cpu[GIC_MAX_PER_CPU_INT][SMP_MAX_CPUS];
123 static struct int_handler_struct int_handler_table_shared[MAX_INT-GIC_MAX_PER_CPU_INT];
124
get_int_handler(unsigned int vector,uint cpu)125 static struct int_handler_struct *get_int_handler(unsigned int vector, uint cpu)
126 {
127 if (vector < GIC_MAX_PER_CPU_INT)
128 return &int_handler_table_per_cpu[vector][cpu];
129 else if(vector < MAX_INT)
130 return &int_handler_table_shared[vector - GIC_MAX_PER_CPU_INT];
131 else
132 return NULL;
133 }
134
alloc_int_handler(unsigned int vector,uint cpu)135 static struct int_handler_struct *alloc_int_handler(unsigned int vector, uint cpu) {
136 return get_int_handler(vector, cpu);
137 }

#else /* WITH_GIC_COMPACT_TABLE */

#ifdef WITH_SMP
#error WITH_GIC_COMPACT_TABLE does not support SMP
#endif

/* Maximum count of vector entries that can be registered / handled. */
#ifndef GIC_COMPACT_MAX_HANDLERS
#define GIC_COMPACT_MAX_HANDLERS 16
#endif

/* Array giving a mapping from a vector number to a handler entry index.
 * This structure is kept small so it can be searched reasonably
 * efficiently. The position in int_handler_vecnum[] gives the index into
 * int_handler_table[].
 */
__attribute__((aligned(CACHE_LINE)))
static uint16_t int_handler_vecnum[GIC_COMPACT_MAX_HANDLERS];
static uint16_t int_handler_count = 0;

/* Handler entries themselves. */
static struct int_handler_struct int_handler_table[GIC_COMPACT_MAX_HANDLERS];

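/*
 * Binary search over the sorted vector-number array. On a match, the
 * matching handler lives at the same position in the parallel
 * int_handler_table[] array; returns NULL if the vector was never
 * registered.
 */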
static struct int_handler_struct *bsearch_handler(const uint16_t num, const uint16_t *base, uint_fast16_t count) {
    const uint16_t *bottom = base;

    while (count > 0) {
        const uint16_t *mid = &bottom[count / 2];

        if (num < *mid) {
            count /= 2;
        } else if (num > *mid) {
            bottom = mid + 1;
            count -= count / 2 + 1;
        } else {
            return &int_handler_table[mid - base];
        }
    }

    return NULL;
}

static struct int_handler_struct *get_int_handler(unsigned int vector, uint cpu)
{
    return bsearch_handler(vector, int_handler_vecnum, int_handler_count);
}

static struct int_handler_struct *alloc_int_handler(unsigned int vector, uint cpu)
{
    struct int_handler_struct *handler = get_int_handler(vector, cpu);

    /* Return existing allocation if there is one */
    if (handler) {
        return handler;
    }

    /* Check an allocation is possible */
    assert(int_handler_count < GIC_COMPACT_MAX_HANDLERS);
    assert(spin_lock_held(&gicd_lock));

    /* Find insertion point */
    int i = 0;
    while (i < int_handler_count && vector > int_handler_vecnum[i]) {
        i++;
    }

    /* Move any remainder down */
    const int remainder = int_handler_count - i;
    memmove(&int_handler_vecnum[i + 1], &int_handler_vecnum[i],
            sizeof(int_handler_vecnum[0]) * remainder);
    memmove(&int_handler_table[i + 1], &int_handler_table[i],
            sizeof(int_handler_table[0]) * remainder);

    int_handler_count++;

    /* Initialise the new entry */
    int_handler_vecnum[i] = vector;
    int_handler_table[i].handler = NULL;
    int_handler_table[i].arg = NULL;

    /* Return the allocated handler */
    return &int_handler_table[i];
}
#endif /* WITH_GIC_COMPACT_TABLE */

static bool has_int_handler(unsigned int vector, uint cpu) {
    const struct int_handler_struct *h = get_int_handler(vector, cpu);

    return likely(h && h->handler);
}

#if ARM_GIC_USE_DOORBELL_NS_IRQ
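/*
 * Doorbell scheme, as wired up by the code below: group-1 interrupts
 * that arrive while the secure side runs are acknowledged and EOIed
 * (dropping their priority) but left active; their IDs are parked in
 * deferred_active_irqs[] and a doorbell SGI is injected into the
 * non-secure world instead. Once the non-secure side returns,
 * sm_intc_enable_interrupts() replays the deferred handlers and
 * deactivates each interrupt via icc_dir_el1.
 */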
static status_t arm_gic_set_priority_locked(u_int irq, uint8_t priority);
static u_int deferred_active_irqs[SMP_MAX_CPUS][GIC_MAX_DEFERRED_ACTIVE_IRQS];

static status_t reserve_deferred_active_irq_slot(void)
{
    static unsigned int num_handlers = 0;

    if (num_handlers == GIC_MAX_DEFERRED_ACTIVE_IRQS)
        return ERR_NO_MEMORY;

    num_handlers++;
    return NO_ERROR;
}

static status_t defer_active_irq(unsigned int vector, uint cpu)
{
    uint idx;

    for (idx = 0; idx < GIC_MAX_DEFERRED_ACTIVE_IRQS; idx++) {
        u_int irq = deferred_active_irqs[cpu][idx];

        if (!irq)
            break;

        if (irq == vector) {
            TRACEF("irq %d already deferred on cpu %u!\n", irq, cpu);
            return ERR_ALREADY_EXISTS;
        }
    }

    if (idx == GIC_MAX_DEFERRED_ACTIVE_IRQS)
        panic("deferred active irq list is full on cpu %u\n", cpu);

    deferred_active_irqs[cpu][idx] = vector;
    GICCREG_WRITE(0, icc_eoir1_el1, vector);
    LTRACEF_LEVEL(2, "deferred irq %u on cpu %u\n", vector, cpu);
    return NO_ERROR;
}

static void raise_ns_doorbell_irq(uint cpu)
{
    uint64_t reg = arm_gicv3_sgir_val(ARM_GIC_DOORBELL_IRQ, cpu);

    if (doorbell_enabled) {
        LTRACEF("GICD_SGIR: %" PRIx64 "\n", reg);
        GICCREG_WRITE(0, icc_asgi1r_el1, reg);
    }
}

static status_t fiq_enter_defer_irqs(uint cpu)
{
    bool inject = false;

    do {
        u_int irq = GICCREG_READ(0, icc_iar1_el1) & 0x3ff;

        if (irq >= 1020)
            break;

        if (defer_active_irq(irq, cpu) != NO_ERROR)
            break;

        inject = true;
    } while (true);

    if (inject)
        raise_ns_doorbell_irq(cpu);

    return ERR_NO_MSG;
}

static enum handler_return handle_deferred_irqs(void)
{
    enum handler_return ret = INT_NO_RESCHEDULE;
    uint cpu = arch_curr_cpu_num();

    for (uint idx = 0; idx < GIC_MAX_DEFERRED_ACTIVE_IRQS; idx++) {
        struct int_handler_struct *h;
        u_int irq = deferred_active_irqs[cpu][idx];

        if (!irq)
            break;

        h = get_int_handler(irq, cpu);
        if (h->handler && h->handler(h->arg) == INT_RESCHEDULE)
            ret = INT_RESCHEDULE;

        deferred_active_irqs[cpu][idx] = 0;
        GICCREG_WRITE(0, icc_dir_el1, irq);
        LTRACEF_LEVEL(2, "handled deferred irq %u on cpu %u\n", irq, cpu);
    }

    return ret;
}
#endif /* ARM_GIC_USE_DOORBELL_NS_IRQ */

void register_int_handler(unsigned int vector, int_handler handler, void *arg)
{
    struct int_handler_struct *h;
    uint cpu = arch_curr_cpu_num();

    spin_lock_saved_state_t state;

    if (vector >= MAX_INT)
        panic("register_int_handler: vector out of range %d\n", vector);

    spin_lock_save(&gicd_lock, &state, GICD_LOCK_FLAGS);

    if (arm_gic_interrupt_change_allowed(vector)) {
#if ARM_GIC_USE_DOORBELL_NS_IRQ
        if (reserve_deferred_active_irq_slot() != NO_ERROR) {
            panic("register_int_handler: exceeded %d deferred active irq slots\n",
                  GIC_MAX_DEFERRED_ACTIVE_IRQS);
        }
#endif
#if GIC_VERSION > 2
        arm_gicv3_configure_irq_locked(cpu, vector);
#endif
        h = alloc_int_handler(vector, cpu);
        h->handler = handler;
        h->arg = arg;
#if ARM_GIC_USE_DOORBELL_NS_IRQ
        arm_gic_set_priority_locked(vector, 0x7f);
#endif

        /*
         * On GICv3, SGIs are maskable; on GICv2, whether they are
         * maskable is implementation defined. Callers therefore cannot
         * rely on masking them, so all registered SGIs are enabled
         * here as if they were non-maskable.
         */
        if (vector < GIC_MAX_SGI_INT) {
            gic_set_enable(vector, true);
        }
    }

    spin_unlock_restore(&gicd_lock, state, GICD_LOCK_FLAGS);
}
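
/*
 * Typical use from platform code (an illustrative sketch only;
 * UART0_IRQ and uart0_irq_handler are hypothetical names, not part of
 * this driver):
 *
 *   static enum handler_return uart0_irq_handler(void *arg)
 *   {
 *       return INT_RESCHEDULE;
 *   }
 *
 *   register_int_handler(UART0_IRQ, uart0_irq_handler, NULL);
 *   unmask_interrupt(UART0_IRQ);
 *
 * Note that SGI vectors (< GIC_MAX_SGI_INT) are enabled by
 * registration itself; other vectors stay masked until
 * unmask_interrupt() is called.
 */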

#define GIC_REG_COUNT(bit_per_reg) DIV_ROUND_UP(MAX_INT, (bit_per_reg))
#define DEFINE_GIC_SHADOW_REG(name, bit_per_reg, init_val, init_from) \
    uint32_t (name)[GIC_REG_COUNT(bit_per_reg)] = { \
        [(init_from / bit_per_reg) ... \
         (GIC_REG_COUNT(bit_per_reg) - 1)] = (init_val) \
    }
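
/*
 * Shadow copies of GICD register banks, replayed by arm_gic_init_hw()
 * when the distributor is (re)initialized. The second macro argument
 * is the number of interrupts covered by each 32-bit register: 32 for
 * one-bit fields such as IGROUPR, 4 for byte-wide fields such as
 * ITARGETSR. Entries covering interrupts from init_from upward start
 * out as init_val.
 */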

#if WITH_LIB_SM
static DEFINE_GIC_SHADOW_REG(gicd_igroupr, 32, ~0U, 0);
#endif
static DEFINE_GIC_SHADOW_REG(gicd_itargetsr, 4, 0x01010101, 32);

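/*
 * Each GICD_ISENABLER/ICENABLER bank covers 32 interrupts, one bit per
 * interrupt. Writing a 1 bit to ISENABLER enables that interrupt and
 * writing a 1 bit to ICENABLER disables it, so no read-modify-write is
 * needed. On GICv3/v4 the first bank (SGIs and PPIs) lives on the
 * per-CPU redistributor instead of the distributor.
 */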
static void gic_set_enable(uint vector, bool enable)
{
    int reg = vector / 32;
    uint32_t mask = 1ULL << (vector % 32);

#if GIC_VERSION > 2
    if (reg == 0) {
        uint32_t cpu = arch_curr_cpu_num();

        /* On GICv3/v4 these are on GICR */
        if (enable)
            GICRREG_WRITE(0, cpu, GICR_ISENABLER0, mask);
        else
            GICRREG_WRITE(0, cpu, GICR_ICENABLER0, mask);
        return;
    }
#endif
    if (enable)
        GICDREG_WRITE(0, GICD_ISENABLER(reg), mask);
    else {
        GICDREG_WRITE(0, GICD_ICENABLER(reg), mask);

#if GIC_VERSION > 2
        /* for GIC V3, make sure write is complete */
        arm_gicv3_wait_for_write_complete();
#endif
    }
}

static void arm_gic_init_percpu(uint level)
{
#if GIC_VERSION > 2
    /* GICv3/v4 */
    arm_gicv3_init_percpu();
#else
    /* GICv2 */
#if WITH_LIB_SM
    GICCREG_WRITE(0, GICC_CTLR, 0xb); // enable GIC0 and select fiq mode for secure
    GICDREG_WRITE(0, GICD_IGROUPR(0), ~0U); /* GICD_IGROUPR0 is banked */
#else
    GICCREG_WRITE(0, GICC_CTLR, 1); // enable GIC0
#endif
    GICCREG_WRITE(0, GICC_PMR, 0xFF); // unmask interrupts at all priority levels
#endif /* GIC_VERSION > 2 */
}

LK_INIT_HOOK_FLAGS(arm_gic_init_percpu,
                   arm_gic_init_percpu,
                   LK_INIT_LEVEL_PLATFORM_EARLY, LK_INIT_FLAG_SECONDARY_CPUS);

static void arm_gic_suspend_cpu(uint level)
{
#if GIC_VERSION > 2
    arm_gicv3_suspend_cpu(arch_curr_cpu_num());
#endif
}

LK_INIT_HOOK_FLAGS(arm_gic_suspend_cpu, arm_gic_suspend_cpu,
                   LK_INIT_LEVEL_PLATFORM, LK_INIT_FLAG_CPU_OFF);

static void arm_gic_resume_cpu(uint level)
{
    spin_lock_saved_state_t state;
    __UNUSED bool resume_gicd = false;

    spin_lock_save(&gicd_lock, &state, GICD_LOCK_FLAGS);

#if GIC_VERSION > 2
    if (!(GICDREG_READ(0, GICD_CTLR) & 5)) {
#else
    if (!(GICDREG_READ(0, GICD_CTLR) & 1)) {
#endif
        dprintf(SPEW, "%s: distributor is off, calling arm_gic_init_hw instead\n", __func__);
        arm_gic_init_hw();
        resume_gicd = true;
    } else {
        arm_gic_init_percpu(0);
    }

#if GIC_VERSION > 2
    {
        uint cpu = arch_curr_cpu_num();
        uint max_irq = resume_gicd ? MAX_INT : GIC_MAX_PER_CPU_INT;

        for (uint v = 0; v < max_irq; v++) {
            if (has_int_handler(v, cpu)) {
                arm_gicv3_configure_irq_locked(cpu, v);
            }
        }
        arm_gicv3_resume_cpu_locked(cpu, resume_gicd);
    }
#endif
    spin_unlock_restore(&gicd_lock, state, GICD_LOCK_FLAGS);
}

LK_INIT_HOOK_FLAGS(arm_gic_resume_cpu, arm_gic_resume_cpu,
                   LK_INIT_LEVEL_PLATFORM, LK_INIT_FLAG_CPU_RESUME);

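/*
 * Bits [7:5] of GICD_TYPER hold the GICv2 CPUNumber field: the number
 * of implemented CPU interfaces minus one.
 */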
static int arm_gic_max_cpu(void)
{
    return (GICDREG_READ(0, GICD_TYPER) >> 5) & 0x7;
}

static void arm_gic_init_hw(void)
{
#if GIC_VERSION > 2
    /* GICv3/v4 */
    arm_gicv3_init();
#else
    int i;

    for (i = 0; i < MAX_INT; i += 32) {
        GICDREG_WRITE(0, GICD_ICENABLER(i / 32), ~0U);
        GICDREG_WRITE(0, GICD_ICPENDR(i / 32), ~0U);
    }

    if (arm_gic_max_cpu() > 0) {
        /* Set external interrupts to target cpu 0 */
        for (i = 32; i < MAX_INT; i += 4) {
            GICDREG_WRITE(0, GICD_ITARGETSR(i / 4), gicd_itargetsr[i / 4]);
        }
    }

    GICDREG_WRITE(0, GICD_CTLR, 1); // enable GIC0
#if WITH_LIB_SM
    GICDREG_WRITE(0, GICD_CTLR, 3); // enable GIC0 ns interrupts
    /*
     * Iterate through all IRQs and set them to non-secure
     * mode. This will allow the non-secure side to handle
     * all the interrupts we don't explicitly claim.
     */
    for (i = 32; i < MAX_INT; i += 32) {
        u_int reg = i / 32;
        GICDREG_WRITE(0, GICD_IGROUPR(reg), gicd_igroupr[reg]);
    }
#endif
#endif /* GIC_VERSION > 2 */
    arm_gic_init_percpu(0);
}

void arm_gic_init(void) {
#ifdef GICBASE
    arm_gics[0].gicd_vaddr = GICBASE(0) + GICD_OFFSET;
    arm_gics[0].gicd_size = GICD_MIN_SIZE;
#if GIC_VERSION > 2
    arm_gics[0].gicr_vaddr = GICBASE(0) + GICR_OFFSET;
    arm_gics[0].gicr_size = GICR_CPU_OFFSET(SMP_MAX_CPUS - 1) + GICR_MIN_SIZE;
#else /* GIC_VERSION > 2 */
    arm_gics[0].gicc_vaddr = GICBASE(0) + GICC_OFFSET;
    arm_gics[0].gicc_size = GICC_MIN_SIZE;
#endif /* GIC_VERSION > 2 */
#else
    /* Platforms should define GICBASE if they want to call this */
    panic("%s: GICBASE not defined\n", __func__);
#endif /* GICBASE */

    arm_gic_init_hw();
}
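
/*
 * A hypothetical sketch of the platform glue arm_gic_init() expects
 * from <platform/gic.h> (the addresses are illustrative, not from any
 * real platform):
 *
 *   #define GICBASE(n)  (0xf0000000U)
 *   #define GICD_OFFSET (0x1000)
 *   #define GICC_OFFSET (0x2000)
 *
 * Platforms that discover the GIC location at runtime should skip
 * GICBASE and call arm_gic_init_map() below instead.
 */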

static void arm_map_regs(const char* name,
                         vaddr_t* vaddr,
                         paddr_t paddr,
                         size_t size) {
    status_t ret;
    void* vaddrp = (void*)vaddr;

    if (!size) {
        return;
    }

    ret = vmm_alloc_physical(vmm_get_kernel_aspace(), "gic", size, &vaddrp, 0,
                             paddr, 0, ARCH_MMU_FLAG_UNCACHED_DEVICE |
                             ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    if (ret) {
        panic("%s: failed %d\n", __func__, ret);
    }

    *vaddr = (vaddr_t)vaddrp;
}

void arm_gic_init_map(struct arm_gic_init_info* init_info)
{
    if (init_info->gicd_size < GICD_MIN_SIZE) {
        panic("%s: gicd mapping too small %zu\n", __func__,
              init_info->gicd_size);
    }
    arm_map_regs("gicd", &arm_gics[0].gicd_vaddr, init_info->gicd_paddr,
                 init_info->gicd_size);
    arm_gics[0].gicd_size = init_info->gicd_size;

#if GIC_VERSION > 2
    if (init_info->gicr_size < GICR_CPU_OFFSET(SMP_MAX_CPUS - 1) + GICR_MIN_SIZE) {
        panic("%s: gicr mapping too small %zu\n", __func__,
              init_info->gicr_size);
    }
    arm_map_regs("gicr", &arm_gics[0].gicr_vaddr, init_info->gicr_paddr,
                 init_info->gicr_size);
    arm_gics[0].gicr_size = init_info->gicr_size;
#else /* GIC_VERSION > 2 */
    if (init_info->gicc_size < GICC_MIN_SIZE) {
        panic("%s: gicc mapping too small %zu\n", __func__,
              init_info->gicc_size);
    }
    arm_map_regs("gicc", &arm_gics[0].gicc_vaddr, init_info->gicc_paddr,
                 init_info->gicc_size);
    arm_gics[0].gicc_size = init_info->gicc_size;
#endif /* GIC_VERSION > 2 */

    arm_gic_init_hw();
}
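
/*
 * An illustrative GICv2 caller of arm_gic_init_map() (the physical
 * addresses are hypothetical, e.g. parsed from a device tree; for
 * GIC_VERSION > 2 fill in gicr_paddr/gicr_size instead of gicc_*):
 *
 *   struct arm_gic_init_info info = {
 *       .gicd_paddr = 0x08000000,
 *       .gicd_size = 0x10000,
 *       .gicc_paddr = 0x08010000,
 *       .gicc_size = 0x10000,
 *   };
 *   arm_gic_init_map(&info);
 */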

static status_t arm_gic_set_secure_locked(u_int irq, bool secure)
{
#if WITH_LIB_SM
    int reg = irq / 32;
    uint32_t mask = 1ULL << (irq % 32);

    if (irq >= MAX_INT)
        return ERR_INVALID_ARGS;

    if (secure)
        GICDREG_WRITE(0, GICD_IGROUPR(reg), (gicd_igroupr[reg] &= ~mask));
    else
        GICDREG_WRITE(0, GICD_IGROUPR(reg), (gicd_igroupr[reg] |= mask));
    LTRACEF("irq %d, secure %d, GICD_IGROUP%d = %x\n",
            irq, secure, reg, GICDREG_READ(0, GICD_IGROUPR(reg)));
#endif
    return NO_ERROR;
}

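/*
 * GICD_ITARGETSR holds one byte per interrupt (four interrupts per
 * 32-bit register); each bit set in that byte routes the interrupt to
 * the corresponding CPU interface. The gicd_itargetsr shadow keeps the
 * intended value so it can be replayed if the distributor loses state.
 */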
static status_t arm_gic_set_target_locked(u_int irq, u_int cpu_mask, u_int enable_mask)
{
    u_int reg = irq / 4;
    u_int shift = 8 * (irq % 4);
    u_int old_val;
    u_int new_val;

    cpu_mask = (cpu_mask & 0xff) << shift;
    enable_mask = (enable_mask << shift) & cpu_mask;

    old_val = GICDREG_READ(0, GICD_ITARGETSR(reg));
    new_val = (gicd_itargetsr[reg] & ~cpu_mask) | enable_mask;
    GICDREG_WRITE(0, GICD_ITARGETSR(reg), (gicd_itargetsr[reg] = new_val));
    LTRACEF("irq %i, GICD_ITARGETSR%d %x => %x (got %x)\n",
            irq, reg, old_val, new_val, GICDREG_READ(0, GICD_ITARGETSR(reg)));

    return NO_ERROR;
}

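/*
 * GICD_IPRIORITYR packs four 8-bit priority fields per 32-bit register,
 * with lower values meaning higher priority. For example, irq 35 lives
 * in register 35 / 4 = 8 at bit shift 8 * (35 % 4) = 24.
 */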
static status_t arm_gic_get_priority(u_int irq)
{
    u_int reg = irq / 4;
    u_int shift = 8 * (irq % 4);
    return (GICDREG_READ(0, GICD_IPRIORITYR(reg)) >> shift) & 0xff;
}

static status_t arm_gic_set_priority_locked(u_int irq, uint8_t priority)
{
    u_int reg = irq / 4;
    u_int shift = 8 * (irq % 4);
    u_int mask = 0xffU << shift;
    uint32_t regval;

#if GIC_VERSION > 2
    if (irq < 32) {
        uint cpu = arch_curr_cpu_num();

        /* On GICv3 IPRIORITY registers are on redistributor */
        regval = GICRREG_READ(0, cpu, GICR_IPRIORITYR(reg));
        LTRACEF("irq %i, cpu %d: old GICR_IPRIORITYR%d = %x\n", irq, cpu, reg,
                regval);
        regval = (regval & ~mask) | ((uint32_t)priority << shift);
        GICRREG_WRITE(0, cpu, GICR_IPRIORITYR(reg), regval);
655 LTRACEF("irq %i, cpu %d, new GICD_IPRIORITYR%d = %x, req %x\n",
656 irq, cpu, reg, GICDREG_READ(0, GICD_IPRIORITYR(reg)), regval);
        return 0;
    }
#endif

    regval = GICDREG_READ(0, GICD_IPRIORITYR(reg));
    LTRACEF("irq %i, old GICD_IPRIORITYR%d = %x\n", irq, reg, regval);
    regval = (regval & ~mask) | ((uint32_t)priority << shift);
    GICDREG_WRITE(0, GICD_IPRIORITYR(reg), regval);
    LTRACEF("irq %i, new GICD_IPRIORITYR%d = %x, req %x\n",
            irq, reg, GICDREG_READ(0, GICD_IPRIORITYR(reg)), regval);

    return 0;
}

status_t arm_gic_sgi(u_int irq, u_int flags, u_int cpu_mask)
{
    if (irq >= 16) {
        return ERR_INVALID_ARGS;
    }

#if GIC_VERSION > 2
    for (size_t cpu = 0; cpu < SMP_MAX_CPUS; cpu++) {
        if (!((cpu_mask >> cpu) & 1)) {
            continue;
        }

        uint64_t val = arm_gicv3_sgir_val(irq, cpu);

        GICCREG_WRITE(0, GICC_PRIMARY_SGIR, val);
    }

#else /* else GIC_VERSION > 2 */

    u_int val =
        ((flags & ARM_GIC_SGI_FLAG_TARGET_FILTER_MASK) << 24) |
        ((cpu_mask & 0xff) << 16) |
        ((flags & ARM_GIC_SGI_FLAG_NS) ? (1U << 15) : 0) |
        (irq & 0xf);

    LTRACEF("GICD_SGIR: %x\n", val);

    GICDREG_WRITE(0, GICD_SGIR, val);

#endif /* else GIC_VERSION > 2 */

    return NO_ERROR;
}
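
/*
 * Illustrative use (the vector and mask are arbitrary example values):
 * send SGI 1 to CPUs 0 and 2. On GICv2, passing 0 for flags selects
 * plain cpu_mask-targeted delivery; the ARM_GIC_SGI_FLAG_* constants
 * in the header select other target filters or non-secure delivery.
 *
 *   arm_gic_sgi(1, 0, 0x5);
 */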

status_t mask_interrupt(unsigned int vector)
{
    if (vector >= MAX_INT)
        return ERR_INVALID_ARGS;

    if (arm_gic_interrupt_change_allowed(vector))
        gic_set_enable(vector, false);

    return NO_ERROR;
}

status_t unmask_interrupt(unsigned int vector)
{
    if (vector >= MAX_INT)
        return ERR_INVALID_ARGS;

    if (arm_gic_interrupt_change_allowed(vector))
        gic_set_enable(vector, true);

    return NO_ERROR;
}

static enum handler_return __platform_irq(struct iframe *frame)
{
    // get the current vector
    uint32_t iar = GICCREG_READ(0, GICC_PRIMARY_IAR);
    unsigned int vector = iar & 0x3ff;

    if (vector >= 0x3fe) {
#if WITH_LIB_SM && ARM_GIC_USE_DOORBELL_NS_IRQ
        // spurious or non-secure interrupt
        return sm_handle_irq();
#else
        // spurious
        return INT_NO_RESCHEDULE;
#endif
    }

    THREAD_STATS_INC(interrupts);
    KEVLOG_IRQ_ENTER(vector);

    uint cpu = arch_curr_cpu_num();

    LTRACEF_LEVEL(2, "iar 0x%x cpu %u currthread %p vector %d pc 0x%" PRIxPTR "\n", iar, cpu,
                  get_current_thread(), vector, (uintptr_t)IFRAME_PC(frame));

    // deliver the interrupt
    enum handler_return ret;

    ret = INT_NO_RESCHEDULE;
    struct int_handler_struct *handler = get_int_handler(vector, cpu);
    if (handler && handler->handler)
        ret = handler->handler(handler->arg);

    GICCREG_WRITE(0, GICC_PRIMARY_EOIR, iar);
#if ARM_GIC_USE_DOORBELL_NS_IRQ
    GICCREG_WRITE(0, icc_dir_el1, iar);
#endif

    LTRACEF_LEVEL(2, "cpu %u exit %d\n", cpu, ret);

    KEVLOG_IRQ_EXIT(vector);

    return ret;
}

enum handler_return platform_irq(struct iframe *frame)
{
#if WITH_LIB_SM && !ARM_GIC_USE_DOORBELL_NS_IRQ
    uint32_t ahppir = GICCREG_READ(0, GICC_PRIMARY_HPPIR);
    uint32_t pending_irq = ahppir & 0x3ff;
    struct int_handler_struct *h;
    uint cpu = arch_curr_cpu_num();

#if ARM_MERGE_FIQ_IRQ
    {
        uint32_t hppir = GICCREG_READ(0, GICC_HPPIR);
        uint32_t pending_fiq = hppir & 0x3ff;
        if (pending_fiq < MAX_INT) {
            platform_fiq(frame);
            return INT_NO_RESCHEDULE;
        }
    }
#endif

    LTRACEF("ahppir %d\n", ahppir);
    if (pending_irq < MAX_INT && has_int_handler(pending_irq, cpu)) {
        enum handler_return ret = 0;
        uint32_t irq;
        uint8_t old_priority;
        spin_lock_saved_state_t state;

        spin_lock_save(&gicd_lock, &state, GICD_LOCK_FLAGS);

        /* Temporarily raise the priority of the interrupt we want to
         * handle so another interrupt does not take its place before
         * we can acknowledge it.
         */
        old_priority = arm_gic_get_priority(pending_irq);
        arm_gic_set_priority_locked(pending_irq, 0);
        DSB;
        irq = GICCREG_READ(0, GICC_PRIMARY_IAR) & 0x3ff;
        arm_gic_set_priority_locked(pending_irq, old_priority);

        spin_unlock_restore(&gicd_lock, state, GICD_LOCK_FLAGS);

        LTRACEF("irq %d\n", irq);
        h = get_int_handler(irq, cpu);
        if (likely(h && h->handler))
            ret = h->handler(h->arg);
        else
            TRACEF("unexpected irq %d != %d may get lost\n", irq, pending_irq);
        GICCREG_WRITE(0, GICC_PRIMARY_EOIR, irq);
        return ret;
    }
    return sm_handle_irq();
#else
    return __platform_irq(frame);
#endif
}

void platform_fiq(struct iframe *frame)
{
#if WITH_LIB_SM
    sm_handle_fiq();
#else
    PANIC_UNIMPLEMENTED;
#endif
}

#if WITH_LIB_SM
static status_t arm_gic_get_next_irq_locked(u_int min_irq, uint type)
{
#if ARM_GIC_USE_DOORBELL_NS_IRQ
    if (type == TRUSTY_IRQ_TYPE_DOORBELL && min_irq <= ARM_GIC_DOORBELL_IRQ) {
        doorbell_enabled = true;
        return ARM_GIC_DOORBELL_IRQ;
    }
#else
    u_int irq;
    u_int max_irq = type == TRUSTY_IRQ_TYPE_PER_CPU ? GIC_MAX_PER_CPU_INT :
                    type == TRUSTY_IRQ_TYPE_NORMAL ? MAX_INT : 0;
    uint cpu = arch_curr_cpu_num();

    if (type == TRUSTY_IRQ_TYPE_NORMAL && min_irq < GIC_MAX_PER_CPU_INT)
        min_irq = GIC_MAX_PER_CPU_INT;

    for (irq = min_irq; irq < max_irq; irq++)
        if (has_int_handler(irq, cpu))
            return irq;
#endif

    return SM_ERR_END_OF_INPUT;
}

long smc_intc_get_next_irq(struct smc32_args *args)
{
    status_t ret;
    spin_lock_saved_state_t state;

    spin_lock_save(&gicd_lock, &state, GICD_LOCK_FLAGS);

#if !ARM_GIC_USE_DOORBELL_NS_IRQ
    arm_gic_non_secure_interrupts_frozen = true;
#endif
    ret = arm_gic_get_next_irq_locked(args->params[0], args->params[1]);
    LTRACEF("min_irq %d, per_cpu %d, ret %d\n",
            args->params[0], args->params[1], ret);

    spin_unlock_restore(&gicd_lock, state, GICD_LOCK_FLAGS);

    return ret;
}

enum handler_return sm_intc_enable_interrupts(void)
{
#if ARM_GIC_USE_DOORBELL_NS_IRQ
    return handle_deferred_irqs();
#else
    return INT_NO_RESCHEDULE;
#endif
}

static status_t fiq_enter_unexpected_irq(u_int cpu)
{
#if GIC_VERSION > 2
    u_int irq = GICCREG_READ(0, icc_iar0_el1) & 0x3ff;
#else
    u_int irq = GICCREG_READ(0, GICC_IAR) & 0x3ff;
#endif

    LTRACEF("cpu %d, irq %i\n", cpu, irq);

    if (irq >= 1020) {
        LTRACEF("spurious fiq: cpu %d, new %d\n", cpu, irq);
        return ERR_NO_MSG;
    }

#if GIC_VERSION > 2
    GICCREG_WRITE(0, icc_eoir0_el1, irq);
#else
    GICCREG_WRITE(0, GICC_EOIR, irq);
#endif

    dprintf(INFO, "got disabled fiq: cpu %d, new %d\n", cpu, irq);
    return ERR_NOT_READY;
}

status_t sm_intc_fiq_enter(void)
{
    u_int cpu = arch_curr_cpu_num();
#if ARM_GIC_USE_DOORBELL_NS_IRQ
    return fiq_enter_defer_irqs(cpu);
#else
    return fiq_enter_unexpected_irq(cpu);
#endif
}
#endif /* WITH_LIB_SM */