/*
 * Copyright (c) 2019 LK Trusty Authors. All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <arch/ops.h>
#include <assert.h>
#include <bits.h>
#include <lk/macros.h>
#include <stdint.h>

#include <dev/interrupt/arm_gic.h>

#include "arm_gic_common.h"
#include "gic_v3.h"

#define WAKER_QSC_BIT (0x1u << 31)
#define WAKER_CA_BIT (0x1u << 2)
#define WAKER_PS_BIT (0x1u << 1)
#define WAKER_SL_BIT (0x1u << 0)

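/*
 * GICR_WAKER.ProcessorSleep (bit 1) and ChildrenAsleep (bit 2) are
 * architectural; the Sleep (bit 0) and Quiescent (bit 31) bits handled
 * here are IMPLEMENTATION DEFINED extensions found on GIC-600 class
 * implementations. Leaving sleep means clearing Sleep and then polling
 * until Quiescent reads as 0.
 */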
static void gicv3_gicr_exit_sleep(uint32_t cpu) {
    uint32_t val = GICRREG_READ(0, cpu, GICR_WAKER);

    if (val & WAKER_QSC_BIT) {
        /* clear sleep bit */
        GICRREG_WRITE(0, cpu, GICR_WAKER, val & ~WAKER_SL_BIT);
        while (GICRREG_READ(0, cpu, GICR_WAKER) & WAKER_QSC_BIT) {
        }
    }
}

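/*
 * Architectural wake-up handshake: clear GICR_WAKER.ProcessorSleep, then
 * poll until GICR_WAKER.ChildrenAsleep reads as 0, after which the
 * redistributor forwards pending interrupts to the CPU interface.
 */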
static void gicv3_gicr_mark_awake(uint32_t cpu) {
    uint32_t val = GICRREG_READ(0, cpu, GICR_WAKER);

    if (val & WAKER_CA_BIT) {
        /* mark CPU as awake */
        GICRREG_WRITE(0, cpu, GICR_WAKER, val & ~WAKER_PS_BIT);
        while (GICRREG_READ(0, cpu, GICR_WAKER) & WAKER_CA_BIT) {
        }
    }
}

#if GIC600
/*
 * GIC-600 implements an additional GICR power control register
 */
#define GICR_PWRR (GICR_OFFSET + 0x0024)

#define PWRR_ON (0x0u << 0)
#define PWRR_OFF (0x1u << 0)
#define PWRR_RDGPD (0x1u << 2)
#define PWRR_RDGPO (0x1u << 3)
#define PWRR_RDGP_MASK (PWRR_RDGPD | PWRR_RDGPO)

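/*
 * PWRR bit 0 requests the redistributor power state. RDGPD (bit 2)
 * reflects the requested state of the redistributor group and RDGPO
 * (bit 3) the state the group is actually in, so a transition is
 * complete once the two agree: both clear after power-up, both set
 * after power-down.
 */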
static void gicv3_gicr_power_on(uint32_t cpu) {
    /* Initiate power up */
    GICRREG_WRITE(0, cpu, GICR_PWRR, PWRR_ON);

    /* wait until it is complete (both bits are clear) */
    while (GICRREG_READ(0, cpu, GICR_PWRR) & PWRR_RDGP_MASK) {
    }
}

static void gicv3_gicr_power_off(uint32_t cpu) {
    /* initiate power down */
    GICRREG_WRITE(0, cpu, GICR_PWRR, PWRR_OFF);

    /* wait until it is complete (both bits are set) */
    while ((GICRREG_READ(0, cpu, GICR_PWRR) & PWRR_RDGP_MASK) !=
           PWRR_RDGP_MASK) {
    }
}
#else /* GIC600 */

static void gicv3_gicr_power_on(uint32_t cpu) {}
static void gicv3_gicr_power_off(uint32_t cpu) {}

#endif /* GIC600 */

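/*
 * Bring the current CPU's redistributor online. The ordering matters:
 * leave the implementation-defined sleep state first, power the
 * redistributor on (a no-op unless GIC600 is set), and only then perform
 * the architectural ProcessorSleep/ChildrenAsleep handshake.
 */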
static void gicv3_gicr_init(void) {
    uint32_t cpu = arch_curr_cpu_num();

    gicv3_gicr_exit_sleep(cpu);
    gicv3_gicr_power_on(cpu);
    gicv3_gicr_mark_awake(cpu);
}

/* GICD_CTLR register write pending bit */
#define GICD_CTLR_RWP (0x1U << 31)

void arm_gicv3_wait_for_write_complete(void) {
    /* wait until write complete */
    while (GICDREG_READ(0, GICD_CTLR) & GICD_CTLR_RWP) {
    }
}

static void gicv3_gicd_ctrl_write(uint32_t val) {
    /* write CTRL register */
    GICDREG_WRITE(0, GICD_CTLR, val);

    /* wait until write complete */
    arm_gicv3_wait_for_write_complete();
}

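/*
 * The two grp bits land in the {GICD_IGRPMODR, GICD_IGROUPR} pair that
 * GICv3 uses to encode an interrupt's group: 0b00 = Secure Group 0,
 * 0b01 = Non-secure Group 1, 0b10 = Secure Group 1. Bit 0 of grp selects
 * the IGROUPR value, bit 1 the IGRPMODR value.
 */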
static void gicv3_gicd_setup_irq_group(uint32_t vector, uint32_t grp) {
    uint32_t val;
    uint32_t mask;

    ASSERT((vector >= 32) && (vector < MAX_INT));

    mask = (0x1u << (vector % 32));

    val = GICDREG_READ(0, GICD_IGROUPR(vector / 32));
    if (grp & 0x1u) {
        val |= mask;
    } else {
        val &= ~mask;
    }
    GICDREG_WRITE(0, GICD_IGROUPR(vector / 32), val);

    val = GICDREG_READ(0, GICD_IGRPMODR(vector / 32));
    if (grp & 0x2u) {
        val |= mask;
    } else {
        val &= ~mask;
    }
    GICDREG_WRITE(0, GICD_IGRPMODR(vector / 32), val);
}

static void gicv3_gicd_setup_default_group(uint32_t grp) {
    uint32_t i;

    /* Assign all interrupts to selected group */
    for (i = 32; i < MAX_INT; i += 32) {
        GICDREG_WRITE(0, GICD_IGROUPR(i / 32), (grp & 0x1u) ? ~0U : 0);
        GICDREG_WRITE(0, GICD_IGRPMODR(i / 32), (grp & 0x2u) ? ~0U : 0);
    }
}

static void gicv3_gicr_setup_irq_group(uint32_t vector, uint32_t grp) {
    uint32_t val;
    uint32_t mask;
    uint32_t cpu = arch_curr_cpu_num();

    ASSERT(vector < 32);

    mask = (0x1u << vector);

    val = GICRREG_READ(0, cpu, GICR_IGROUPR0);
    if (grp & 0x1u) {
        val |= mask;
    } else {
        val &= ~mask;
    }
    GICRREG_WRITE(0, cpu, GICR_IGROUPR0, val);

    val = GICRREG_READ(0, cpu, GICR_IGRPMODR0);
    if (grp & 0x2u) {
        val |= mask;
    } else {
        val &= ~mask;
    }
    GICRREG_WRITE(0, cpu, GICR_IGRPMODR0, val);
}

static void gicv3_gicr_setup_default_group(uint32_t grp) {
    uint32_t cpu = arch_curr_cpu_num();

    GICRREG_WRITE(0, cpu, GICR_IGROUPR0, (grp & 0x1u) ? ~0U : 0);
    GICRREG_WRITE(0, cpu, GICR_IGRPMODR0, (grp & 0x2u) ? ~0U : 0);
}

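/*
 * With affinity routing enabled, GICD_CTLR bits 2:0 are the per-group
 * enables (EnableGrp0, EnableGrp1NS, EnableGrp1S), so the non-TZ path
 * below masks off 0x7 before reconfiguring, and the final write enables
 * only the group selected by GICV3_IRQ_GROUP.
 */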
void arm_gicv3_init(void) {
    uint32_t grp_mask = (0x1u << GICV3_IRQ_GROUP);

#if !WITH_LIB_SM
    /* non-TZ */
    int i;

    /* Disable all groups before making changes */
    gicv3_gicd_ctrl_write(GICDREG_READ(0, GICD_CTLR) & ~0x7U);

    for (i = 0; i < MAX_INT; i += 32) {
        GICDREG_WRITE(0, GICD_ICENABLER(i / 32), ~0U);
        GICDREG_WRITE(0, GICD_ICPENDR(i / 32), ~0U);
    }

    /* Direct SPI interrupts to any core (GICD_IROUTER.IRM, bit 31) */
    for (i = 32; i < MAX_INT; i++) {
        GICDREG_WRITE64(0, GICD_IROUTER(i), 0x80000000);
    }
#endif

    /* Enable selected group */
    gicv3_gicd_ctrl_write(GICDREG_READ(0, GICD_CTLR) | grp_mask);
}

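/*
 * In the TZ configuration the distributor and redistributor are set up by
 * ATF, so only the doorbell group (if used) and EOImode are adjusted here.
 * In the non-TZ configuration this CPU's redistributor must be woken
 * first, and ICC_SRE_EL1 is written with SRE|DFB|DIB (0x7) to enable the
 * system register interface before the group enable and priority mask
 * take effect.
 */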
void arm_gicv3_init_percpu(void) {
#if WITH_LIB_SM
    /* TZ */
    /* Initialized by ATF */
#if ARM_GIC_USE_DOORBELL_NS_IRQ
    gicv3_gicr_setup_irq_group(ARM_GIC_DOORBELL_IRQ, GICV3_IRQ_GROUP_GRP1NS);

    /* Enable EOImode=1 */
    GICCREG_WRITE(0, icc_ctlr_el1, (GICCREG_READ(0, icc_ctlr_el1) | 0x2));
#endif
#else
    /* non-TZ */

    /* Init redistributor interface */
    gicv3_gicr_init();

    /* Enable CPU interface access */
    GICCREG_WRITE(0, icc_sre_el1, (GICCREG_READ(0, icc_sre_el1) | 0x7));
#endif

    /* enable selected percpu group */
    if (GICV3_IRQ_GROUP == 0) {
        GICCREG_WRITE(0, icc_igrpen0_el1, 1);
    } else {
        GICCREG_WRITE(0, icc_igrpen1_el1, 1);
    }

    /* Unmask interrupts at all priority levels */
    GICCREG_WRITE(0, icc_pmr_el1, 0xFF);
}

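/*
 * Assign a vector to the group selected at build time. INTIDs below 32
 * (SGIs and PPIs) are banked per CPU in the redistributor; SPIs are
 * configured once through the distributor.
 */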
void arm_gicv3_configure_irq_locked(unsigned int cpu, unsigned int vector) {
    uint32_t grp = GICV3_IRQ_GROUP;

    ASSERT(vector < MAX_INT);

    if (vector < 32) {
        /* SGIs and PPIs */
        gicv3_gicr_setup_irq_group(vector, grp);
    } else {
        /* SPIs */
        gicv3_gicd_setup_irq_group(vector, grp);
    }
}

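/*
 * Interrupt-enable state saved across suspend: one distributor mask per
 * bank of 32 SPIs, shared by all CPUs, plus a per-CPU GICR_ISENABLER0
 * snapshot of the SGI/PPI enables.
 */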
static uint32_t enabled_spi_mask[DIV_ROUND_UP(MAX_INT, 32)];
static uint32_t enabled_ppi_mask[SMP_MAX_CPUS];

void arm_gicv3_suspend_cpu(unsigned int cpu) {
    uint32_t i;
    ASSERT(cpu < SMP_MAX_CPUS);

    if (cpu == 0) {
        /* also save gicd */
        for (i = 32; i < MAX_INT; i += 32) {
            enabled_spi_mask[i / 32] = GICDREG_READ(0, GICD_ISENABLER(i / 32));
        }
    }
    enabled_ppi_mask[cpu] = GICRREG_READ(0, cpu, GICR_ISENABLER0);
}

void arm_gicv3_resume_cpu_locked(unsigned int cpu, bool gicd) {
    uint32_t i;
    ASSERT(cpu < SMP_MAX_CPUS);

    GICRREG_WRITE(0, cpu, GICR_ISENABLER0, enabled_ppi_mask[cpu]);

    if (gicd) {
        /* also resume gicd */
        for (i = 32; i < MAX_INT; i += 32) {
            GICDREG_WRITE(0, GICD_ISENABLER(i / 32), enabled_spi_mask[i / 32]);
        }
    }
}

#if WITH_SMP
STATIC_ASSERT(SMP_CPU_CLUSTER_SHIFT <= 8);
/* SMP_MAX_CPUS needs to be addressable with only two affinities */
STATIC_ASSERT((SMP_MAX_CPUS >> SMP_CPU_CLUSTER_SHIFT) <= 0x100U);

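/*
 * Default linear mapping from logical CPU number to GIC affinity fields:
 * aff0 is the index within a cluster and aff1 the cluster number.
 * Platforms whose MPIDR layout differs can override this __WEAK default.
 */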
__WEAK struct arm_gic_affinities arch_cpu_num_to_gic_affinities(size_t cpu_num) {
    const size_t max_cluster_size = 1U << SMP_CPU_CLUSTER_SHIFT;
    const uint8_t cluster_mask = max_cluster_size - 1;
    struct arm_gic_affinities out = {
            .aff0 = cpu_num & cluster_mask,
            .aff1 = cpu_num >> SMP_CPU_CLUSTER_SHIFT,
            .aff2 = 0,
            .aff3 = 0,
    };
    return out;
}
#endif

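/*
 * ICC_SGI1R_EL1 field layout: TargetList[15:0], Aff1[23:16], INTID[27:24],
 * Aff2[39:32], RS[47:44], Aff3[55:48]. RS selects which block of 16
 * affinity-0 values the TargetList bitmap addresses.
 */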
#define SGIR_AFF1_SHIFT (16)
#define SGIR_AFF2_SHIFT (32)
#define SGIR_AFF3_SHIFT (48)
#define SGIR_IRQ_SHIFT (24)
#define SGIR_RS_SHIFT (44)
#define SGIR_TARGET_LIST_SHIFT (0)
#define SGIR_ASSEMBLE(val, shift) ((uint64_t)(val) << (shift))

uint64_t arm_gicv3_sgir_val(u_int irq, size_t cpu_num) {
    struct arm_gic_affinities affs = arch_cpu_num_to_gic_affinities(cpu_num);
    DEBUG_ASSERT(irq < 16);

    uint8_t range_selector = affs.aff0 >> 4;
    uint16_t target_list = 1U << (affs.aff0 & 0xf);
    return SGIR_ASSEMBLE(irq, SGIR_IRQ_SHIFT) |
           SGIR_ASSEMBLE(affs.aff3, SGIR_AFF3_SHIFT) |
           SGIR_ASSEMBLE(affs.aff2, SGIR_AFF2_SHIFT) |
           SGIR_ASSEMBLE(affs.aff1, SGIR_AFF1_SHIFT) |
           SGIR_ASSEMBLE(range_selector, SGIR_RS_SHIFT) |
           SGIR_ASSEMBLE(target_list, SGIR_TARGET_LIST_SHIFT);
}