/*
 * Copyright (c) 2018-2023, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <lib/mmio.h>
#include <lib/psci/psci.h>
#include <lib/smccc.h>
#include <lib/spinlock.h>
#include <plat/common/platform.h>
#include <services/std_svc.h>

#include <gpc.h>
#include <platform_def.h>

#define FSL_SIP_CONFIG_GPC_MASK		U(0x00)
#define FSL_SIP_CONFIG_GPC_UNMASK	U(0x01)
#define FSL_SIP_CONFIG_GPC_SET_WAKE	U(0x02)
#define FSL_SIP_CONFIG_GPC_PM_DOMAIN	U(0x03)
#define FSL_SIP_CONFIG_GPC_SET_AFF	U(0x04)
#define FSL_SIP_CONFIG_GPC_CORE_WAKE	U(0x05)

#define MAX_HW_IRQ_NUM		U(128)
#define MAX_IMR_NUM		U(4)

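/*
 * Each A53 core (and the M4) has its own bank of MAX_IMR_NUM 32-bit IMR
 * registers in the GPC, together covering MAX_HW_IRQ_NUM hardware IRQs;
 * a bit set in an IMR masks the corresponding IRQ as a wakeup source
 * for that core.
 */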
/* one saved copy of each A53 core's MAX_IMR_NUM IMR registers */
static uint32_t gpc_saved_imrs[MAX_IMR_NUM * PLATFORM_CORE_COUNT];
static uint32_t gpc_wake_irqs[MAX_IMR_NUM];
static uint32_t gpc_imr_offset[] = {
	IMX_GPC_BASE + IMR1_CORE0_A53,
	IMX_GPC_BASE + IMR1_CORE1_A53,
	IMX_GPC_BASE + IMR1_CORE2_A53,
	IMX_GPC_BASE + IMR1_CORE3_A53,
	IMX_GPC_BASE + IMR1_CORE0_M4,
};

spinlock_t gpc_imr_lock[PLATFORM_CORE_COUNT];

static void gpc_imr_core_spin_lock(unsigned int core_id)
{
	spin_lock(&gpc_imr_lock[core_id]);
}

static void gpc_imr_core_spin_unlock(unsigned int core_id)
{
	spin_unlock(&gpc_imr_lock[core_id]);
}

static void gpc_save_imr_lpm(unsigned int core_id, unsigned int imr_idx)
{
	uint32_t reg = gpc_imr_offset[core_id] + imr_idx * 4;

	gpc_imr_core_spin_lock(core_id);

	gpc_saved_imrs[core_id + imr_idx * 4] = mmio_read_32(reg);
	mmio_write_32(reg, ~gpc_wake_irqs[imr_idx]);

	gpc_imr_core_spin_unlock(core_id);
}

static void gpc_restore_imr_lpm(unsigned int core_id, unsigned int imr_idx)
{
	uint32_t reg = gpc_imr_offset[core_id] + imr_idx * 4;
	uint32_t val = gpc_saved_imrs[core_id + imr_idx * 4];

	gpc_imr_core_spin_lock(core_id);

	mmio_write_32(reg, val);

	gpc_imr_core_spin_unlock(core_id);
}

/*
 * On i.MX8MQ, the A53 cluster can only enter LPM mode and shut down
 * the A53 PLAT power domain in system suspend, so LPM wakeup is only
 * used for system suspend. When the system enters suspend, any A53
 * core can be the last one to suspend the system, but the LPM wakeup
 * can only use core 0's IMRs to wake the A53 cluster from LPM, so
 * save the IMRs before suspend and restore them after resume.
 */
void imx_set_sys_wakeup(unsigned int last_core, bool pdn)
{
	unsigned int imr, core;

	if (pdn) {
		for (imr = 0U; imr < MAX_IMR_NUM; imr++) {
			for (core = 0U; core < PLATFORM_CORE_COUNT; core++) {
				gpc_save_imr_lpm(core, imr);
			}
		}
	} else {
		for (imr = 0U; imr < MAX_IMR_NUM; imr++) {
			for (core = 0U; core < PLATFORM_CORE_COUNT; core++) {
				gpc_restore_imr_lpm(core, imr);
			}
		}
	}
}
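
/*
 * Note: callers (typically the platform PSCI suspend hooks) are
 * expected to invoke imx_set_sys_wakeup(last_core, true) on suspend
 * entry and with false on resume. Although last_core is passed in,
 * this implementation programs the IMR banks of all A53 cores.
 */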

static void imx_gpc_hwirq_mask(unsigned int hwirq)
{
	uintptr_t reg;
	unsigned int val;

	if (hwirq >= MAX_HW_IRQ_NUM) {
		return;
	}

	gpc_imr_core_spin_lock(0);
	reg = gpc_imr_offset[0] + (hwirq / 32) * 4;
	val = mmio_read_32(reg);
	val |= 1U << (hwirq % 32);
	mmio_write_32(reg, val);
	gpc_imr_core_spin_unlock(0);
}

static void imx_gpc_hwirq_unmask(unsigned int hwirq)
{
	uintptr_t reg;
	unsigned int val;

	if (hwirq >= MAX_HW_IRQ_NUM) {
		return;
	}

	gpc_imr_core_spin_lock(0);
	reg = gpc_imr_offset[0] + (hwirq / 32) * 4;
	val = mmio_read_32(reg);
	val &= ~(1U << (hwirq % 32));
	mmio_write_32(reg, val);
	gpc_imr_core_spin_unlock(0);
}

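/*
 * Record "hwirq" as a wakeup source (or drop it). The recorded mask is
 * only applied to the hardware IMRs by gpc_save_imr_lpm() when the
 * system enters suspend; see imx_set_sys_wakeup() above.
 */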
static void imx_gpc_set_wake(uint32_t hwirq, bool on)
{
	uint32_t mask, idx;

	if (hwirq >= MAX_HW_IRQ_NUM) {
		return;
	}

	mask = 1U << (hwirq % 32);
	idx = hwirq / 32;
	gpc_wake_irqs[idx] = on ? gpc_wake_irqs[idx] | mask :
				  gpc_wake_irqs[idx] & ~mask;
}

static void imx_gpc_mask_irq0(uint32_t core_id, uint32_t mask)
{
	gpc_imr_core_spin_lock(core_id);
	if (mask) {
		mmio_setbits_32(gpc_imr_offset[core_id], 1);
	} else {
		mmio_clrbits_32(gpc_imr_offset[core_id], 1);
	}

	dsb();
	gpc_imr_core_spin_unlock(core_id);
}

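/*
 * Core wake/awake handling below relies on IMR bit 0 (the GPR
 * interrupt, #32): imx_gpc_init() keeps IOMUXC GPR bit 12 set for core
 * wakeup, so unmasking this bit in a core's IMR wakes the core from
 * LPM, while masking it again marks the core as awake.
 */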
void imx_gpc_core_wake(uint32_t cpumask)
{
	for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
		if (cpumask & (1 << i)) {
			imx_gpc_mask_irq0(i, false);
		}
	}
}

void imx_gpc_set_a53_core_awake(uint32_t core_id)
{
	imx_gpc_mask_irq0(core_id, true);
}

static void imx_gpc_set_affinity(uint32_t hwirq, unsigned int cpu_idx)
{
	uintptr_t reg;
	unsigned int val;

	if (hwirq >= MAX_HW_IRQ_NUM || cpu_idx >= PLATFORM_CORE_COUNT) {
		return;
	}

	/*
	 * Use the mask/unmask bits as the affinity function: unmask the
	 * IMR bit to enable IRQ wakeup for this core.
	 */
	gpc_imr_core_spin_lock(cpu_idx);
	reg = gpc_imr_offset[cpu_idx] + (hwirq / 32) * 4;
	val = mmio_read_32(reg);
	val &= ~(1U << (hwirq % 32));
	mmio_write_32(reg, val);
	gpc_imr_core_spin_unlock(cpu_idx);

	/* clear the affinity of the other cores */
	for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
		if (cpu_idx != i) {
			gpc_imr_core_spin_lock(i);
			reg = gpc_imr_offset[i] + (hwirq / 32) * 4;
			val = mmio_read_32(reg);
			val |= (1U << (hwirq % 32));
			mmio_write_32(reg, val);
			gpc_imr_core_spin_unlock(i);
		}
	}
}

/* use wfi to power down the core */
void imx_set_cpu_pwr_off(unsigned int core_id)
{
	bakery_lock_get(&gpc_lock);

	/* enable the wfi power down of the core */
	mmio_setbits_32(IMX_GPC_BASE + LPCR_A53_AD, COREx_WFI_PDN(core_id) |
			(1 << (core_id + 20)));

	bakery_lock_release(&gpc_lock);

	/* assert the pcg pcr bit of the core */
	mmio_setbits_32(IMX_GPC_BASE + COREx_PGC_PCR(core_id), 0x1);
}

/* when exiting LPM, the reverse steps are applied */
void imx_set_cpu_lpm(unsigned int core_id, bool pdn)
{
	bakery_lock_get(&gpc_lock);

	if (pdn) {
		/* enable the core WFI PDN & IRQ PUP */
		mmio_setbits_32(IMX_GPC_BASE + LPCR_A53_AD, COREx_WFI_PDN(core_id) |
				(1 << (core_id + 20)) | COREx_IRQ_WUP(core_id));
		/* assert the pcg pcr bit of the core */
		mmio_setbits_32(IMX_GPC_BASE + COREx_PGC_PCR(core_id), 0x1);
	} else {
		/* disable the core WFI PDN & IRQ PUP */
		mmio_clrbits_32(IMX_GPC_BASE + LPCR_A53_AD, COREx_WFI_PDN(core_id) |
				COREx_IRQ_WUP(core_id));
		/* deassert the pcg pcr bit of the core */
		mmio_clrbits_32(IMX_GPC_BASE + COREx_PGC_PCR(core_id), 0x1);
	}

	bakery_lock_release(&gpc_lock);
}

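/*
 * The GPC sequencer steps through its SLTx_CFG "slots" in order on LPM
 * entry/exit; PGC_ACK_SEL_A53 selects which power-up/down acknowledge
 * the sequencer waits for before advancing.
 */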
void imx_pup_pdn_slot_config(int last_core, bool pdn)
{
	if (pdn) {
		/* SLOT0 for A53 PLAT power down */
		mmio_setbits_32(IMX_GPC_BASE + SLTx_CFG(0), SLT_PLAT_PDN);
		/* SLOT1 for A53 PLAT power up */
		mmio_setbits_32(IMX_GPC_BASE + SLTx_CFG(1), SLT_PLAT_PUP);
		/* SLOT2 for A53 primary core power up */
		mmio_setbits_32(IMX_GPC_BASE + SLTx_CFG(2), SLT_COREx_PUP(last_core));
		/* ACK setting: PLAT ACK for PDN, CORE ACK for PUP */
		mmio_clrsetbits_32(IMX_GPC_BASE + PGC_ACK_SEL_A53, 0xFFFFFFFF,
			A53_PLAT_PDN_ACK | SLT_COREx_PUP_ACK(last_core));
	} else {
		mmio_clrbits_32(IMX_GPC_BASE + SLTx_CFG(0), 0xFFFFFFFF);
		mmio_clrbits_32(IMX_GPC_BASE + SLTx_CFG(1), 0xFFFFFFFF);
		mmio_clrbits_32(IMX_GPC_BASE + SLTx_CFG(2), 0xFFFFFFFF);
		mmio_clrsetbits_32(IMX_GPC_BASE + PGC_ACK_SEL_A53, 0xFFFFFFFF,
			A53_DUMMY_PDN_ACK | A53_DUMMY_PUP_ACK);
	}
}

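/*
 * Configure the cluster-level power down/up sequence around system
 * suspend; is_local_state_off(power_state) selects between programming
 * the PLAT/SCU power-down path and undoing it on resume.
 */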
void imx_set_cluster_powerdown(unsigned int last_core, uint8_t power_state)
{
	uint32_t val;

	if (is_local_state_off(power_state)) {
		val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_BSC);
		val |= A53_LPM_STOP; /* enable C0-C1's STOP mode */
		val &= ~CPU_CLOCK_ON_LPM; /* disable the CPU clock in LPM mode */
		mmio_write_32(IMX_GPC_BASE + LPCR_A53_BSC, val);

		/* enable C2-C3's STOP mode */
		mmio_setbits_32(IMX_GPC_BASE + LPCR_A53_BSC2, A53_LPM_STOP);

		/* enable PLAT/SCU power down */
		val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_AD);
		val &= ~EN_L2_WFI_PDN;
		val |= L2PGE | EN_PLAT_PDN;
		val &= ~COREx_IRQ_WUP(last_core); /* disable IRQ PUP for the last core */
		val |= COREx_LPM_PUP(last_core); /* enable LPM PUP for the last core */
		mmio_write_32(IMX_GPC_BASE + LPCR_A53_AD, val);

		imx_pup_pdn_slot_config(last_core, true);

		/* enable PLAT PGC */
		mmio_setbits_32(IMX_GPC_BASE + A53_PLAT_PGC, 0x1);
	} else {
		/* clear PLAT PGC */
		mmio_clrbits_32(IMX_GPC_BASE + A53_PLAT_PGC, 0x1);

		/* clear the slot and ack for cluster power down */
		imx_pup_pdn_slot_config(last_core, false);

		val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_BSC);
		val &= ~A53_LPM_MASK; /* clear C0-C1's LPM mode */
		val |= CPU_CLOCK_ON_LPM; /* keep the CPU clock on in LPM mode */
		mmio_write_32(IMX_GPC_BASE + LPCR_A53_BSC, val);

		/* set A53 LPM to RUN mode */
		mmio_clrbits_32(IMX_GPC_BASE + LPCR_A53_BSC2, A53_LPM_MASK);

		/* clear PLAT/SCU power down */
		val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_AD);
		val |= EN_L2_WFI_PDN;
		val &= ~(L2PGE | EN_PLAT_PDN);
		val &= ~COREx_LPM_PUP(last_core); /* disable the last core's LPM PUP */
		mmio_write_32(IMX_GPC_BASE + LPCR_A53_AD, val);
	}
}

#define MAX_PLL_NUM	U(12)

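/*
 * ANAMIX PLL override settings: with the override bits set, PLL
 * power-down and clock-gating control is handed over to the GPC so the
 * PLLs can be switched off automatically when the SoC enters DSM mode.
 */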
static const struct pll_override imx8mq_pll[MAX_PLL_NUM] = {
	{.reg = 0x0, .override_mask = 0x140000, },
	{.reg = 0x8, .override_mask = 0x140000, },
	{.reg = 0x10, .override_mask = 0x140000, },
	{.reg = 0x18, .override_mask = 0x140000, },
	{.reg = 0x20, .override_mask = 0x140000, },
	{.reg = 0x28, .override_mask = 0x140000, },
	{.reg = 0x30, .override_mask = 0x1555540, },
	{.reg = 0x3c, .override_mask = 0x1555540, },
	{.reg = 0x48, .override_mask = 0x140, },
	{.reg = 0x54, .override_mask = 0x140, },
	{.reg = 0x60, .override_mask = 0x140, },
	{.reg = 0x70, .override_mask = 0xa, },
};

void imx_anamix_override(bool enter)
{
	unsigned int i;

	/* set the pll override bits before entering DSM mode, clear them on exit */
	for (i = 0; i < MAX_PLL_NUM; i++) {
		if (enter) {
			mmio_setbits_32(IMX_ANAMIX_BASE + imx8mq_pll[i].reg,
					imx8mq_pll[i].override_mask);
		} else {
			mmio_clrbits_32(IMX_ANAMIX_BASE + imx8mq_pll[i].reg,
					imx8mq_pll[i].override_mask);
		}
	}
}

int imx_gpc_handler(uint32_t smc_fid,
		    u_register_t x1,
		    u_register_t x2,
		    u_register_t x3)
{
	switch (x1) {
	case FSL_SIP_CONFIG_GPC_CORE_WAKE:
		imx_gpc_core_wake(x2);
		break;
	case FSL_SIP_CONFIG_GPC_SET_WAKE:
		imx_gpc_set_wake(x2, x3);
		break;
	case FSL_SIP_CONFIG_GPC_MASK:
		imx_gpc_hwirq_mask(x2);
		break;
	case FSL_SIP_CONFIG_GPC_UNMASK:
		imx_gpc_hwirq_unmask(x2);
		break;
	case FSL_SIP_CONFIG_GPC_SET_AFF:
		imx_gpc_set_affinity(x2, x3);
		break;
	default:
		return SMC_UNK;
	}

	return 0;
}
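
/*
 * Illustrative non-secure caller (hypothetical, assuming the platform's
 * SiP dispatcher routes the IMX_SIP_GPC function ID here):
 *
 *	arm_smccc_smc(IMX_SIP_GPC, FSL_SIP_CONFIG_GPC_SET_WAKE,
 *		      hwirq, true, 0, 0, 0, 0, &res);
 *
 * would record "hwirq" as a wakeup source via imx_gpc_set_wake().
 */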

void imx_gpc_init(void)
{
	uint32_t val;
	unsigned int i, j;

	/* mask all the interrupts by default */
	for (i = 0U; i < PLATFORM_CORE_COUNT; i++) {
		for (j = 0U; j < ARRAY_SIZE(gpc_imr_offset); j++) {
			mmio_write_32(gpc_imr_offset[j] + i * 4, ~0x0);
		}
	}

	/*
	 * Due to a hardware design requirement, make sure the GPR
	 * interrupt (#32) is unmasked during RUN mode to avoid entering
	 * DSM mode by mistake.
	 */
	for (i = 0U; i < PLATFORM_CORE_COUNT; i++) {
		mmio_write_32(gpc_imr_offset[i], ~0x1);
	}

	/* leave IOMUXC GPR bit 12 on for core wakeup */
	mmio_setbits_32(IMX_IOMUX_GPR_BASE + 0x4, 1 << 12);

	/* use external IRQs to wake up C0-C3 from LPM */
	val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_BSC);
	val |= IRQ_SRC_A53_WUP;
	/* clear the MASTER0 LPM handshake */
	val &= ~MASTER0_LPM_HSK;
	mmio_write_32(IMX_GPC_BASE + LPCR_A53_BSC, val);

	/* mask the M4 DSM trigger if the M4 is NOT enabled */
	mmio_setbits_32(IMX_GPC_BASE + LPCR_M4, DSM_MODE_MASK);

	/* put all mixes/PUs in the A53 domain */
	mmio_write_32(IMX_GPC_BASE + PGC_CPU_0_1_MAPPING, 0xfffd);

	/* set the SCU timing */
	mmio_write_32(IMX_GPC_BASE + PGC_SCU_TIMING,
		      (0x59 << 10) | 0x5B | (0x2 << 20));

	/* select the DUMMY PDN/PUP ACK by default for the A53 domain */
	mmio_write_32(IMX_GPC_BASE + PGC_ACK_SEL_A53, A53_DUMMY_PUP_ACK |
		A53_DUMMY_PDN_ACK);

	/* disable DSM mode by default */
	mmio_clrbits_32(IMX_GPC_BASE + SLPCR, DSM_MODE_MASK);

	/*
	 * USB PHY power up requires the RESET bit in the SRC to be
	 * cleared first; otherwise, the PU power up bit in the GPC will
	 * NOT self-clear. This only needs to be done once.
	 */
	mmio_clrbits_32(IMX_SRC_BASE + SRC_OTG1PHY_SCR, 0x1);
	mmio_clrbits_32(IMX_SRC_BASE + SRC_OTG2PHY_SCR, 0x1);

	/*
	 * For USB OTG, the limitations are:
	 * 1. before the system clock config, the IPG clock runs at
	 *    12.5MHz, so the delay time must be longer than 82us;
	 * 2. after the system clock config, the IPG clock runs at
	 *    66.5MHz, so the delay time must be longer than 15.3us.
	 * Wait 100us to make sure the USB OTG SRC is cleared safely.
	 */
	udelay(100);
}