/*
 * Copyright 2022 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

.section .text, "ax"

#include <asm_macros.S>
#include <lib/psci/psci.h>
#include <nxp_timer.h>
#include <plat_gic.h>

#include "bl31_data.h"
#include "plat_psci.h"
#include "platform_def.h"

/*
 * the BASE address for these offsets is AUX_01_DATA in the
 * bootcore's psci data region
 */
#define DEVDISR2_MASK_OFFSET	0x0    /* references AUX_01_DATA */
#define DEVDISR5_MASK_OFFSET	0x8    /* references AUX_02_DATA */

/*
 * the BASE address for these offsets is AUX_04_DATA in the
 * bootcore's psci data region
 */
#define GICD_BASE_ADDR_OFFSET	0x0    /* references AUX_04_DATA */
#define GICC_BASE_ADDR_OFFSET	0x8    /* references AUX_05_DATA */

#define IPSTPACK_RETRY_CNT	0x10000
#define DDR_SLEEP_RETRY_CNT	0x10000
#define CPUACTLR_EL1		S3_1_C15_C2_0
#define DDR_SDRAM_CFG_2_FRCSR	0x80000000
#define DDR_SDRAM_CFG_2_OFFSET	0x114
#define DDR_TIMING_CFG_4_OFFSET	0x160
#define DDR_CNTRL_BASE_ADDR	0x01080000

#define DLL_LOCK_MASK		0x3
#define DLL_LOCK_VALUE		0x2

#define ERROR_DDR_SLEEP		-1
#define ERROR_DDR_WAKE		-2
#define ERROR_NO_QUIESCE	-3

#define CORE_RESTARTABLE	0
#define CORE_NOT_RESTARTABLE	1

.global soc_init_lowlevel
.global soc_init_percpu

.global _soc_core_release
.global _soc_core_restart
.global _soc_ck_disabled
.global _soc_sys_reset
.global _soc_sys_off

.global _soc_core_prep_off
.global _soc_core_entr_off
.global _soc_core_exit_off

.global _soc_core_prep_stdby
.global _soc_core_entr_stdby
.global _soc_core_exit_stdby
.global _soc_core_prep_pwrdn
.global _soc_core_entr_pwrdn
.global _soc_core_exit_pwrdn
.global _soc_clstr_prep_stdby
.global _soc_clstr_exit_stdby
.global _soc_clstr_prep_pwrdn
.global _soc_clstr_exit_pwrdn
.global _soc_sys_prep_stdby
.global _soc_sys_exit_stdby
.global _soc_sys_prep_pwrdn
.global _soc_sys_pwrdn_wfi
.global _soc_sys_exit_pwrdn

.global _set_platform_security
.global _soc_set_start_addr

.equ TZPCDECPROT_0_SET_BASE, 0x02200804
.equ TZPCDECPROT_1_SET_BASE, 0x02200810
.equ TZPCDECPROT_2_SET_BASE, 0x0220081C

.equ TZASC_REGION_ATTRIBUTES_0_0, 0x01100110

.equ MPIDR_AFFINITY0_MASK, 0x00FF
.equ MPIDR_AFFINITY1_MASK, 0xFF00
.equ CPUECTLR_DISABLE_TWALK_PREFETCH, 0x4000000000
.equ CPUECTLR_INS_PREFETCH_MASK, 0x1800000000
.equ CPUECTLR_DAT_PREFETCH_MASK, 0x0300000000
.equ OSDLR_EL1_DLK_LOCK, 0x1
.equ CNTP_CTL_EL0_EN, 0x1
.equ CNTP_CTL_EL0_IMASK, 0x2
/* shifted value for incrementing cluster count in mpidr */
.equ MPIDR_CLUSTER, 0x100

/*
 * This function initializes the SoC
 * in: none
 * out: none
 * uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11
 */
func soc_init_lowlevel
	/*
	 * called from C, so save the non-volatile regs
	 * save these as pairs of registers to maintain the
	 * required 16-byte alignment on the stack
	 */
	stp	x4, x5, [sp, #-16]!
	stp	x6, x7, [sp, #-16]!
	stp	x8, x9, [sp, #-16]!
	stp	x10, x11, [sp, #-16]!
	stp	x12, x13, [sp, #-16]!
	stp	x18, x30, [sp, #-16]!

	/*
	 * make sure the personality has been established by releasing cores
	 * that are marked "to-be-disabled" from reset
	 */
	bl	release_disabled

	/* set SCRATCHRW7 to 0x0 */
	ldr	x0, =DCFG_SCRATCHRW7_OFFSET
	mov	x1, xzr
	bl	_write_reg_dcfg

	/* restore the aarch32/64 non-volatile registers */
	ldp	x18, x30, [sp], #16
	ldp	x12, x13, [sp], #16
	ldp	x10, x11, [sp], #16
	ldp	x8, x9, [sp], #16
	ldp	x6, x7, [sp], #16
	ldp	x4, x5, [sp], #16
	ret
endfunc soc_init_lowlevel

/*
 * void soc_init_percpu(void)
 * this function performs any soc-specific initialization that is needed on
 * a per-core basis
 * in:  none
 * out: none
 * uses x0, x1, x2, x3
 */
func soc_init_percpu
	stp	x4, x30, [sp, #-16]!

	bl	plat_my_core_mask
	mov	x2, x0

	/* x2 = core mask */

	/* see if this core is marked for prefetch disable */
	mov	x0, #PREFETCH_DIS_OFFSET
	bl	_get_global_data
	tst	x0, x2
	b.eq	1f
	bl	_disable_ldstr_pfetch_A53
1:
	mov	x0, #NXP_PMU_ADDR
	bl	enable_timer_base_to_cluster
	ldp	x4, x30, [sp], #16
	ret
endfunc soc_init_percpu

/*
 * this function sets the security mechanisms in the SoC to implement the
 * Platform Security Policy
 */
func _set_platform_security
	mov	x3, x30

#if (!SUPPRESS_TZC)
	/* initialize the tzpc */
	bl	init_tzpc
#endif

#if (!SUPPRESS_SEC)
	/* initialize secmon */
	bl	initSecMon
#endif
	mov	x30, x3
	ret
endfunc _set_platform_security

/*
 * this function writes a 64-bit address to bootlocptrh/l
 * in:  x0, 64-bit address to write to BOOTLOCPTRL/H
 * out: none
 * uses x0, x1, x2
 */
func _soc_set_start_addr
	/* get the 64-bit base address of the dcfg block */
	ldr	x2, =NXP_DCFG_ADDR

	/* write the 32-bit BOOTLOCPTRL register */
	mov	x1, x0
	str	w1, [x2, #DCFG_BOOTLOCPTRL_OFFSET]

	/* write the 32-bit BOOTLOCPTRH register */
	lsr	x1, x0, #32
	str	w1, [x2, #DCFG_BOOTLOCPTRH_OFFSET]
	ret
endfunc _soc_set_start_addr

/*
 * part of CPU_ON
 * this function releases a secondary core from reset
 * in: x0 = core_mask_lsb
 * out: none
 * uses: x0, x1, x2, x3
 */
_soc_core_release:
	mov	x3, x30

	/* x0 = core mask */

	ldr	x1, =NXP_SEC_REGFILE_ADDR
	/*
	 * write to CORE_HOLD to tell the bootrom that we want this core
	 * to run
	 */
	str	w0, [x1, #CORE_HOLD_OFFSET]

	/* x0 = core mask */

	/* read-modify-write BRRL to release core */
	mov	x1, #NXP_RESET_ADDR
	ldr	w2, [x1, #BRR_OFFSET]
	orr	w2, w2, w0
	str	w2, [x1, #BRR_OFFSET]
	dsb	sy
	isb

	/* send event */
	sev
	isb

	mov	x30, x3
	ret

/*
 * this function determines if a core is disabled via COREDISABLEDSR
 * in:  w0  = core_mask_lsb
 * out: w0  = 0, core not disabled
 *      w0 != 0, core disabled
 * uses x0, x1
 */
_soc_ck_disabled:
	/* get base addr of dcfg block */
	ldr	x1, =NXP_DCFG_ADDR

	/* read COREDISABLEDSR */
	ldr	w1, [x1, #DCFG_COREDISABLEDSR_OFFSET]

	/* test core bit */
	and	w0, w1, w0

	ret

/*
 * part of CPU_ON
 * this function restarts a core that was shut down via _soc_core_entr_off
 * in:  x0 = core mask lsb (of the target cpu)
 * out: x0 == 0, on success
 *      x0 != 0, on failure
 * uses x0, x1, x2, x3, x4, x5, x6
 */
_soc_core_restart:
	mov	x6, x30
	mov	x4, x0

	/* x4 = core mask lsb */

	/* pgm GICD_CTLR - enable secure grp0  */
	mov	x5, #NXP_GICD_ADDR
	ldr	w2, [x5, #GICD_CTLR_OFFSET]
	orr	w2, w2, #GICD_CTLR_EN_GRP_0
	str	w2, [x5, #GICD_CTLR_OFFSET]
	dsb	sy
	isb
	/* poll on RWP until the write completes */
4:
	ldr	w2, [x5, #GICD_CTLR_OFFSET]
	tst	w2, #GICD_CTLR_RWP
	b.ne	4b

	/*
	 * x4 = core mask lsb
	 * x5 = gicd base addr
	 */

	mov	x0, x4
	bl	get_mpidr_value

	/*
	 * x0 = mpidr of target core
	 * x4 = core mask lsb of target core
	 * x5 = gicd base addr
	 */

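	/*
	 * Per the GICv3 spec, ICC_SGI0R_EL1 takes TargetList in
	 * bits [15:0], Aff1 in bits [23:16] and the SGI INTID in
	 * bits [27:24]; the value is assembled accordingly below.
	 */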
	/* generate target list bit */
	and	x1, x0, #MPIDR_AFFINITY0_MASK
	mov	x2, #1
	lsl	x2, x2, x1
	/* get the affinity1 field */
	and	x1, x0, #MPIDR_AFFINITY1_MASK
	lsl	x1, x1, #8
	orr	x2, x2, x1
	/* insert the INTID for SGI15 */
	orr	x2, x2, #ICC_SGI0R_EL1_INTID
	/* fire the SGI */
	msr	ICC_SGI0R_EL1, x2
	dsb	sy
	isb

	/* load '0' on success */
	mov	x0, xzr

	mov	x30, x6
	ret

/*
 * part of CPU_OFF
 * this function programs SoC & GIC registers in preparation for shutting down
 * the core
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2, x3, x4, x5, x6, x7
 */
_soc_core_prep_off:
	mov	x8, x30
	mov	x7, x0

	/* x7 = core mask lsb */

	mrs	x1, CPUECTLR_EL1
	/* set SMPEN and disable prefetches in cpuectlr */
	orr	x1, x1, #CPUECTLR_SMPEN_EN
	orr	x1, x1, #CPUECTLR_DISABLE_TWALK_PREFETCH
	bic	x1, x1, #CPUECTLR_INS_PREFETCH_MASK
	bic	x1, x1, #CPUECTLR_DAT_PREFETCH_MASK
	/* set retention control in cpuectlr */
	bic	x1, x1, #CPUECTLR_TIMER_MASK
	orr	x1, x1, #CPUECTLR_TIMER_8TICKS
	msr	CPUECTLR_EL1, x1

	/* get redistributor rd base addr for this core */
	mov	x0, x7
	bl	get_gic_rd_base
	mov	x6, x0

	/* get redistributor sgi base addr for this core */
	mov	x0, x7
	bl	get_gic_sgi_base
	mov	x5, x0

	/*
	 * x5 = gicr sgi base addr
	 * x6 = gicr rd  base addr
	 * x7 = core mask lsb
	 */

	/* disable SGI 15 at redistributor - GICR_ICENABLER0 */
	mov	w3, #GICR_ICENABLER0_SGI15
	str	w3, [x5, #GICR_ICENABLER0_OFFSET]
2:
	/* poll on rwp bit in GICR_CTLR */
	ldr	w4, [x6, #GICR_CTLR_OFFSET]
	tst	w4, #GICR_CTLR_RWP
	b.ne	2b

	/* disable GRP1 interrupts at cpu interface */
	msr	ICC_IGRPEN1_EL3, xzr

	/* disable GRP0 ints at cpu interface */
	msr	ICC_IGRPEN0_EL1, xzr

	/* program the redistributor - poll on GICR_CTLR.RWP as needed */

	/* define SGI 15 as Grp0 - GICR_IGROUPR0 */
	ldr	w4, [x5, #GICR_IGROUPR0_OFFSET]
	bic	w4, w4, #GICR_IGROUPR0_SGI15
	str	w4, [x5, #GICR_IGROUPR0_OFFSET]

	/* define SGI 15 as Grp0 - GICR_IGRPMODR0 */
	ldr	w3, [x5, #GICR_IGRPMODR0_OFFSET]
	bic	w3, w3, #GICR_IGRPMODR0_SGI15
	str	w3, [x5, #GICR_IGRPMODR0_OFFSET]

	/* set priority of SGI 15 to highest (0x0) - GICR_IPRIORITYR3 */
	ldr	w4, [x5, #GICR_IPRIORITYR3_OFFSET]
	bic	w4, w4, #GICR_IPRIORITYR3_SGI15_MASK
	str	w4, [x5, #GICR_IPRIORITYR3_OFFSET]

	/* enable SGI 15 at redistributor - GICR_ISENABLER0 */
	mov	w3, #GICR_ISENABLER0_SGI15
	str	w3, [x5, #GICR_ISENABLER0_OFFSET]
	dsb	sy
	isb
3:
	/* poll on rwp bit in GICR_CTLR */
	ldr	w4, [x6, #GICR_CTLR_OFFSET]
	tst	w4, #GICR_CTLR_RWP
	b.ne	3b

	/* quiesce the debug interfaces */
	mrs	x3, osdlr_el1
	orr	x3, x3, #OSDLR_EL1_DLK_LOCK
	msr	osdlr_el1, x3
	isb

	/* enable grp0 ints */
	mov	x3, #ICC_IGRPEN0_EL1_EN
	msr	ICC_IGRPEN0_EL1, x3

	/*
	 * x5 = gicr sgi base addr
	 * x6 = gicr rd  base addr
	 * x7 = core mask lsb
	 */

	/* clear any pending interrupts */
	mvn	w1, wzr
	str	w1, [x5, #GICR_ICPENDR0_OFFSET]

	/* make sure system counter is enabled */
	ldr	x3, =NXP_TIMER_ADDR
	ldr	w0, [x3, #SYS_COUNTER_CNTCR_OFFSET]
	tst	w0, #SYS_COUNTER_CNTCR_EN
	b.ne	4f
	orr	w0, w0, #SYS_COUNTER_CNTCR_EN
	str	w0, [x3, #SYS_COUNTER_CNTCR_OFFSET]
4:
	/* enable the core timer and mask timer interrupt */
	mov	x1, #CNTP_CTL_EL0_EN
	orr	x1, x1, #CNTP_CTL_EL0_IMASK
	msr	cntp_ctl_el0, x1

	mov	x30, x8
	ret

/*
 * part of CPU_OFF
 * this function performs the final steps to shutdown the core
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2, x3, x4, x5
 */
_soc_core_entr_off:
	mov	x5, x30
	mov	x4, x0

	/* x4 = core mask */
1:
	/* enter low-power state by executing wfi */
	wfi

	/* see if SGI15 woke us up */
	mrs	x2, ICC_IAR0_EL1
	mov	x3, #ICC_IAR0_EL1_SGI15
	cmp	x2, x3
	b.ne	2f

	/* deactivate the int */
	msr	ICC_EOIR0_EL1, x2

	/* x4 = core mask */
2:
	/* check if core has been turned on */
	mov	x0, x4
	bl	_getCoreState

	/* x0 = core state */

	cmp	x0, #CORE_WAKEUP
	b.ne	1b

	/* if we get here, then we have exited the wfi */

	mov	x30, x5
	ret

/*
 * part of CPU_OFF
 * this function starts the process of starting a core back up
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2, x3, x4, x5, x6
 */
_soc_core_exit_off:
	mov	x6, x30
	mov	x5, x0

	/* disable forwarding of GRP0 ints at cpu interface */
	msr	ICC_IGRPEN0_EL1, xzr

	/* get redistributor sgi base addr for this core */
	mov	x0, x5
	bl	get_gic_sgi_base
	mov	x4, x0

	/*
	 * x4 = gicr sgi base addr
	 * x5 = core mask
	 */

	/* disable SGI 15 at redistributor - GICR_ICENABLER0 */
	mov	w1, #GICR_ICENABLER0_SGI15
	str	w1, [x4, #GICR_ICENABLER0_OFFSET]

	/* get redistributor rd base addr for this core */
	mov	x0, x5
	bl	get_gic_rd_base
	mov	x4, x0

	/* x4 = gicr rd  base addr */
2:
	/* poll on rwp bit in GICR_CTLR */
	ldr	w2, [x4, #GICR_CTLR_OFFSET]
	tst	w2, #GICR_CTLR_RWP
	b.ne	2b

	/* x4 = gicr rd  base addr */

	/* unlock the debug interfaces */
	mrs	x3, osdlr_el1
	bic	x3, x3, #OSDLR_EL1_DLK_LOCK
	msr	osdlr_el1, x3
	isb

	dsb	sy
	isb
	mov	x30, x6
	ret

/*
 * this function requests a reset of the entire SOC
 * in:  none
 * out: none
 * uses: x0, x1, x2, x3, x4, x5, x6
 */
_soc_sys_reset:
	mov	x3, x30

	/* make sure the mask is cleared in the reset request mask register */
	mov	x0, #RST_RSTRQMR1_OFFSET
	mov	w1, wzr
	bl	_write_reg_reset

	/* set the reset request */
	mov	x4, #RST_RSTCR_OFFSET
	mov	x0, x4
	mov	w1, #RSTCR_RESET_REQ
	bl	_write_reg_reset

	/* x4 = RST_RSTCR_OFFSET */

	/*
	 * just in case this address range is mapped as cacheable,
	 * flush the write out of the dcaches
	 */
	mov	x2, #NXP_RESET_ADDR
	add	x2, x2, x4
	dc	cvac, x2
	dsb	st
	isb

	/* this function does not return */
	b	.

/*
 * this function turns off the SoC
 * Note: this function is not intended to return, and the only allowable
 *       recovery is POR
 * in:  none
 * out: none
 * uses x0, x1, x2, x3
 */
_soc_sys_off:
	/*
	 * A-009810: the LPM20 entry sequence might cause a spurious
	 * timeout reset request
	 * workaround: mask the reset request (RPTOE)
	 */
	ldr	x0, =NXP_RESET_ADDR
	ldr	w1, [x0, #RST_RSTRQMR1_OFFSET]
	orr	w1, w1, #RSTRQMR_RPTOE_MASK
	str	w1, [x0, #RST_RSTRQMR1_OFFSET]

	/* disable SEC, QBman, SPI and QSPI */
	ldr	x2, =NXP_DCFG_ADDR
	ldr	x0, =DCFG_DEVDISR1_OFFSET
	ldr	w1, =DCFG_DEVDISR1_SEC
	str	w1, [x2, x0]
	ldr	x0, =DCFG_DEVDISR3_OFFSET
	ldr	w1, =DCFG_DEVDISR3_QBMAIN
	str	w1, [x2, x0]
	ldr	x0, =DCFG_DEVDISR4_OFFSET
	ldr	w1, =DCFG_DEVDISR4_SPI_QSPI
	str	w1, [x2, x0]

	/* set TPMWAKEMR0 */
	ldr	x0, =TPMWAKEMR0_ADDR
	mov	w1, #0x1
	str	w1, [x0]

	/* disable icache, dcache, mmu @ EL1 */
	mov	x1, #SCTLR_I_C_M_MASK
	mrs	x0, sctlr_el1
	bic	x0, x0, x1
	msr	sctlr_el1, x0

	/* disable L2 prefetches */
	mrs	x0, CPUECTLR_EL1
	orr	x0, x0, #CPUECTLR_SMPEN_EN
	orr	x0, x0, #CPUECTLR_TIMER_8TICKS
	msr	CPUECTLR_EL1, x0
	dsb	sy
	isb

	/* disable CCN snoop domain */
	ldr	x0, =NXP_CCI_ADDR
	mov	w1, #0x1
	str	w1, [x0]

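	/*
	 * set the DAIF bits in the saved program status for EL1 and
	 * EL2 so exceptions remain masked on any subsequent eret
	 */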
	mov	x2, #DAIF_SET_MASK

	mrs	x1, spsr_el1
	orr	x1, x1, x2
	msr	spsr_el1, x1

	mrs	x1, spsr_el2
	orr	x1, x1, x2
	msr	spsr_el2, x1

	bl	get_pmu_idle_cluster_mask
	mov	x3, #NXP_PMU_ADDR

	/* x3 = pmu base addr */

	/* idle the ACP interfaces */
	str	w0, [x3, #PMU_CLAINACTSETR_OFFSET]

	/* force the debug interface to be quiescent */
	mrs	x0, osdlr_el1
	orr	x0, x0, #0x1
	msr	osdlr_el1, x0

	bl	get_pmu_flush_cluster_mask
	/* x3 = pmu base addr */
	mov	x3, #NXP_PMU_ADDR

	/* clear flush request and status */
	ldr	x2, =PMU_CLSL2FLUSHCLRR_OFFSET
	str	w0, [x3, x2]

	/* close the Skyros master port */
	ldr	x2, =PMU_CLSINACTSETR_OFFSET
	str	w0, [x3, x2]

	/* request lpm20 */
	ldr	x0, =PMU_POWMGTCSR_OFFSET
	ldr	w1, =PMU_POWMGTCSR_VAL
	str	w1, [x3, x0]

	/* this function does not return */
1:
	wfi
	b	1b

/*
 * part of CPU_SUSPEND
 * this function performs SoC-specific programming prior to standby
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
_soc_core_prep_stdby:
	/* clear CPUECTLR_EL1[2:0] */
	mrs	x1, CPUECTLR_EL1
	bic	x1, x1, #CPUECTLR_TIMER_MASK
	msr	CPUECTLR_EL1, x1

	ret

/*
 * part of CPU_SUSPEND
 * this function puts the calling core into standby state
 * in:  x0 = core mask lsb
 * out: none
 * uses x0
 */
_soc_core_entr_stdby:
	/* X0 = core mask lsb */
	dsb	sy
	isb
	wfi

	ret

/*
 * part of CPU_SUSPEND
 * this function performs any SoC-specific cleanup after standby state
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
_soc_core_exit_stdby:
	ret

/*
 * part of CPU_SUSPEND
 * this function performs SoC-specific programming prior to power-down
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2, x3
 */
_soc_core_prep_pwrdn:
	/* make sure system counter is enabled */
	ldr	x3, =NXP_TIMER_ADDR
	ldr	w0, [x3, #SYS_COUNTER_CNTCR_OFFSET]
	tst	w0, #SYS_COUNTER_CNTCR_EN
	b.ne	1f
	orr	w0, w0, #SYS_COUNTER_CNTCR_EN
	str	w0, [x3, #SYS_COUNTER_CNTCR_OFFSET]
1:
	/*
	 * enable dynamic retention control (CPUECTLR[2:0])
	 * set the SMPEN bit (CPUECTLR[6])
	 */
	mrs	x1, CPUECTLR_EL1
	bic	x1, x1, #CPUECTLR_RET_MASK
	orr	x1, x1, #CPUECTLR_TIMER_8TICKS
	orr	x1, x1, #CPUECTLR_SMPEN_EN
	msr	CPUECTLR_EL1, x1

	isb
	ret

/*
 * part of CPU_SUSPEND
 * this function puts the calling core into a power-down state
 * in:  x0 = core mask lsb
 * out: none
 * uses x0
 */
_soc_core_entr_pwrdn:
	/* X0 = core mask lsb */
	dsb	sy
	isb
	wfi

	ret

/*
 * part of CPU_SUSPEND
 * this function cleans up after a core exits power-down
 * in:  x0 = core mask lsb
 * out: none
 * uses
 */
_soc_core_exit_pwrdn:
	ret

/*
 * part of CPU_SUSPEND
 * this function performs SoC-specific programming prior to standby
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
_soc_clstr_prep_stdby:
	/* clear CPUECTLR_EL1[2:0] */
	mrs	x1, CPUECTLR_EL1
	bic	x1, x1, #CPUECTLR_TIMER_MASK
	msr	CPUECTLR_EL1, x1

	ret

/*
 * part of CPU_SUSPEND
 * this function performs any SoC-specific cleanup after standby state
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
_soc_clstr_exit_stdby:
	ret

/*
 * part of CPU_SUSPEND
 * this function performs SoC-specific programming prior to power-down
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2, x3
 */
_soc_clstr_prep_pwrdn:
	/* make sure system counter is enabled */
	ldr	x3, =NXP_TIMER_ADDR
	ldr	w0, [x3, #SYS_COUNTER_CNTCR_OFFSET]
	tst	w0, #SYS_COUNTER_CNTCR_EN
	b.ne	1f
	orr	w0, w0, #SYS_COUNTER_CNTCR_EN
	str	w0, [x3, #SYS_COUNTER_CNTCR_OFFSET]
1:
	/*
	 * enable dynamic retention control (CPUECTLR[2:0])
	 * set the SMPEN bit (CPUECTLR[6])
	 */
	mrs	x1, CPUECTLR_EL1
	bic	x1, x1, #CPUECTLR_RET_MASK
	orr	x1, x1, #CPUECTLR_TIMER_8TICKS
	orr	x1, x1, #CPUECTLR_SMPEN_EN
	msr	CPUECTLR_EL1, x1

	isb
	ret

/*
 * part of CPU_SUSPEND
 * this function cleans up after a core exits power-down
 * in:  x0 = core mask lsb
 * out: none
 * uses
 */
_soc_clstr_exit_pwrdn:
	ret

/*
 * part of CPU_SUSPEND
 * this function performs SoC-specific programming prior to standby
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
_soc_sys_prep_stdby:
	/* clear CPUECTLR_EL1[2:0] */
	mrs	x1, CPUECTLR_EL1
	bic	x1, x1, #CPUECTLR_TIMER_MASK
	msr	CPUECTLR_EL1, x1

	ret

/*
 * part of CPU_SUSPEND
 * this function performs any SoC-specific cleanup after standby state
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
_soc_sys_exit_stdby:
	ret

/*
 * part of CPU_SUSPEND
 * this function performs SoC-specific programming prior to
 * suspend-to-power-down
 * in:  x0 = core mask lsb
 * out: none
 * uses x0
 */
_soc_sys_prep_pwrdn:
	/* set retention control */
	mrs	x0, CPUECTLR_EL1
	bic	x0, x0, #CPUECTLR_TIMER_MASK
	orr	x0, x0, #CPUECTLR_TIMER_8TICKS
	orr	x0, x0, #CPUECTLR_SMPEN_EN
	msr	CPUECTLR_EL1, x0
	dsb	sy
	isb

	ret

/*
 * part of CPU_SUSPEND
 * this function puts the calling core, and potentially the soc, into a
 * low-power state
 * in:  x0 = core mask lsb
 * out: x0 = 0, success
 *      x0 < 0, failure
 * uses x0, x1, x2, x3, x4, x5, x6, x7, x8
 */
_soc_sys_pwrdn_wfi:
	/* Save LR to stack */
	stp	x18, x30, [sp, #-16]!

	/* Poll PCPW20SR for all secondary cores to be placed in PW20 */
	bl	get_tot_num_cores
	mov	x3, #0x1
	lsl	x3, x3, x0
	sub	x3, x3, #2
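	/* x3 = (1 << num_cores) - 2, i.e. all cores except core 0 */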
1:
	mov	x0, #NXP_PMU_ADDR
	ldr	w1, [x0, #PMU_PCPW20SR_OFFSET]
	cmp	w1, w3
	b.ne	1b

	/* backup EPU registers to stack */
	mov	x3, #NXP_PMU_ADDR
	ldr	x2, =NXP_EPU_ADDR
	ldr	w4, [x2, #EPU_EPIMCR10_OFFSET]
	ldr	w5, [x2, #EPU_EPCCR10_OFFSET]
	ldr	w6, [x2, #EPU_EPCTR10_OFFSET]
	ldr	w7, [x2, #EPU_EPGCR_OFFSET]
	stp	x4, x5, [sp, #-16]!
	stp	x6, x7, [sp, #-16]!

	/*
	 * x2 = epu base addr
	 * x3 = pmu base addr
	 */

	/* set up EPU event to receive the wake signal from PMU */
	mov	w4, #EPU_EPIMCR10_VAL
	mov	w5, #EPU_EPCCR10_VAL
	mov	w6, #EPU_EPCTR10_VAL
	mov	w7, #EPU_EPGCR_VAL
	str	w4, [x2, #EPU_EPIMCR10_OFFSET]
	str	w5, [x2, #EPU_EPCCR10_OFFSET]
	str	w6, [x2, #EPU_EPCTR10_OFFSET]
	str	w7, [x2, #EPU_EPGCR_OFFSET]

	/*
	 * A-010194: there is a logic problem in the GIC-to-PMU path
	 * used to issue the wake request to core0
	 * workaround: re-target the wakeup interrupts to a core other
	 * than core0, the last active core
	 */
	ldr	x2, =NXP_GICD_ADDR

	/* backup flextimer/mmc/usb interrupt router */
	ldr	x0, =GICD_IROUTER60_OFFSET
	ldr	x1, =GICD_IROUTER76_OFFSET
	ldr	w4, [x2, x0]
	ldr	w5, [x2, x1]
	ldr	x0, =GICD_IROUTER112_OFFSET
	ldr	x1, =GICD_IROUTER113_OFFSET
	ldr	w6, [x2, x0]
	ldr	w7, [x2, x1]
	stp	x4, x5, [sp, #-16]!
	stp	x6, x7, [sp, #-16]!

	/*
	 * x2 = gicd base addr
	 * x0 = GICD_IROUTER112_OFFSET
	 * x1 = GICD_IROUTER113_OFFSET
	 */

	/* re-route interrupt to cluster 1 */
	ldr	w4, =GICD_IROUTER_VALUE
	str	w4, [x2, x0]
	str	w4, [x2, x1]
	ldr	x0, =GICD_IROUTER60_OFFSET
	ldr	x1, =GICD_IROUTER76_OFFSET
	str	w4, [x2, x0]
	str	w4, [x2, x1]
	dsb	sy
	isb

	/* backup flextimer/mmc/usb interrupt enabler */
	ldr	x0, =GICD_ISENABLER_1
	ldr	w4, [x2, x0]
	ldr	x1, =GICD_ISENABLER_2
	ldr	w5, [x2, x1]
	stp	x4, x5, [sp, #-16]!

	ldr	x0, =GICD_ISENABLER_3
	ldr	w4, [x2, x0]
	ldr	x1, =GICD_ICENABLER_1
	ldr	w5, [x2, x1]
	stp	x4, x5, [sp, #-16]!

	ldr	x0, =GICD_ICENABLER_2
	ldr	w4, [x2, x0]
	ldr	x1, =GICD_ICENABLER_3
	ldr	w5, [x2, x1]
	stp	x4, x5, [sp, #-16]!

	/* enable related interrupt routing */
	ldr	w4, =GICD_ISENABLER_1_VALUE
	ldr	x0, =GICD_ISENABLER_1
	str	w4, [x2, x0]
	dsb	sy
	isb

	ldr	w4, =GICD_ISENABLER_2_VALUE
	ldr	x0, =GICD_ISENABLER_2
	str	w4, [x2, x0]
	dsb	sy
	isb

	ldr	w4, =GICD_ISENABLER_3_VALUE
	ldr	x0, =GICD_ISENABLER_3
	str	w4, [x2, x0]
	dsb	sy
	isb

	/* set POWMGTDCR [STP_PV_EN] = 1 */
	ldr	x2, =NXP_POWMGTDCR
	ldr	w4, =0x01
	str	w4, [x2]

	/* program IPSTPCR for override stop request (except DDR) */
	mov	x3, #NXP_PMU_ADDR

	/* build an override mask for IPSTPCR4/IPSTPACK4/DEVDISR5 */
	ldr	x2, =PMU_IPPDEXPCR4_OFFSET
	ldr	w7, [x3, x2]

	mov	x5, xzr
	ldr	x6, =IPPDEXPCR4_MASK
	and	x6, x6, x7
	cbz	x6, 1f

	/*
	 * x5 = override mask
	 * x6 = IPPDEXPCR bits for DEVDISR5
	 * x7 = IPPDEXPCR
	 */

	/* get the overrides */
	orr	x4, x5, #DEVDISR5_FLX_TMR
	tst	x6, #IPPDEXPCR_FLX_TMR
	csel	x5, x5, x4, EQ
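	/* x5 picks up DEVDISR5_FLX_TMR only if IPPDEXPCR_FLX_TMR was set */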
1:
	/* store the DEVDISR5 override mask */
	ldr	x2, =BC_PSCI_BASE
	add	x2, x2, #AUX_01_DATA
	str	w5, [x2, #DEVDISR5_MASK_OFFSET]

	mov	x3, #NXP_PMU_ADDR

	/* write IPSTPCR0 - no overrides */
	ldr	x2, =PMU_IPSTPCR0_OFFSET
	ldr	w5, =IPSTPCR0_VALUE
	str	w5, [x3, x2]

	/* write IPSTPCR1 - no overrides */
	ldr	x2, =PMU_IPSTPCR1_OFFSET
	ldr	w5, =IPSTPCR1_VALUE
	str	w5, [x3, x2]

	/* write IPSTPCR2 - no overrides */
	ldr	x2, =PMU_IPSTPCR2_OFFSET
	ldr	w5, =IPSTPCR2_VALUE
	str	w5, [x3, x2]

	/* write IPSTPCR3 - no overrides */
	ldr	x2, =PMU_IPSTPCR3_OFFSET
	ldr	w5, =IPSTPCR3_VALUE
	str	w5, [x3, x2]

	/* write IPSTPCR4 - overrides possible */
	ldr	x2, =BC_PSCI_BASE
	add	x2, x2, #AUX_01_DATA
	ldr	w6, [x2, #DEVDISR5_MASK_OFFSET]
	ldr	x2, =PMU_IPSTPCR4_OFFSET
	ldr	w5, =IPSTPCR4_VALUE
	bic	x5, x5, x6
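	/* clear override-mask bits so IPs flagged in IPPDEXPCR4 are not stopped */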
	str	w5, [x3, x2]

	/* write IPSTPCR5 - no overrides */
	ldr	x2, =PMU_IPSTPCR5_OFFSET
	ldr	w5, =IPSTPCR5_VALUE
	str	w5, [x3, x2]

	/* write IPSTPCR6 - no overrides */
	ldr	x2, =PMU_IPSTPCR6_OFFSET
	ldr	w5, =IPSTPCR6_VALUE
	str	w5, [x3, x2]

	/* poll IPSTPACK for IP stop acknowledgment (except DDR) */
	mov	x3, #NXP_PMU_ADDR

	/* poll on IPSTPACK0 */
	ldr	x2, =PMU_IPSTPACK0_OFFSET
	ldr	x4, =IPSTPCR0_VALUE
	ldr	x7, =IPSTPACK_RETRY_CNT
3:
	ldr	w0, [x3, x2]
	cmp	x0, x4
	b.eq	14f
	sub	x7, x7, #1
	cbnz	x7, 3b

14:
	/* poll on IPSTPACK1 */
	ldr	x2, =PMU_IPSTPACK1_OFFSET
	ldr	x4, =IPSTPCR1_VALUE
	ldr	x7, =IPSTPACK_RETRY_CNT
4:
	ldr	w0, [x3, x2]
	cmp	x0, x4
	b.eq	15f
	sub	x7, x7, #1
	cbnz	x7, 4b

15:
	/* poll on IPSTPACK2 */
	ldr	x2, =PMU_IPSTPACK2_OFFSET
	ldr	x4, =IPSTPCR2_VALUE
	ldr	x7, =IPSTPACK_RETRY_CNT
5:
	ldr	w0, [x3, x2]
	cmp	x0, x4
	b.eq	16f
	sub	x7, x7, #1
	cbnz	x7, 5b

16:
	/* poll on IPSTPACK3 */
	ldr	x2, =PMU_IPSTPACK3_OFFSET
	ldr	x4, =IPSTPCR3_VALUE
	ldr	x7, =IPSTPACK_RETRY_CNT
6:
	ldr	w0, [x3, x2]
	cmp	x0, x4
	b.eq	17f
	sub	x7, x7, #1
	cbnz	x7, 6b

17:
	/* poll on IPSTPACK4 */
	ldr	x2, =PMU_IPSTPACK4_OFFSET
	ldr	x4, =IPSTPCR4_VALUE
	ldr	x7, =IPSTPACK_RETRY_CNT
7:
	ldr	w0, [x3, x2]
	cmp	x0, x4
	b.eq	18f
	sub	x7, x7, #1
	cbnz	x7, 7b

18:
	/* poll on IPSTPACK5 */
	ldr	x2, =PMU_IPSTPACK5_OFFSET
	ldr	x4, =IPSTPCR5_VALUE
	ldr	x7, =IPSTPACK_RETRY_CNT
8:
	ldr	w0, [x3, x2]
	cmp	x0, x4
	b.eq	19f
	sub	x7, x7, #1
	cbnz	x7, 8b

19:
	/* poll on IPSTPACK6 */
	ldr	x2, =PMU_IPSTPACK6_OFFSET
	ldr	x4, =IPSTPCR6_VALUE
	ldr	x7, =IPSTPACK_RETRY_CNT
9:
	ldr	w0, [x3, x2]
	cmp	x0, x4
	b.eq	20f
	sub	x7, x7, #1
	cbnz	x7, 9b

20:
	/* save the current DEVDISR states (kept in w13-w18) */
	ldr	x2, =NXP_DCFG_ADDR

	/* save DEVDISR1 and load new value */
	ldr	x0, =DCFG_DEVDISR1_OFFSET
	ldr	w1, [x2, x0]
	mov	w13, w1
	ldr	x1, =DEVDISR1_VALUE
	str	w1, [x2, x0]
	/* save DEVDISR2 and load new value */
	ldr	x0, =DCFG_DEVDISR2_OFFSET
	ldr	w1, [x2, x0]
	mov	w14, w1
	ldr	x1, =DEVDISR2_VALUE
	str	w1, [x2, x0]

	/* x6 = DEVDISR5 override mask */

	/* save DEVDISR3 and load new value */
	ldr	x0, =DCFG_DEVDISR3_OFFSET
	ldr	w1, [x2, x0]
	mov	w15, w1
	ldr	x1, =DEVDISR3_VALUE
	str	w1, [x2, x0]

	/* save DEVDISR4 and load new value */
	ldr	x0, =DCFG_DEVDISR4_OFFSET
	ldr	w1, [x2, x0]
	mov	w16, w1
	/* do not stop the UART, so console output keeps working */
	ldr	x1, =0x0000332
	str	w1, [x2, x0]

	/* save DEVDISR5 and load new value */
	ldr	x0, =DCFG_DEVDISR5_OFFSET
	ldr	w1, [x2, x0]
	mov	w17, w1
	/* keep OCRAM enabled, otherwise the wakeup will fail */
	ldr	x1, =0x00102300
	str	w1, [x2, x0]

	/* save DEVDISR6 and load new value */
	ldr	x0, =DCFG_DEVDISR6_OFFSET
	ldr	w1, [x2, x0]
	mov	w18, w1
	ldr	x1, =DEVDISR6_VALUE
	str	w1, [x2, x0]

	/*
	 * w13 = DEVDISR1 saved value
	 * w14 = DEVDISR2 saved value
	 * w15 = DEVDISR3 saved value
	 * w16 = DEVDISR4 saved value
	 * w17 = DEVDISR5 saved value
	 * w18 = DEVDISR6 saved value
	 */
	/*
	 * A-009810: the LPM20 entry sequence might cause a spurious
	 * timeout reset request
	 * workaround: mask the reset request (RPTOE)
	 */
	ldr	x0, =NXP_RESET_ADDR
	ldr	w1, =RSTRQMR_RPTOE_MASK
	str	w1, [x0, #RST_RSTRQMR1_OFFSET]

	/* disable SEC, QBman, SPI and QSPI */
	ldr	x2, =NXP_DCFG_ADDR
	ldr	x0, =DCFG_DEVDISR1_OFFSET
	ldr	w1, =DCFG_DEVDISR1_SEC
	str	w1, [x2, x0]
	ldr	x0, =DCFG_DEVDISR3_OFFSET
	ldr	w1, =DCFG_DEVDISR3_QBMAIN
	str	w1, [x2, x0]
	ldr	x0, =DCFG_DEVDISR4_OFFSET
	ldr	w1, =DCFG_DEVDISR4_SPI_QSPI
	str	w1, [x2, x0]

	/*
	 * write the GICR_WAKER.ProcessorSleep bits to 1
	 * enable the WakeRequest signal
	 * x3 is cpu mask starting from cpu7
	 */
	bl	get_tot_num_cores
	sub	x0, x0, #1
	mov	x3, #0x1
	lsl	x3, x3, x0
2:
	mov	x0, x3
	bl	get_gic_rd_base
	ldr	w1, [x0, #GICR_WAKER_OFFSET]
	orr	w1, w1, #GICR_WAKER_SLEEP_BIT
	str	w1, [x0, #GICR_WAKER_OFFSET]
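	/* wait until the redistributor reports the core as asleep */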
1:
	ldr	w1, [x0, #GICR_WAKER_OFFSET]
	cmp	w1, #GICR_WAKER_ASLEEP
	b.ne	1b

	lsr	x3, x3, #1
	cbnz	x3, 2b

	/* perform Icache Warming Sequence */
	ldr	x5, =IPSTPCR4_VALUE
	mov	x6, DDR_CNTRL_BASE_ADDR
	mov	x7, #NXP_PMU_ADDR
	mov	x8, #NXP_DCFG_ADDR
	mov	x10, #PMU_IPSTPCR4_OFFSET
	mov	x11, #PMU_IPSTPACK4_OFFSET
	mov	x12, #PMU_IPSTPCR3_OFFSET
	mov	x18, #PMU_IPSTPCR2_OFFSET
	mov	x19, #PMU_IPSTPCR1_OFFSET
	mov	x21, #PMU_IPSTPCR0_OFFSET
	ldr	x22, =DCFG_DEVDISR5_OFFSET
	ldr	x23, =NXP_EPU_ADDR
	mov	x9, #CORE_RESTARTABLE
	bl	final_pwrdown

	/*
	 * disable the WakeRequest signal on cpu 0-7
	 * x3 is cpu mask starting from cpu7
	 */
	bl	get_tot_num_cores
	sub	x0, x0, #1
	mov	x3, #0x1
	lsl	x3, x3, x0
2:
	mov	x0, x3
	bl	get_gic_rd_base
	ldr	w1, [x0, #GICR_WAKER_OFFSET]
	bic	w1, w1, #GICR_WAKER_SLEEP_BIT
	str	w1, [x0, #GICR_WAKER_OFFSET]
1:
	ldr	w1, [x0, #GICR_WAKER_OFFSET]
	cbnz	w1, 1b

	lsr	x3, x3, #1
	cbnz	x3, 2b

	/* set SGI for secondary core wakeup */
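	/*
	 * S3_0_C12_C11_7 is the sysreg encoding of ICC_SGI0R_EL1 (op0=3,
	 * op1=0, CRn=12, CRm=11, op2=7); each write below targets one
	 * secondary core (Aff1 in bits [23:16], TargetList in bits [15:0])
	 * with a distinct SGI INTID in bits [27:24]
	 */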
	ldr	x0, =0x1000002
	msr	S3_0_C12_C11_7, x0
	isb
	ldr	x0, =0x2000004
	msr	S3_0_C12_C11_7, x0
	isb
	ldr	x0, =0x3000008
	msr	S3_0_C12_C11_7, x0
	isb
	ldr	x0, =0x4010001
	msr	S3_0_C12_C11_7, x0
	isb
	ldr	x0, =0x5010002
	msr	S3_0_C12_C11_7, x0
	isb
	ldr	x0, =0x6010004
	msr	S3_0_C12_C11_7, x0
	isb
	ldr	x0, =0x7010008
	msr	S3_0_C12_C11_7, x0

	/* re-enable SEC, QBman, SPI and QSPI */
	ldr	x2, =NXP_DCFG_ADDR
	str	wzr, [x2, #DCFG_DEVDISR1_OFFSET]
	str	wzr, [x2, #DCFG_DEVDISR3_OFFSET]
	str	wzr, [x2, #DCFG_DEVDISR4_OFFSET]

	/* clear POWMGTDCR [STP_PV_EN] */
	ldr	x2, =NXP_POWMGTDCR
	ldr	w4, [x2]
	bic	w4, w4, #0x01
	str	w4, [x2]

	/* restore flextimer/mmc/usb interrupt enabler */
	ldr	x3, =NXP_GICD_ADDR
	ldp	x0, x2, [sp], #16
	ldr	x1, =GICD_ICENABLER_2
	mvn	w0, w0
	str	w0, [x3, x1]
	ldr	x1, =GICD_ICENABLER_3
	mvn	w2, w2
	str	w2, [x3, x1]

	ldp	x0, x2, [sp], #16
	ldr	x1, =GICD_ISENABLER_3
	str	w0, [x3, x1]
	ldr	x1, =GICD_ICENABLER_1
	mvn	w2, w2
	str	w2, [x3, x1]

	ldp	x0, x2, [sp], #16
	ldr	x1, =GICD_ISENABLER_1
	str	w0, [x3, x1]
	ldr	x1, =GICD_ISENABLER_2
	str	w2, [x3, x1]

	/* restore flextimer/mmc/usb interrupt router */
	ldr	x3, =NXP_GICD_ADDR
	ldp	x0, x2, [sp], #16
	ldr	x1, =GICD_IROUTER113_OFFSET
	str	w2, [x3, x1]
	ldr	x1, =GICD_IROUTER112_OFFSET
	str	w0, [x3, x1]
	ldp	x0, x2, [sp], #16
	ldr	x1, =GICD_IROUTER76_OFFSET
	str	w2, [x3, x1]
	ldr	x1, =GICD_IROUTER60_OFFSET
	str	w0, [x3, x1]

	/* restore EPU registers */
	ldr	x3, =NXP_EPU_ADDR
	ldp	x0, x2, [sp], #16
	str	w2, [x3, #EPU_EPGCR_OFFSET]
	str	w0, [x3, #EPU_EPCTR10_OFFSET]
	ldp	x2, x1, [sp], #16
	str	w1, [x3, #EPU_EPCCR10_OFFSET]
	str	w2, [x3, #EPU_EPIMCR10_OFFSET]

	isb
	/* restore LR */
	ldp	x18, x30, [sp], #16
	ret

/*
 * part of CPU_SUSPEND
 * this function performs any SoC-specific cleanup after power-down
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
_soc_sys_exit_pwrdn:
	mrs	x1, SCTLR_EL1
	orr	x1, x1, #SCTLR_I_MASK
	msr	SCTLR_EL1, x1
	isb
	ret

/*
 * this function checks to see if cores which are to be disabled have been
 * released from reset - if not, it releases them
 * in:  none
 * out: none
 * uses x0, x1, x2, x3, x4, x5, x6, x7, x8
 */
release_disabled:
	mov	x8, x30

	/* read COREDISABLESR */
	mov	x0, #NXP_DCFG_ADDR
	ldr	w4, [x0, #DCFG_COREDISABLEDSR_OFFSET]

	/* get the number of cpus on this device */
	mov	x6, #PLATFORM_CORE_COUNT

	mov	x0, #NXP_RESET_ADDR
	ldr	w5, [x0, #BRR_OFFSET]

	/* load the core mask for the first core */
	mov	x7, #1

	/*
	 * x4 = COREDISABLESR
	 * x5 = BRR
	 * x6 = loop count
	 * x7 = core mask bit
	 */
2:
	/* check if the core is to be disabled */
	tst	x4, x7
	b.eq	1f

	/* see if disabled cores have already been released from reset */
	tst	x5, x7
	b.ne	1f

	/* if core has not been released, then release it (0-3) */
	mov	x0, x7
	bl	_soc_core_release

	/* record the core state in the data area (0-3) */
	mov	x0, x7
	mov	x1, #CORE_DISABLED
	bl	_setCoreState

1:
	/* decrement the counter */
	subs	x6, x6, #1
	b.le	3f

	/* shift the core mask to the next core */
	lsl	x7, x7, #1
	/* continue */
	b	2b
3:
	mov	x30, x8
	ret

/*
 * write a register in the DCFG block
 * in:  x0 = offset
 * in:  w1 = value to write
 * uses x0, x1, x2
 */
_write_reg_dcfg:
	ldr	x2, =NXP_DCFG_ADDR
	str	w1, [x2, x0]
	ret

/*
 * read a register in the DCFG block
 * in:  x0 = offset
 * out: w0 = value read
 * uses x0, x1
 */
_read_reg_dcfg:
	ldr	x1, =NXP_DCFG_ADDR
	ldr	w0, [x1, x0]
	ret

/*
 * this function sets up the TrustZone Protection Controller (TZPC)
 * in:  none
 * out: none
 * uses x0, x1
 */
init_tzpc:
	/*
	 * set Non Secure access for all devices protected via TZPC
	 * decode Protection-0 Set Reg
	 */
	ldr	x1, =TZPCDECPROT_0_SET_BASE
	/* set decode region to NS, Bits[7:0] */
	mov	w0, #0xFF
	str	w0, [x1]

	/* decode Protection-1 Set Reg */
	ldr	x1, =TZPCDECPROT_1_SET_BASE
	/* set decode region to NS, Bits[7:0] */
	mov	w0, #0xFF
	str	w0, [x1]

	/* decode Protection-2 Set Reg */
	ldr	x1, =TZPCDECPROT_2_SET_BASE
	/* set decode region to NS, Bits[7:0] */
	mov	w0, #0xFF
	str	w0, [x1]

	/*
	 * entire SRAM as NS
	 * secure RAM region size Reg
	 */
	ldr	x1, =NXP_OCRAM_TZPC_ADDR
	/* 0x00000000 = no secure region */
	mov	w0, #0x00000000
	str	w0, [x1]

	ret

/* this function performs initialization on SecMon for boot services */
initSecMon:
	/* read the register hpcomr */
	ldr	x1, =NXP_SNVS_ADDR
	ldr	w0, [x1, #SECMON_HPCOMR_OFFSET]
	/* turn off secure access for the privileged registers */
	orr	w0, w0, #SECMON_HPCOMR_NPSWAEN
	/* write back */
	str	w0, [x1, #SECMON_HPCOMR_OFFSET]

	ret

/*
 * this function returns the redistributor base address for the core specified
 * in x0
 * in:  x0 - core mask lsb of specified core
 * out: x0 = redistributor rd base address for specified core
 * uses x0, x1, x2
 */
get_gic_rd_base:
	/* get the 0-based core number */
	clz	w1, w0
	mov	w2, #0x20
	sub	w2, w2, w1
	sub	w2, w2, #1
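	/* w2 = 31 - clz(w0) = bit position of the set core mask bit */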

	/* x2 = core number / loop counter */

	ldr	x0, =NXP_GICR_ADDR
	mov	x1, #GIC_RD_OFFSET
2:
	cbz	x2, 1f
	add	x0, x0, x1
	sub	x2, x2, #1
	b	2b
1:
	ret

/*
 * this function returns the redistributor base address for the core specified
 * in x0
 * in:  x0 - core mask lsb of specified core
 * out: x0 = redistributor sgi base address for specified core
 * uses x0, x1, x2
 */
get_gic_sgi_base:
	/* get the 0-based core number */
	clz	w1, w0
	mov	w2, #0x20
	sub	w2, w2, w1
	sub	w2, w2, #1

	/* x2 = core number / loop counter */

	ldr	x0, =NXP_GICR_SGI_ADDR
	mov	x1, #GIC_SGI_OFFSET
2:
	cbz	x2, 1f
	add	x0, x0, x1
	sub	x2, x2, #1
	b	2b
1:
	ret

/*
 * this function returns an mpidr value for a core, given a core_mask_lsb
 * in:  x0 = core mask lsb
 * out: x0 = affinity2:affinity1:affinity0, where affinity is 8-bits
 * uses x0, x1
 */
get_mpidr_value:
	/* convert a core mask to an SoC core number */
	clz	w0, w0
	mov	w1, #31
	sub	w0, w1, w0

	/* w0 = SoC core number */

	mov	w1, wzr
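	/* split core number into cluster (Aff1) and core-within-cluster (Aff0) */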
2:
	cmp	w0, #CORES_PER_CLUSTER
	b.lt	1f
	sub	w0, w0, #CORES_PER_CLUSTER
	add	w1, w1, #MPIDR_CLUSTER
	b	2b

	/* insert the mpidr core number */
1:
	orr	w0, w1, w0
	ret

/*
 * write a register in the RESET block
 * in:  x0 = offset
 * in:  w1 = value to write
 * uses x0, x1, x2
 */
_write_reg_reset:
	ldr	x2, =NXP_RESET_ADDR
	str	w1, [x2, x0]
	ret

/*
 * read a register in the RESET block
 * in:  x0 = offset
 * out: w0 = value read
 * uses x0, x1
 */
_read_reg_reset:
	ldr	x1, =NXP_RESET_ADDR
	ldr	w0, [x1, x0]
	ret

/*
 * this function will pwrdown ddr and the final core - it will do this
 * by loading itself into the icache and then executing from there
 * in:  x5  = ipstpcr4 (IPSTPCR4_VALUE bic DEVDISR5_MASK)
 *      x6  = DDR_CNTRL_BASE_ADDR
 *      x7  = NXP_PMU_ADDR
 *      x8  = NXP_DCFG_ADDR
 *      x9  = 0, restartable
 *          = 1, non-restartable
 *      x10 = PMU_IPSTPCR4_OFFSET
 *      x11 = PMU_IPSTPACK4_OFFSET
 *      x12 = PMU_IPSTPCR3_OFFSET
 *      x18 = PMU_IPSTPCR2_OFFSET
 *      x19 = PMU_IPSTPCR1_OFFSET
 *      x21 = PMU_IPSTPCR0_OFFSET
 *      w13 = DEVDISR1 saved value
 *      w14 = DEVDISR2 saved value
 *      w15 = DEVDISR3 saved value
 *      w16 = DEVDISR4 saved value
 *      w17 = DEVDISR5 saved value
 *      x22 = DCFG_DEVDISR5_OFFSET
 *      x23 = NXP_EPU_ADDR
 * out: none
 * uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x13, x14, x15, x16, x17
 * x10, x11, x12, x18, x19, x21, x22, x23
 */

final_pwrdown:
	/* delay */
	mov	w4, #0xffffff
554:
	sub	w4, w4, #1
	cmp	w4, #0
	b.ge	554b

	mov	x0, xzr
	b	touch_line_0

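/*
 * Note on structure: on the first pass through the blocks below
 * (x0 = 0), the cbz at the end of each block hops from one
 * touch_line label to the next, fetching every cache line of this
 * routine into the icache without executing the block bodies. The
 * final cbz branches back to start_line_0, where x0 is set to 1
 * and the sequence then executes for real, entirely from the
 * icache, while DDR is in self-refresh.
 */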
/* 4Kb aligned */
.align 12
start_line_0:
	mov	x0, #1
	/* put ddr in self refresh - start */
	mov	x2, #DDR_SDRAM_CFG_2_FRCSR
	ldr	w3, [x6, #DDR_SDRAM_CFG_2_OFFSET]
	orr	w3, w3, w2
	/* put ddr in self refresh - end */
	str	w3, [x6, #DDR_SDRAM_CFG_2_OFFSET]
	nop
	nop
touch_line_0:
	cbz	x0, touch_line_1

start_line_1:
	/* quiesce ddr clocks - start */
	orr	w3, w5, #DCFG_DEVDISR5_MEM
	mov	w4, w3
	/* quiesce ddr clocks - end */
	str	w4, [x7, x10]
	mov	w3, #DCFG_DEVDISR5_MEM
	/* poll on ipstpack4 - start */
	mov	x2, #DDR_SLEEP_RETRY_CNT
	nop
	nop
touch_line_1:
	cbz	x0, touch_line_2

start_line_2:
	/* x11 = PMU_IPSTPACK4_OFFSET */
	ldr	w1, [x7, x11]
	tst	w1, w3
	b.ne	5f
	subs	x2, x2, #1
	/* poll on ipstpack4 - end */
	b.gt	start_line_2

	/* if we get here, we have a timeout err */
	mov	w4, w5
	/* x10 = PMU_IPSTPCR4_OFFSET re-enable ddr clks interface */
	str	w4, [x7, x10]
touch_line_2:
	cbz	x0, touch_line_3

start_line_3:
	/* load error code */
	mov	x0, #ERROR_DDR_SLEEP
	b	2f
5:
	wfe
	ldr	w1, [x23, #EPU_EPCTR10_OFFSET]
	cbz	w1, 5b

	mov	w4, w5
touch_line_3:
	cbz	x0, touch_line_4

start_line_4:
	/* re-enable ddr in devdisr5 */
	str	w4, [x8, x22]
	/* re-enable ddr clk in ipstpcr4 */
	str	w4, [x7, x10]
13:
	/* poll on ipstpack4 - start */
	ldr	w1, [x7, x11]
	tst	w1, w3
	b.eq	2f
	nop
	b	13b
	/* poll on ipstpack4 - end */
2:
touch_line_4:
	cbz	x0, touch_line_5

start_line_5:
	/* take ddr out of self-refresh - start */
	mov	x2, #DDR_SDRAM_CFG_2_FRCSR
	ldr	w3, [x6, #DDR_SDRAM_CFG_2_OFFSET]
	mov	w4, w3
	bic	w4, w4, w2
	mov	w3, w4
	/* wait for ddr cntrlr clock - start */
	mov	x1, #DDR_SLEEP_RETRY_CNT
3:
	subs	x1, x1, #1
touch_line_5:
	cbz	x0, touch_line_6

start_line_6:
	/* wait for ddr cntrlr clock - end */
	b.gt	3b
	/* take ddr out of self-refresh - end */
	str	w3, [x6, #DDR_SDRAM_CFG_2_OFFSET]
	mov	w1, w17
	/* reset devdisr5 */
	str	w1, [x8, #DCFG_DEVDISR5_OFFSET]
	mov	w1, w16
	/* reset devdisr4 */
	str	w1, [x8, #DCFG_DEVDISR4_OFFSET]
	mov	w1, w15
touch_line_6:
	cbz	x0, touch_line_7

start_line_7:
	/* reset devdisr3 */
	str	w1, [x8, #DCFG_DEVDISR3_OFFSET]
	mov	w1, w14
	/* reset devdisr2 */
	str	w1, [x8, #DCFG_DEVDISR2_OFFSET]
	mov	w1, w13
	/* reset devdisr1 */
	str	w1, [x8, #DCFG_DEVDISR1_OFFSET]
	/* reset ipstpcr4 */
	str	wzr, [x7, x10]
	/* reset ipstpcr3 */
	str	wzr, [x7, x12]
touch_line_7:
	cbz	x0, touch_line_8

start_line_8:
	/* reset ipstpcr2 */
	str	wzr, [x7, x18]
	/* reset ipstpcr1 */
	str	wzr, [x7, x19]
	/* reset ipstpcr0 */
	str	wzr, [x7, x21]

touch_line_8:
	cbz	x0, touch_line_9

start_line_9:
	b	continue_restart
touch_line_9:
	cbz	x0, start_line_0

/* execute here after ddr is back up */
continue_restart:
	/*
	 * if x0 = 1, all is well
	 * if x0 < 1, we had an error
	 */
	cmp	x0, #1
	b.ne	4f
	mov	x0, #0
4:
	ret