/*
 * Copyright 2018-2020 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

.section .text, "ax"

#include <asm_macros.S>

#include <lib/psci/psci.h>
#include <nxp_timer.h>
#include <plat_gic.h>
#include <pmu.h>

#include <bl31_data.h>
#include <plat_psci.h>
#include <platform_def.h>

.global soc_init_start
.global soc_init_percpu
.global soc_init_finish
.global _set_platform_security
.global _soc_set_start_addr

.global _soc_core_release
.global _soc_ck_disabled
.global _soc_core_restart
.global _soc_core_prep_off
.global _soc_core_entr_off
.global _soc_core_exit_off
.global _soc_sys_reset
.global _soc_sys_off
.global _soc_core_prep_stdby
.global _soc_core_entr_stdby
.global _soc_core_exit_stdby
.global _soc_core_prep_pwrdn
.global _soc_core_entr_pwrdn
.global _soc_core_exit_pwrdn
.global _soc_clstr_prep_stdby
.global _soc_clstr_exit_stdby
.global _soc_clstr_prep_pwrdn
.global _soc_clstr_exit_pwrdn
.global _soc_sys_prep_stdby
.global _soc_sys_exit_stdby
.global _soc_sys_prep_pwrdn
.global _soc_sys_pwrdn_wfi
.global _soc_sys_exit_pwrdn

.equ TZPC_BASE,			  0x02200000
.equ TZPCDECPROT_0_SET_BASE, 0x02200804
.equ TZPCDECPROT_1_SET_BASE, 0x02200810
.equ TZPCDECPROT_2_SET_BASE, 0x0220081C

#define CLUSTER_3_CORES_MASK 0xC0
#define CLUSTER_3_IN_RESET  1
#define CLUSTER_3_NORMAL	0
/* Cluster 3 handling is no longer based on frequency, but rather on
 * RCW[850], which is bit 18 of RCWSR27
 */
#define CLUSTER_3_RCW_BIT  0x40000

/* retry count for clock-stop acks */
.equ CLOCK_RETRY_CNT,  800

/* disable prefetching in the A72 core */
#define  CPUACTLR_DIS_LS_HW_PRE	0x100000000000000
#define  CPUACTLR_DIS_L2_TLB_PRE   0x200000
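
/* Note: 0x100000000000000 is bit [56] and 0x200000 is bit [21] of
 * CPUACTLR_EL1 (2^56 and 2^21); see the Cortex-A72 TRM for the exact
 * semantics of these prefetch-disable controls
 */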

/* Function starts the initialization tasks of the soc,
 * using secondary cores if they are available
 *
 * Called from C, saving the non-volatile regs;
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 *
 * in:
 * out:
 * uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11
 */
func soc_init_start
	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	/* make sure the personality has been
	 * established by releasing cores that
	 * are marked "to-be-disabled" from reset
	 */
	bl  release_disabled  		/* 0-9 */

	/* init the task flags */
	bl  _init_task_flags   		/* 0-1 */

	/* set SCRATCHRW7 to 0x0 */
	ldr  x0, =DCFG_SCRATCHRW7_OFFSET
	mov  x1, xzr
	bl   _write_reg_dcfg

1:
	/* restore the aarch32/64 non-volatile registers */
	ldp  x18, x30, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	ret
endfunc soc_init_start


/* Function performs any soc-specific initialization that is needed on
 * a per-core basis.
 * in:  none
 * out: none
 * uses x0, x1, x2, x3
 */
func soc_init_percpu
	stp  x4,  x30,  [sp, #-16]!

	bl   plat_my_core_mask
	mov  x2, x0				/* x2 = core mask */

	/* check if this core is marked for prefetch disable */
	mov   x0, #PREFETCH_DIS_OFFSET
	bl	_get_global_data		/* 0-1 */
	tst   x0, x2
	b.eq  1f
	bl	_disable_ldstr_pfetch_A72	/* 0 */
1:
	mov  x0, #NXP_PMU_ADDR
	bl enable_timer_base_to_cluster
	ldp  x4,  x30,  [sp], #16
	ret
endfunc soc_init_percpu


/* Function completes the initialization tasks of the soc
 * in:
 * out:
 * uses x0, x1, x2, x3, x4
 */
func soc_init_finish
	stp  x4,  x30,  [sp, #-16]!

	ldp   x4,  x30,  [sp], #16
	ret
endfunc soc_init_finish


/* Function sets the security mechanisms in the SoC to implement the
 * Platform Security Policy
 */
func _set_platform_security
	mov  x8, x30

#if (!SUPPRESS_TZC)
	/* initialize the tzpc */
	bl   init_tzpc
#endif

#if (!SUPPRESS_SEC)
	/* initialize secmon */
#ifdef NXP_SNVS_ENABLED
	mov x0, #NXP_SNVS_ADDR
	bl  init_sec_mon
#endif
#endif

	mov  x30, x8
	ret
endfunc _set_platform_security


/* Function writes a 64-bit address to bootlocptrh/l
 * in:  x0, 64-bit address to write to BOOTLOCPTRL/H
 * uses x0, x1, x2
 */
func _soc_set_start_addr
	/* get the 64-bit base address of the dcfg block */
	ldr  x2, =NXP_DCFG_ADDR

	/* write the 32-bit BOOTLOCPTRL register */
	mov  x1, x0
	str  w1, [x2, #DCFG_BOOTLOCPTRL_OFFSET]

	/* write the 32-bit BOOTLOCPTRH register */
	lsr  x1, x0, #32
	str  w1, [x2, #DCFG_BOOTLOCPTRH_OFFSET]
	ret
endfunc _soc_set_start_addr

/* Function releases a secondary core from reset
 * in:   x0 = core_mask_lsb
 * out:  none
 * uses: x0, x1, x2, x3
 */
func _soc_core_release
	mov   x3, x30

	ldr  x1, =NXP_SEC_REGFILE_ADDR
	/* write to CORE_HOLD to tell
	 * the bootrom that this core is
	 * expected to run.
	 */
	str  w0, [x1, #CORE_HOLD_OFFSET]

	/* read-modify-write BRRL to release core */
	mov  x1, #NXP_RESET_ADDR
	ldr  w2, [x1, #BRR_OFFSET]

	/* x0 = core mask */
	orr  w2, w2, w0
	str  w2, [x1, #BRR_OFFSET]
	dsb  sy
	isb
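
	/* Note: the released core is expected to be waiting in the
	 * bootrom on a wfe loop; the sev below wakes it so that it
	 * can fetch its start address from BOOTLOCPTRL/H
	 * (see _soc_set_start_addr)
	 */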

	/* send event */
	sev
	isb

	mov   x30, x3
	ret
endfunc _soc_core_release


/* Function determines if a core is disabled via COREDISABLEDSR
 * in:  w0  = core_mask_lsb
 * out: w0  = 0, core not disabled
 *	  w0 != 0, core disabled
 * uses x0, x1
 */
func _soc_ck_disabled

	/* get base addr of dcfg block */
	ldr  x1, =NXP_DCFG_ADDR

	/* read COREDISABLEDSR */
	ldr  w1, [x1, #DCFG_COREDISABLEDSR_OFFSET]

	/* test core bit */
	and  w0, w1, w0

	ret
endfunc _soc_ck_disabled


/* Part of CPU_ON
 * Function restarts a core shutdown via _soc_core_entr_off
 * in:  x0 = core mask lsb (of the target cpu)
 * out: x0 == 0, on success
 *	  x0 != 0, on failure
 * uses x0, x1, x2, x3, x4, x5, x6
 */
func _soc_core_restart
	mov  x6, x30
	mov  x4, x0

	/* pgm GICD_CTLR - enable secure grp0  */
	mov  x5, #NXP_GICD_ADDR
	ldr  w2, [x5, #GICD_CTLR_OFFSET]
	orr  w2, w2, #GICD_CTLR_EN_GRP_0
	str  w2, [x5, #GICD_CTLR_OFFSET]
	dsb sy
	isb

	/* poll on RWP til write completes */
4:
	ldr  w2, [x5, #GICD_CTLR_OFFSET]
	tst  w2, #GICD_CTLR_RWP
	b.ne 4b

	/* x4 = core mask lsb
	 * x5 = gicd base addr
	 */
	mov  x0, x4
	bl   get_mpidr_value

	/* x0 = mpidr of target core
	 * x4 = core mask lsb of target core
	 * x5 = gicd base addr
	 */

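	/* assemble the SGI payload in ICC_SGI0R_EL1 format:
	 * target list in bits [15:0], Aff1 in bits [23:16],
	 * INTID in bits [27:24]
	 */
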
	/* generate target list bit */
	and  x1, x0, #MPIDR_AFFINITY0_MASK
	mov  x2, #1
	lsl  x2, x2, x1

	/* get the affinity1 field */
	and  x1, x0, #MPIDR_AFFINITY1_MASK
	lsl  x1, x1, #8
	orr  x2, x2, x1

	/* insert the INTID for SGI15 */
	orr  x2, x2, #ICC_SGI0R_EL1_INTID

	/* fire the SGI */
	msr  ICC_SGI0R_EL1, x2
	dsb  sy
	isb

	/* load '0' on success */
	mov  x0, xzr

	mov  x30, x6
	ret
endfunc _soc_core_restart


/* Part of CPU_OFF
 * Function programs SoC & GIC registers in preparation for shutting down
 * the core
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2, x3, x4, x5, x6, x7, x8
 */
func _soc_core_prep_off
	mov  x8, x30
	mov  x7, x0		/* x7 = core mask lsb */

	mrs  x1, CORTEX_A72_ECTLR_EL1

	/* set smp and disable L2 snoops in cpuectlr */
	orr  x1, x1, #CPUECTLR_SMPEN_EN
	orr  x1, x1, #CPUECTLR_DISABLE_TWALK_PREFETCH
	bic  x1, x1, #CPUECTLR_INS_PREFETCH_MASK
	bic  x1, x1, #CPUECTLR_DAT_PREFETCH_MASK

	/* set retention control in cpuectlr */
	bic  x1, x1, #CPUECTLR_TIMER_MASK
	orr  x1, x1, #CPUECTLR_TIMER_8TICKS
	msr  CORTEX_A72_ECTLR_EL1, x1
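
	/* Note: CPUECTLR[2:0] is the cpu retention control; the
	 * 8TICKS setting lets the core enter retention after 8 idle
	 * ticks of the retention timer (see the A72 TRM for the
	 * exact encodings)
	 */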

	/* get redistributor rd base addr for this core */
	mov  x0, x7
	bl   get_gic_rd_base
	mov  x6, x0

	/* get redistributor sgi base addr for this core */
	mov  x0, x7
	bl   get_gic_sgi_base
	mov  x5, x0

	/* x5 = gicr sgi base addr
	 * x6 = gicr rd  base addr
	 * x7 = core mask lsb
	 */

	/* disable SGI 15 at redistributor - GICR_ICENABLER0 */
	mov  w3, #GICR_ICENABLER0_SGI15
	str  w3, [x5, #GICR_ICENABLER0_OFFSET]
2:
	/* poll on rwp bit in GICR_CTLR */
	ldr  w4, [x6, #GICR_CTLR_OFFSET]
	tst  w4, #GICR_CTLR_RWP
	b.ne 2b

	/* disable GRP1 interrupts at cpu interface */
	msr  ICC_IGRPEN1_EL3, xzr

	/* disable GRP0 ints at cpu interface */
	msr  ICC_IGRPEN0_EL1, xzr

	/* program the redistributor - poll on GICR_CTLR.RWP as needed */

	/* define SGI 15 as Grp0 - GICR_IGROUPR0 */
	ldr  w4, [x5, #GICR_IGROUPR0_OFFSET]
	bic  w4, w4, #GICR_IGROUPR0_SGI15
	str  w4, [x5, #GICR_IGROUPR0_OFFSET]

	/* define SGI 15 as Grp0 - GICR_IGRPMODR0 */
	ldr  w3, [x5, #GICR_IGRPMODR0_OFFSET]
	bic  w3, w3, #GICR_IGRPMODR0_SGI15
	str  w3, [x5, #GICR_IGRPMODR0_OFFSET]

	/* set priority of SGI 15 to highest (0x0) - GICR_IPRIORITYR3 */
	ldr  w4, [x5, #GICR_IPRIORITYR3_OFFSET]
	bic  w4, w4, #GICR_IPRIORITYR3_SGI15_MASK
	str  w4, [x5, #GICR_IPRIORITYR3_OFFSET]

	/* enable SGI 15 at redistributor - GICR_ISENABLER0 */
	mov  w3, #GICR_ISENABLER0_SGI15
	str  w3, [x5, #GICR_ISENABLER0_OFFSET]
	dsb  sy
	isb
3:
	/* poll on rwp bit in GICR_CTLR */
	ldr  w4, [x6, #GICR_CTLR_OFFSET]
	tst  w4, #GICR_CTLR_RWP
	b.ne 3b

	/* quiesce the debug interfaces */
	mrs  x3, osdlr_el1
	orr  x3, x3, #OSDLR_EL1_DLK_LOCK
	msr  osdlr_el1, x3
	isb

	/* enable grp0 ints */
	mov  x3, #ICC_IGRPEN0_EL1_EN
	msr  ICC_IGRPEN0_EL1, x3

	/* x5 = gicr sgi base addr
	 * x6 = gicr rd  base addr
	 * x7 = core mask lsb
	 */

	/* clear any pending interrupts */
	mvn  w1, wzr
	str  w1, [x5, #GICR_ICPENDR0_OFFSET]

	/* make sure system counter is enabled */
	ldr  x3, =NXP_TIMER_ADDR
	ldr  w0, [x3, #SYS_COUNTER_CNTCR_OFFSET]
	tst  w0, #SYS_COUNTER_CNTCR_EN
	b.ne 4f
	orr  w0, w0, #SYS_COUNTER_CNTCR_EN
	str  w0, [x3, #SYS_COUNTER_CNTCR_OFFSET]
4:
	/* enable the core timer and mask timer interrupt */
	mov  x1, #CNTP_CTL_EL0_EN
	orr  x1, x1, #CNTP_CTL_EL0_IMASK
	msr  cntp_ctl_el0, x1

	isb
	mov  x30, x8
	ret
endfunc _soc_core_prep_off


/* Part of CPU_OFF:
 * Function performs the final steps to shut down the core
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2, x3, x4, x5
 */
func _soc_core_entr_off
	mov  x5, x30
	mov  x4, x0

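	/* loop here on wfi until _soc_core_restart marks this core
	 * CORE_WAKEUP and signals SGI15; a wakeup for any other
	 * reason goes back into wfi
	 */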
1:
	/* enter low-power state by executing wfi */
	wfi

	/* see if SGI15 woke us up */
	mrs  x2, ICC_IAR0_EL1
	mov  x3, #ICC_IAR0_EL1_SGI15
	cmp  x2, x3
	b.ne 2f

	/* deactivate the interrupt */
	msr ICC_EOIR0_EL1, x2

2:
	/* check if core is turned ON */
	mov  x0, x4
	/* fetch the core state into x0 */
	bl   _getCoreState

	cmp  x0, #CORE_WAKEUP
	b.ne 1b

	/* if we are here, the core has been woken up - exit the wfi loop */

	mov  x30, x5
	ret
endfunc _soc_core_entr_off


/* Part of CPU_OFF:
 * Function starts the process of starting a core back up
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2, x3, x4, x5, x6
 */
func _soc_core_exit_off
	mov  x6, x30
	mov  x5, x0

	/* disable forwarding of GRP0 ints at cpu interface */
	msr  ICC_IGRPEN0_EL1, xzr

	/* get redistributor sgi base addr for this core */
	mov  x0, x5
	bl   get_gic_sgi_base
	mov  x4, x0

	/* x4 = gicr sgi base addr
	 * x5 = core mask
	 */

	/* disable SGI 15 at redistributor - GICR_ICENABLER0 */
	mov  w1, #GICR_ICENABLER0_SGI15
	str  w1, [x4, #GICR_ICENABLER0_OFFSET]

	/* get redistributor rd base addr for this core */
	mov  x0, x5
	bl   get_gic_rd_base
	mov  x4, x0

2:
	/* poll on rwp bit in GICR_CTLR */
	ldr  w2, [x4, #GICR_CTLR_OFFSET]
	tst  w2, #GICR_CTLR_RWP
	b.ne 2b

	/* unlock the debug interfaces */
	mrs  x3, osdlr_el1
	bic  x3, x3, #OSDLR_EL1_DLK_LOCK
	msr  osdlr_el1, x3
	isb

	dsb sy
	isb
	mov  x30, x6
	ret
endfunc _soc_core_exit_off


/* Function requests a reset of the entire SOC
 * in:  none
 * out: none
 * uses: x0, x1, x2, x3, x4, x5, x6
 */
func _soc_sys_reset
	mov  x6, x30

	ldr  x2, =NXP_RST_ADDR
	/* clear the RST_REQ_MSK and SW_RST_REQ */

	mov  w0, #0x00000000
	str  w0, [x2, #RSTCNTL_OFFSET]

	/* initiate the sw reset request */
	mov  w0, #SW_RST_REQ_INIT
	str  w0, [x2, #RSTCNTL_OFFSET]

	/* In case this address range is mapped as cacheable,
	 * flush the write out of the dcaches.
	 */
	add  x2, x2, #RSTCNTL_OFFSET
	dc   cvac, x2
	dsb  st
	isb

	/* Function does not return */
	b  .
endfunc _soc_sys_reset


/* Part of SYSTEM_OFF:
 * Function turns off the SoC clocks
 * Note: Function is not intended to return, and the only allowable
 *	   recovery is POR
 * in:  none
 * out: none
 * uses x0, x1, x2, x3
 */
func _soc_sys_off

	/* disable sec, QBman, spi and qspi */
	ldr  x2, =NXP_DCFG_ADDR
	ldr  x0, =DCFG_DEVDISR1_OFFSET
	ldr  w1, =DCFG_DEVDISR1_SEC
	str  w1, [x2, x0]
	ldr  x0, =DCFG_DEVDISR3_OFFSET
	ldr  w1, =DCFG_DEVDISR3_QBMAIN
	str  w1, [x2, x0]
	ldr  x0, =DCFG_DEVDISR4_OFFSET
	ldr  w1, =DCFG_DEVDISR4_SPI_QSPI
	str  w1, [x2, x0]

	/* set TPMWAKEMR0 */
	ldr  x0, =TPMWAKEMR0_ADDR
	mov  w1, #0x1
	str  w1, [x0]

	/* disable icache, dcache, mmu @ EL1 */
	mov  x1, #SCTLR_I_C_M_MASK
	mrs  x0, sctlr_el1
	bic  x0, x0, x1
	msr  sctlr_el1, x0

	/* disable L2 prefetches */
	mrs  x0, CORTEX_A72_ECTLR_EL1
	bic  x0, x0, #CPUECTLR_TIMER_MASK
	orr  x0, x0, #CPUECTLR_SMPEN_EN
	orr  x0, x0, #CPUECTLR_TIMER_8TICKS
	msr  CORTEX_A72_ECTLR_EL1, x0
	isb

	/* disable CCN snoop domain */
	mov  x1, #NXP_CCN_HN_F_0_ADDR
	ldr  x0, =CCN_HN_F_SNP_DMN_CTL_MASK
	str  x0, [x1, #CCN_HN_F_SNP_DMN_CTL_CLR_OFFSET]
3:
	ldr  w2, [x1, #CCN_HN_F_SNP_DMN_CTL_OFFSET]
	cmp  w2, #0x2
	b.ne 3b

	mov  x3, #NXP_PMU_ADDR

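	/* shutdown handshake with the PMU: wait for the cores to
	 * idle, idle the cluster ACP interfaces, flush the cluster
	 * L2 caches, then mark the cluster interconnect interfaces
	 * inactive - each step below is confirmed by polling the
	 * corresponding status register
	 */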
4:
	ldr  w1, [x3, #PMU_PCPW20SR_OFFSET]
	cmp  w1, #PMU_IDLE_CORE_MASK
	b.ne 4b

	mov  w1, #PMU_IDLE_CLUSTER_MASK
	str  w1, [x3, #PMU_CLAINACTSETR_OFFSET]

1:
	ldr  w1, [x3, #PMU_PCPW20SR_OFFSET]
	cmp  w1, #PMU_IDLE_CORE_MASK
	b.ne 1b

	mov  w1, #PMU_FLUSH_CLUSTER_MASK
	str  w1, [x3, #PMU_CLL2FLUSHSETR_OFFSET]

2:
	ldr  w1, [x3, #PMU_CLL2FLUSHSR_OFFSET]
	cmp  w1, #PMU_FLUSH_CLUSTER_MASK
	b.ne 2b

	mov  w1, #PMU_FLUSH_CLUSTER_MASK
	str  w1, [x3, #PMU_CLSL2FLUSHCLRR_OFFSET]

	mov  w1, #PMU_FLUSH_CLUSTER_MASK
	str  w1, [x3, #PMU_CLSINACTSETR_OFFSET]

	mov  x2, #DAIF_SET_MASK
	mrs  x1, spsr_el1
	orr  x1, x1, x2
	msr  spsr_el1, x1

	mrs  x1, spsr_el2
	orr  x1, x1, x2
	msr  spsr_el2, x1

	/* force the debug interface to be quiescent */
	mrs  x0, osdlr_el1
	orr  x0, x0, #0x1
	msr  osdlr_el1, x0

	/* invalidate all TLB entries at all 3 exception levels */
	tlbi alle1
	tlbi alle2
	tlbi alle3

	/* x3 = pmu base addr */

	/* request lpm20 */
	ldr  x0, =PMU_POWMGTCSR_OFFSET
	ldr  w1, =PMU_POWMGTCSR_VAL
	str  w1, [x3, x0]

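	/* Note: the Z flag is still set from the last cmp above, so
	 * the b.eq below always branches back - the core parks in
	 * this wfe loop until a power-on reset occurs
	 */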
5:
	wfe
	b.eq  5b
endfunc _soc_sys_off


/* Part of CPU_SUSPEND
 * Function puts the calling core into standby state
 * in:  x0 = core mask lsb
 * out: none
 * uses x0
 */
func _soc_core_entr_stdby

	dsb  sy
	isb
	wfi

	ret
endfunc _soc_core_entr_stdby


/* Part of CPU_SUSPEND
 * Function performs SoC-specific programming prior to standby
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
func _soc_core_prep_stdby

	/* clear CORTEX_A72_ECTLR_EL1[2:0] */
	mrs  x1, CORTEX_A72_ECTLR_EL1
	bic  x1, x1, #CPUECTLR_TIMER_MASK
	msr  CORTEX_A72_ECTLR_EL1, x1

	ret
endfunc _soc_core_prep_stdby


/* Part of CPU_SUSPEND
 * Function performs any SoC-specific cleanup after standby state
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_core_exit_stdby

	ret
endfunc _soc_core_exit_stdby


/* Part of CPU_SUSPEND
 * Function performs SoC-specific programming prior to power-down
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2
 */
func _soc_core_prep_pwrdn

	/* make sure system counter is enabled */
	ldr  x2, =NXP_TIMER_ADDR
	ldr  w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
	tst  w0, #SYS_COUNTER_CNTCR_EN
	b.ne 1f
	orr  w0, w0, #SYS_COUNTER_CNTCR_EN
	str  w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
1:

	/* enable dynamic retention control (CPUECTLR[2:0])
	 * set the SMPEN bit (CPUECTLR[6])
	 */
	mrs  x1, CORTEX_A72_ECTLR_EL1
	bic  x1, x1, #CPUECTLR_RET_MASK
	orr  x1, x1, #CPUECTLR_TIMER_8TICKS
	orr  x1, x1, #CPUECTLR_SMPEN_EN
	msr  CORTEX_A72_ECTLR_EL1, x1

	isb
	ret
endfunc _soc_core_prep_pwrdn
/* Part of CPU_SUSPEND
 * Function puts the calling core into a power-down state
 * in:  x0 = core mask lsb
 * out: none
 * uses x0
 */
func _soc_core_entr_pwrdn

	/* X0 = core mask lsb */

	dsb  sy
	isb
	wfi

	ret
endfunc _soc_core_entr_pwrdn


/* Part of CPU_SUSPEND
 * Function performs any SoC-specific cleanup after power-down state
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_core_exit_pwrdn

	ret
endfunc _soc_core_exit_pwrdn


/* Part of CPU_SUSPEND
 * Function performs SoC-specific programming prior to standby
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
func _soc_clstr_prep_stdby

	/* clear CORTEX_A72_ECTLR_EL1[2:0] */
	mrs  x1, CORTEX_A72_ECTLR_EL1
	bic  x1, x1, #CPUECTLR_TIMER_MASK
	msr  CORTEX_A72_ECTLR_EL1, x1

	ret
endfunc _soc_clstr_prep_stdby


/* Part of CPU_SUSPEND
 * Function performs any SoC-specific cleanup after standby state
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_clstr_exit_stdby

	ret
endfunc _soc_clstr_exit_stdby


/* Part of CPU_SUSPEND
 * Function performs SoC-specific programming prior to power-down
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2
 */
func _soc_clstr_prep_pwrdn

	/* make sure system counter is enabled */
	ldr  x2, =NXP_TIMER_ADDR
	ldr  w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
	tst  w0, #SYS_COUNTER_CNTCR_EN
	b.ne 1f
	orr  w0, w0, #SYS_COUNTER_CNTCR_EN
	str  w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
1:

	/* enable dynamic retention control (CPUECTLR[2:0])
	 * set the SMPEN bit (CPUECTLR[6])
	 */
	mrs  x1, CORTEX_A72_ECTLR_EL1
	bic  x1, x1, #CPUECTLR_RET_MASK
	orr  x1, x1, #CPUECTLR_TIMER_8TICKS
	orr  x1, x1, #CPUECTLR_SMPEN_EN
	msr  CORTEX_A72_ECTLR_EL1, x1

	isb
	ret
endfunc _soc_clstr_prep_pwrdn


/* Part of CPU_SUSPEND
 * Function performs any SoC-specific cleanup after power-down state
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_clstr_exit_pwrdn

	ret
endfunc _soc_clstr_exit_pwrdn


/* Part of CPU_SUSPEND
 * Function performs SoC-specific programming prior to standby
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
func _soc_sys_prep_stdby

	/* clear CORTEX_A72_ECTLR_EL1[2:0] */
	mrs  x1, CORTEX_A72_ECTLR_EL1
	bic  x1, x1, #CPUECTLR_TIMER_MASK
	msr  CORTEX_A72_ECTLR_EL1, x1
	ret
endfunc _soc_sys_prep_stdby


/* Part of CPU_SUSPEND
 * Function performs any SoC-specific cleanup after standby state
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_sys_exit_stdby

	ret
endfunc _soc_sys_exit_stdby


/* Part of CPU_SUSPEND
 * Function performs SoC-specific programming prior to
 * suspend-to-power-down
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
func _soc_sys_prep_pwrdn

	mrs   x1, CORTEX_A72_ECTLR_EL1
	/* make sure the smp bit is set */
	orr   x1, x1, #CPUECTLR_SMPEN_MASK
	/* set the retention control */
	orr   x1, x1, #CPUECTLR_RET_8CLK
	/* disable tablewalk prefetch */
	orr   x1, x1, #CPUECTLR_DISABLE_TWALK_PREFETCH
	msr   CORTEX_A72_ECTLR_EL1, x1
	isb

	ret
endfunc _soc_sys_prep_pwrdn


/* Part of CPU_SUSPEND
 * Function puts the calling core, and potentially the soc, into a
 * low-power state
 * in:  x0 = core mask lsb
 * out: x0 = 0, success
 *	  x0 < 0, failure
 * uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14,
 *	  x15, x16, x17, x18, x19, x20, x21, x28
 */
func _soc_sys_pwrdn_wfi
	mov  x28, x30

	/* disable cluster snooping in the CCN-508 */
	ldr  x1, =NXP_CCN_HN_F_0_ADDR
	ldr  x7, [x1, #CCN_HN_F_SNP_DMN_CTL_OFFSET]
	mov  x6, #CCN_HNF_NODE_COUNT
1:
	str  x7, [x1, #CCN_HN_F_SNP_DMN_CTL_CLR_OFFSET]
	sub  x6, x6, #1
	add  x1, x1, #CCN_HNF_OFFSET
	cbnz x6, 1b
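
	/* x7 holds the snoop-domain-control value read from the
	 * first hnf node; it is saved on the stack below and used
	 * to re-enable snooping on the exit path
	 */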

	/* x0  = core mask
	 * x7  = hnf sdcr
	 */

	ldr  x1, =NXP_PMU_CCSR_ADDR
	ldr  x2, =NXP_PMU_DCSR_ADDR

	/* enable the stop-request-override */
	mov  x3, #PMU_POWMGTDCR0_OFFSET
	mov  x4, #POWMGTDCR_STP_OV_EN
	str  w4, [x2, x3]
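
	/* Note: the stop-request-override presumably forces PMU stop
	 * requests through even if an IP block does not respond; the
	 * exit path below turns it back off
	 */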

	/* x0  = core mask
	 * x1  = NXP_PMU_CCSR_ADDR
	 * x2  = NXP_PMU_DCSR_ADDR
	 * x7  = hnf sdcr
	 */

	/* disable prefetching in the A72 core */
	mrs  x8, CORTEX_A72_CPUACTLR_EL1
	tst  x8, #CPUACTLR_DIS_LS_HW_PRE
	b.ne 2f
	dsb  sy
	isb
	/* disable data prefetch */
	orr  x16, x8, #CPUACTLR_DIS_LS_HW_PRE
	/* disable tlb prefetch */
	orr  x16, x16, #CPUACTLR_DIS_L2_TLB_PRE
	msr  CORTEX_A72_CPUACTLR_EL1, x16
	isb

	/* x0  = core mask
	 * x1  = NXP_PMU_CCSR_ADDR
	 * x2  = NXP_PMU_DCSR_ADDR
	 * x7  = hnf sdcr
	 * x8  = cpuactlr
	 */

2:
	/* save hnf-sdcr and cpuactlr to stack */
	stp  x7,  x8,  [sp, #-16]!

	/* x0  = core mask
	 * x1  = NXP_PMU_CCSR_ADDR
	 * x2  = NXP_PMU_DCSR_ADDR
	 */

	/* save the IPSTPCRn registers to stack */
	mov  x15, #PMU_IPSTPCR0_OFFSET
	ldr  w9,  [x1, x15]
	mov  x16, #PMU_IPSTPCR1_OFFSET
	ldr  w10, [x1, x16]
	mov  x17, #PMU_IPSTPCR2_OFFSET
	ldr  w11, [x1, x17]
	mov  x18, #PMU_IPSTPCR3_OFFSET
	ldr  w12, [x1, x18]
	mov  x19, #PMU_IPSTPCR4_OFFSET
	ldr  w13, [x1, x19]
	mov  x20, #PMU_IPSTPCR5_OFFSET
	ldr  w14, [x1, x20]

	stp  x9,  x10,  [sp, #-16]!
	stp  x11, x12,  [sp, #-16]!
	stp  x13, x14,  [sp, #-16]!

	/* x0  = core mask
	 * x1  = NXP_PMU_CCSR_ADDR
	 * x2  = NXP_PMU_DCSR_ADDR
	 * x15 = PMU_IPSTPCR0_OFFSET
	 * x16 = PMU_IPSTPCR1_OFFSET
	 * x17 = PMU_IPSTPCR2_OFFSET
	 * x18 = PMU_IPSTPCR3_OFFSET
	 * x19 = PMU_IPSTPCR4_OFFSET
	 * x20 = PMU_IPSTPCR5_OFFSET
	 */

	/* load the full clock mask for IPSTPCR0 */
	ldr  x3, =DEVDISR1_MASK
	/* get the exclusions */
	mov  x21, #PMU_IPPDEXPCR0_OFFSET
	ldr  w4, [x1, x21]
	/* apply the exclusions to the mask */
	bic  w7, w3, w4
	/* stop the clocks in IPSTPCR0 */
	str  w7, [x1, x15]

	/* use same procedure for IPSTPCR1-IPSTPCR5 */

	/* stop the clocks in IPSTPCR1 */
	ldr  x5, =DEVDISR2_MASK
	mov  x21, #PMU_IPPDEXPCR1_OFFSET
	ldr  w6, [x1, x21]
	bic  w8, w5, w6
	str  w8, [x1, x16]

	/* stop the clocks in IPSTPCR2 */
	ldr  x3, =DEVDISR3_MASK
	mov  x21, #PMU_IPPDEXPCR2_OFFSET
	ldr  w4, [x1, x21]
	bic  w9, w3, w4
	str  w9, [x1, x17]

	/* stop the clocks in IPSTPCR3 */
	ldr  x5,  =DEVDISR4_MASK
	mov  x21, #PMU_IPPDEXPCR3_OFFSET
	ldr  w6,  [x1, x21]
	bic  w10, w5, w6
	str  w10, [x1, x18]

	/* stop the clocks in IPSTPCR4
	 *   - exclude the ddr clocks as we are currently executing
	 *	 out of *some* memory, might be ddr
	 *   - exclude the OCRAM clk so that we retain any code/data in
	 *	 OCRAM
	 *   - may need to exclude the debug clock if we are testing
	 */
	ldr  x3, =DEVDISR5_MASK
	mov  w6, #DEVDISR5_MASK_ALL_MEM
	bic  w3, w3, w6

	mov  w5, #POLICY_DEBUG_ENABLE
	cbz  w5, 3f
	mov  w6, #DEVDISR5_MASK_DBG
	bic  w3, w3, w6
3:
	mov  x21, #PMU_IPPDEXPCR4_OFFSET
	ldr  w4,  [x1, x21]
	bic  w11, w3, w4
	str  w11, [x1, x19]

	/* stop the clocks in IPSTPCR5 */
	ldr  x5,  =DEVDISR6_MASK
	mov  x21, #PMU_IPPDEXPCR5_OFFSET
	ldr  w6,  [x1, x21]
	bic  w12, w5, w6
	str  w12, [x1, x20]

	/* x0  = core mask
	 * x1  = NXP_PMU_CCSR_ADDR
	 * x2  = NXP_PMU_DCSR_ADDR
	 * x7  = IPSTPCR0
	 * x8  = IPSTPCR1
	 * x9  = IPSTPCR2
	 * x10 = IPSTPCR3
	 * x11 = IPSTPCR4
	 * x12 = IPSTPCR5
	 */

	/* poll until the clocks are stopped in IPSTPACKSR0 */
	mov  w4,  #CLOCK_RETRY_CNT
	mov  x21, #PMU_IPSTPACKSR0_OFFSET
4:
	ldr  w5, [x1, x21]
	cmp  w5, w7
	b.eq 5f
	sub  w4, w4, #1
	cbnz w4, 4b
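
	/* Note: each ack poll is bounded by CLOCK_RETRY_CNT so that
	 * a non-responding IP block cannot hang the suspend entry;
	 * on timeout we fall through to the next poll
	 */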

	/* poll until the clocks are stopped in IPSTPACKSR1 */
5:
	mov  w4,  #CLOCK_RETRY_CNT
	mov  x21, #PMU_IPSTPACKSR1_OFFSET
6:
	ldr  w5, [x1, x21]
	cmp  w5, w8
	b.eq 7f
	sub  w4, w4, #1
	cbnz w4, 6b

	/* poll until the clocks are stopped in IPSTPACKSR2 */
7:
	mov  w4,  #CLOCK_RETRY_CNT
	mov  x21, #PMU_IPSTPACKSR2_OFFSET
8:
	ldr  w5, [x1, x21]
	cmp  w5, w9
	b.eq 9f
	sub  w4, w4, #1
	cbnz w4, 8b

	/* poll until the clocks are stopped in IPSTPACKSR3 */
9:
	mov  w4,  #CLOCK_RETRY_CNT
	mov  x21, #PMU_IPSTPACKSR3_OFFSET
10:
	ldr  w5, [x1, x21]
	cmp  w5, w10
	b.eq 11f
	sub  w4, w4, #1
	cbnz w4, 10b

	/* poll until the clocks are stopped in IPSTPACKSR4 */
11:
	mov  w4,  #CLOCK_RETRY_CNT
	mov  x21, #PMU_IPSTPACKSR4_OFFSET
12:
	ldr  w5, [x1, x21]
	cmp  w5, w11
	b.eq 13f
	sub  w4, w4, #1
	cbnz w4, 12b

	/* poll until the clocks are stopped in IPSTPACKSR5 */
13:
	mov  w4,  #CLOCK_RETRY_CNT
	mov  x21, #PMU_IPSTPACKSR5_OFFSET
14:
	ldr  w5, [x1, x21]
	cmp  w5, w12
	b.eq 15f
	sub  w4, w4, #1
	cbnz w4, 14b

	/* x0  = core mask
	 * x1  = NXP_PMU_CCSR_ADDR
	 * x2  = NXP_PMU_DCSR_ADDR
	 * x7  = IPSTPCR0
	 * x8  = IPSTPCR1
	 * x9  = IPSTPCR2
	 * x10 = IPSTPCR3
	 * x11 = IPSTPCR4
	 * x12 = IPSTPCR5
	 */

15:
	mov  x3, #NXP_DCFG_ADDR

	/* save the devdisr registers to stack */
	ldr  w13, [x3, #DCFG_DEVDISR1_OFFSET]
	ldr  w14, [x3, #DCFG_DEVDISR2_OFFSET]
	ldr  w15, [x3, #DCFG_DEVDISR3_OFFSET]
	ldr  w16, [x3, #DCFG_DEVDISR4_OFFSET]
	ldr  w17, [x3, #DCFG_DEVDISR5_OFFSET]
	ldr  w18, [x3, #DCFG_DEVDISR6_OFFSET]

	stp  x13, x14,  [sp, #-16]!
	stp  x15, x16,  [sp, #-16]!
	stp  x17, x18,  [sp, #-16]!

	/* power down the IP in DEVDISR1 - corresponds to IPSTPCR0 */
	str  w7,  [x3, #DCFG_DEVDISR1_OFFSET]

	/* power down the IP in DEVDISR2 - corresponds to IPSTPCR1 */
	str  w8, [x3, #DCFG_DEVDISR2_OFFSET]

	/* power down the IP in DEVDISR3 - corresponds to IPSTPCR2 */
	str  w9,  [x3, #DCFG_DEVDISR3_OFFSET]

	/* power down the IP in DEVDISR4 - corresponds to IPSTPCR3 */
	str  w10, [x3, #DCFG_DEVDISR4_OFFSET]

	/* power down the IP in DEVDISR5 - corresponds to IPSTPCR4 */
	str  w11, [x3, #DCFG_DEVDISR5_OFFSET]

	/* power down the IP in DEVDISR6 - corresponds to IPSTPCR5 */
	str  w12, [x3, #DCFG_DEVDISR6_OFFSET]

	/* setup register values for the cache-only sequence */
	mov  x4, #NXP_DDR_ADDR
	mov  x5, #NXP_DDR2_ADDR
	mov  x6, x11
	mov  x7, x17
	ldr  x12, =PMU_CLAINACTSETR_OFFSET
	ldr  x13, =PMU_CLSINACTSETR_OFFSET
	ldr  x14, =PMU_CLAINACTCLRR_OFFSET
	ldr  x15, =PMU_CLSINACTCLRR_OFFSET

	/* x0  = core mask
	 * x1  = NXP_PMU_CCSR_ADDR
	 * x2  = NXP_PMU_DCSR_ADDR
	 * x3  = NXP_DCFG_ADDR
	 * x4  = NXP_DDR_ADDR
	 * x5  = NXP_DDR2_ADDR
	 * w6  = IPSTPCR4
	 * w7  = DEVDISR5
	 * x12 = PMU_CLAINACTSETR_OFFSET
	 * x13 = PMU_CLSINACTSETR_OFFSET
	 * x14 = PMU_CLAINACTCLRR_OFFSET
	 * x15 = PMU_CLSINACTCLRR_OFFSET
	 */

	mov  x8, #POLICY_DEBUG_ENABLE
	cbnz x8, 29f
	/* force the debug interface to be quiescent */
	mrs  x9, OSDLR_EL1
	orr  x9, x9, #0x1
	msr  OSDLR_EL1, x9

	/* enter the cache-only sequence */
29:
	bl   final_pwrdown

	/* when we are here, the core has come out of wfi and the
	 * ddr is back up
	 */

	mov  x8, #POLICY_DEBUG_ENABLE
	cbnz x8, 30f
	/* restart the debug interface */
	mrs  x9, OSDLR_EL1
	mov  x10, #1
	bic  x9, x9, x10
	msr  OSDLR_EL1, x9

	/* get saved DEVDISR regs off stack */
30:
	ldp  x17, x18, [sp], #16
	ldp  x15, x16, [sp], #16
	ldp  x13, x14, [sp], #16
	/* restore DEVDISR regs */
	str  w18, [x3, #DCFG_DEVDISR6_OFFSET]
	str  w17, [x3, #DCFG_DEVDISR5_OFFSET]
	str  w16, [x3, #DCFG_DEVDISR4_OFFSET]
	str  w15, [x3, #DCFG_DEVDISR3_OFFSET]
	str  w14, [x3, #DCFG_DEVDISR2_OFFSET]
	str  w13, [x3, #DCFG_DEVDISR1_OFFSET]
	isb

	/* get saved IPSTPCRn regs off stack */
	ldp  x13, x14, [sp], #16
	ldp  x11, x12, [sp], #16
	ldp  x9,  x10, [sp], #16

	/* restore IPSTPCRn regs */
	mov  x15, #PMU_IPSTPCR5_OFFSET
	str  w14, [x1, x15]
	mov  x16, #PMU_IPSTPCR4_OFFSET
	str  w13, [x1, x16]
	mov  x17, #PMU_IPSTPCR3_OFFSET
	str  w12, [x1, x17]
	mov  x18, #PMU_IPSTPCR2_OFFSET
	str  w11, [x1, x18]
	mov  x19, #PMU_IPSTPCR1_OFFSET
	str  w10, [x1, x19]
	mov  x20, #PMU_IPSTPCR0_OFFSET
	str  w9,  [x1, x20]
	isb

	/* poll on IPSTPACKSRn regs til IP clocks are restarted */
	mov  w4,  #CLOCK_RETRY_CNT
	mov  x15, #PMU_IPSTPACKSR5_OFFSET
16:
	ldr  w5, [x1, x15]
	and  w5, w5, w14
	cbz  w5, 17f
	sub  w4, w4, #1
	cbnz w4, 16b

17:
	mov  w4,  #CLOCK_RETRY_CNT
	mov  x15, #PMU_IPSTPACKSR4_OFFSET
18:
	ldr  w5, [x1, x15]
	and  w5, w5, w13
	cbz  w5, 19f
	sub  w4, w4, #1
	cbnz w4, 18b

19:
	mov  w4,  #CLOCK_RETRY_CNT
	mov  x15, #PMU_IPSTPACKSR3_OFFSET
20:
	ldr  w5, [x1, x15]
	and  w5, w5, w12
	cbz  w5, 21f
	sub  w4, w4, #1
	cbnz w4, 20b

21:
	mov  w4,  #CLOCK_RETRY_CNT
	mov  x15, #PMU_IPSTPACKSR2_OFFSET
22:
	ldr  w5, [x1, x15]
	and  w5, w5, w11
	cbz  w5, 23f
	sub  w4, w4, #1
	cbnz w4, 22b

23:
	mov  w4,  #CLOCK_RETRY_CNT
	mov  x15, #PMU_IPSTPACKSR1_OFFSET
24:
	ldr  w5, [x1, x15]
	and  w5, w5, w10
	cbz  w5, 25f
	sub  w4, w4, #1
	cbnz w4, 24b

25:
	mov  w4,  #CLOCK_RETRY_CNT
	mov  x15, #PMU_IPSTPACKSR0_OFFSET
26:
	ldr  w5, [x1, x15]
	and  w5, w5, w9
	cbz  w5, 27f
	sub  w4, w4, #1
	cbnz w4, 26b

27:
	/* disable the stop-request-override */
	mov  x8, #PMU_POWMGTDCR0_OFFSET
	mov  w9, #POWMGTDCR_STP_OV_EN
	str  w9, [x2, x8]
	isb

	/* get hnf-sdcr and cpuactlr off stack */
	ldp  x7, x8, [sp], #16

	/* restore cpuactlr */
	msr  CORTEX_A72_CPUACTLR_EL1, x8
	isb

	/* restore snooping in the hnf nodes */
	ldr  x9, =NXP_CCN_HN_F_0_ADDR
	mov  x6, #CCN_HNF_NODE_COUNT
28:
	str  x7, [x9, #CCN_HN_F_SNP_DMN_CTL_SET_OFFSET]
	sub  x6, x6, #1
	add  x9, x9, #CCN_HNF_OFFSET
	cbnz x6, 28b
	isb

	mov  x30, x28
	ret
endfunc _soc_sys_pwrdn_wfi

/* Part of CPU_SUSPEND
 * Function performs any SoC-specific cleanup after power-down
 * in:  x0 = core mask lsb
 * out: none
 * uses x1, x2
 */
func _soc_sys_exit_pwrdn

	mrs   x1, CORTEX_A72_ECTLR_EL1
	/* make sure the smp bit is set */
	orr   x1, x1, #CPUECTLR_SMPEN_MASK
	/* clr the retention control */
	mov   x2, #CPUECTLR_RET_8CLK
	bic   x1, x1, x2
	/* enable tablewalk prefetch */
	mov   x2, #CPUECTLR_DISABLE_TWALK_PREFETCH
	bic   x1, x1, x2
	msr   CORTEX_A72_ECTLR_EL1, x1
	isb

	ret
endfunc _soc_sys_exit_pwrdn


/* Function will pwrdown ddr and the final core - it will do this
 * by loading itself into the icache and then executing from there
 * in:
 *   x0  = core mask
 *   x1  = NXP_PMU_CCSR_ADDR
 *   x2  = NXP_PMU_DCSR_ADDR
 *   x3  = NXP_DCFG_ADDR
 *   x4  = NXP_DDR_ADDR
 *   x5  = NXP_DDR2_ADDR
 *   w6  = IPSTPCR4
 *   w7  = DEVDISR5
 *   x12 = PMU_CLAINACTSETR_OFFSET
 *   x13 = PMU_CLSINACTSETR_OFFSET
 *   x14 = PMU_CLAINACTCLRR_OFFSET
 *   x15 = PMU_CLSINACTCLRR_OFFSET
 * out: none
 * uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x13, x14, x15, x16,
 *	  x17, x18
 */

/* 4KB aligned */
.align 12
func final_pwrdown

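	/* two-pass structure: on the first pass (x0 == 0) the cbz
	 * chain through the touch_line_N labels pulls each chunk of
	 * this function (nop-padded to fit an i-cache line) into the
	 * i-cache without executing it; start_line_0 then sets x0 = 1
	 * and the second pass runs entirely from the i-cache, so no
	 * instruction fetch touches ddr while it is in self-refresh
	 */
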
	mov  x0, xzr
	b	touch_line_0
start_line_0:
	mov  x0, #1
	/* put ddr controller 1 into self-refresh */
	ldr  w8, [x4, #DDR_CFG_2_OFFSET]
	orr  w8, w8, #CFG_2_FORCE_REFRESH
	str  w8, [x4, #DDR_CFG_2_OFFSET]

	/* put ddr controller 2 into self-refresh */
	ldr  w8, [x5, #DDR_CFG_2_OFFSET]
	orr  w8, w8, #CFG_2_FORCE_REFRESH
	str  w8, [x5, #DDR_CFG_2_OFFSET]

	/* stop the clocks in both ddr controllers */
	mov  w10, #DEVDISR5_MASK_DDR
	mov  x16, #PMU_IPSTPCR4_OFFSET
	orr  w9,  w6, w10
	str  w9,  [x1, x16]
	isb

	mov  x17, #PMU_IPSTPACKSR4_OFFSET
touch_line_0:
	cbz  x0, touch_line_1

start_line_1:
	/* poll IPSTPACKSR4 until
	 * ddr controller clocks are stopped.
	 */
1:
	ldr  w8, [x1, x17]
	and  w8, w8, w10
	cmp  w8, w10
	b.ne 1b

	/* shut down power to the ddr controllers */
	orr w9, w7, #DEVDISR5_MASK_DDR
	str w9, [x3, #DCFG_DEVDISR5_OFFSET]

	/* disable cluster acp ports */
	mov  w8, #CLAINACT_DISABLE_ACP
	str  w8, [x1, x12]

	/* disable skyros ports */
	mov  w9, #CLSINACT_DISABLE_SKY
	str  w9, [x1, x13]
	isb

touch_line_1:
	cbz  x0, touch_line_2

start_line_2:
	isb
3:
	wfi

	/* if we are here then we are awake
	 * - bring this device back up
	 */

	/* enable skyros ports */
	mov  w9, #CLSINACT_DISABLE_SKY
	str  w9, [x1, x15]

	/* enable acp ports */
	mov  w8, #CLAINACT_DISABLE_ACP
	str  w8, [x1, x14]
	isb

	/* bring up the ddr controllers */
	str w7, [x3, #DCFG_DEVDISR5_OFFSET]
	isb
	str w6,  [x1, x16]
	isb

	nop
touch_line_2:
	cbz  x0, touch_line_3

start_line_3:
	/* poll IPSTPACKSR4 until
	 * ddr controller clocks are running
	 */
	mov w10, #DEVDISR5_MASK_DDR
2:
	ldr  w8, [x1, x17]
	and  w8, w8, w10
	cbnz w8, 2b

	/* take ddr controller 2 out of self-refresh */
	mov w8, #CFG_2_FORCE_REFRESH
	ldr w9, [x5, #DDR_CFG_2_OFFSET]
	bic w9, w9, w8
	str w9, [x5, #DDR_CFG_2_OFFSET]

	/* take ddr controller 1 out of self-refresh */
	ldr w9, [x4, #DDR_CFG_2_OFFSET]
	bic w9, w9, w8
	str w9, [x4, #DDR_CFG_2_OFFSET]
	isb

	nop
	nop
	nop
touch_line_3:
	cbz  x0, start_line_0

	/* execute here after ddr is back up */

	ret
endfunc final_pwrdown

/* Function returns CLUSTER_3_NORMAL if the cores of cluster 3 are
 * to be handled normally, and it returns CLUSTER_3_IN_RESET if the cores
 * are to be held in reset
 * in:  none
 * out: x0 = #CLUSTER_3_NORMAL,   cluster 3 treated normally
 *	  x0 = #CLUSTER_3_IN_RESET, cluster 3 cores held in reset
 * uses x0, x1, x2
 */
func cluster3InReset

	/* default return is to treat the cores normally */
	mov  x0, #CLUSTER_3_NORMAL

	/* read RCW_SR27 register */
	mov  x1, #NXP_DCFG_ADDR
	ldr  w2, [x1, #RCW_SR27_OFFSET]

	/* test the cluster 3 bit */
	tst  w2, #CLUSTER_3_RCW_BIT
	b.eq 1f

	/* if we are here, then the bit was set */
	mov  x0, #CLUSTER_3_IN_RESET
1:
	ret
endfunc cluster3InReset


/* Function checks to see if cores which are to be disabled have been
 * released from reset - if not, it releases them
 * Note: cluster 3 cores may need special handling, depending upon
 *	   RCW[850] (see cluster3InReset)
 * in:  none
 * out: none
 * uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
 */
func release_disabled
	mov  x9, x30

	/* check if we need to keep cluster 3 cores in reset */
	bl   cluster3InReset		/*  0-2  */
	mov  x8, x0

	/* x8 = cluster 3 handling */

	/* read COREDISABLEDSR */
	mov  x0, #NXP_DCFG_ADDR
	ldr  w4, [x0, #DCFG_COREDISABLEDSR_OFFSET]
	cmp  x8, #CLUSTER_3_IN_RESET
	b.ne 4f

	/* the cluster 3 cores are to be held in reset, so remove
	 * them from the disable mask
	 */
	bic  x4, x4, #CLUSTER_3_CORES_MASK
4:
	/* get the number of cpus on this device */
	mov   x6, #PLATFORM_CORE_COUNT

	mov  x0, #NXP_RESET_ADDR
	ldr  w5, [x0, #BRR_OFFSET]

	/* load the core mask for the first core */
	mov  x7, #1

	/* x4 = COREDISABLEDSR
	 * x5 = BRR
	 * x6 = loop count
	 * x7 = core mask bit
	 */
2:
	/* check if the core is to be disabled */
	tst  x4, x7
	b.eq 1f

	/* see if disabled cores have already been released from reset */
	tst  x5, x7
	b.ne 5f

	/* if core has not been released, then release it (0-3) */
	mov  x0, x7
	bl   _soc_core_release

	/* record the core state in the data area (0-3) */
	mov  x0, x7
	mov  x1, #CORE_STATE_DATA
	mov  x2, #CORE_DISABLED
	bl   _setCoreData

1:
	/* see if this is a cluster 3 core */
	mov   x3, #CLUSTER_3_CORES_MASK
	tst   x3, x7
	b.eq  5f

	/* this is a cluster 3 core - see if it needs to be held in reset */
	cmp  x8, #CLUSTER_3_IN_RESET
	b.ne 5f

	/* record the core state as disabled in the data area (0-3) */
	mov  x0, x7
	mov  x1, #CORE_STATE_DATA
	mov  x2, #CORE_DISABLED
	bl   _setCoreData

5:
	/* decrement the counter */
	subs  x6, x6, #1
	b.le  3f

	/* shift the core mask to the next core */
	lsl   x7, x7, #1
	/* continue */
	b	 2b
3:
	cmp  x8, #CLUSTER_3_IN_RESET
	b.ne 6f

	/* we need to hold the cluster 3 cores in reset,
	 * so mark them in the COREDISR and COREDISABLEDSR registers as
	 * "disabled", and the rest of the sw stack will leave them alone
	 * thinking that they have been disabled
	 */
	mov  x0, #NXP_DCFG_ADDR
	ldr  w1, [x0, #DCFG_COREDISR_OFFSET]
	orr  w1, w1, #CLUSTER_3_CORES_MASK
	str  w1, [x0, #DCFG_COREDISR_OFFSET]

	ldr  w2, [x0, #DCFG_COREDISABLEDSR_OFFSET]
	orr  w2, w2, #CLUSTER_3_CORES_MASK
	str  w2, [x0, #DCFG_COREDISABLEDSR_OFFSET]
	dsb  sy
	isb

#if (PSCI_TEST)
	/* x0 = NXP_DCFG_ADDR : read COREDISABLEDSR */
	ldr  w4, [x0, #DCFG_COREDISABLEDSR_OFFSET]
	/* read COREDISR */
	ldr  w3, [x0, #DCFG_COREDISR_OFFSET]
#endif

6:
	mov  x30, x9
	ret

endfunc release_disabled


/* Function sets up the TrustZone Protection Controller (TZPC)
 * in:  none
 * out: none
 * uses x0, x1
 */
func init_tzpc

	/* set Non Secure access for all devices protected via TZPC */

	/* decode Protection-0 Set Reg */
	ldr	x1, =TZPCDECPROT_0_SET_BASE
	/* set decode region to NS, Bits[7:0] */
	mov	w0, #0xFF
	str	w0, [x1]

	/* decode Protection-1 Set Reg */
	ldr	x1, =TZPCDECPROT_1_SET_BASE
	/* set decode region to NS, Bits[7:0] */
	mov	w0, #0xFF
	str	w0, [x1]

	/* decode Protection-2 Set Reg */
	ldr	x1, =TZPCDECPROT_2_SET_BASE
	/* set decode region to NS, Bits[7:0] */
	mov	w0, #0xFF
	str	w0, [x1]

	/* entire SRAM as NS */
	/* secure RAM region size Reg */
	ldr	x1, =TZPC_BASE
	/* 0x00000000 = no secure region */
	mov	w0, #0x00000000
	str	w0, [x1]

	ret
endfunc init_tzpc

/* Function writes a register in the DCFG block
 * in:  x0 = offset
 * in:  w1 = value to write
 * uses x0, x1, x2
 */
func _write_reg_dcfg
	ldr  x2, =NXP_DCFG_ADDR
	str  w1, [x2, x0]
	ret
endfunc _write_reg_dcfg


/* Function reads a register in the DCFG block
 * in:  x0 = offset
 * out: w0 = value read
 * uses x0, x1, x2
 */
func _read_reg_dcfg
	ldr  x2, =NXP_DCFG_ADDR
	ldr  w1, [x2, x0]
	mov  w0, w1
	ret
endfunc _read_reg_dcfg


/* Function returns an mpidr value for a core, given a core_mask_lsb
 * in:  x0 = core mask lsb
 * out: x0 = affinity2:affinity1:affinity0, where affinity is 8-bits
 * uses x0, x1
 */
func get_mpidr_value

	/* convert a core mask to an SoC core number */
	clz  w0, w0
	mov  w1, #31
	sub  w0, w1, w0

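	/* map the SoC core number to the mpidr affinity fields,
	 * assuming two cores per cluster:
	 * affinity0 = core & 1, affinity1 = core >> 1
	 */
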
	/* get the mpidr core number from the SoC core number */
	mov  w1, wzr
	tst  x0, #1
	b.eq 1f
	orr  w1, w1, #1

1:
	/* extract the cluster number */
	lsr  w0, w0, #1
	orr  w0, w1, w0, lsl #8

	ret
endfunc get_mpidr_value


/* Function returns the redistributor base address for the core specified
 * in x0
 * in:  x0 - core mask lsb of specified core
 * out: x0 = redistributor rd base address for specified core
 * uses x0, x1, x2
 */
func get_gic_rd_base
	clz  w1, w0
	mov  w2, #0x20
	sub  w2, w2, w1
	sub  w2, w2, #1

	ldr  x0, =NXP_GICR_ADDR
	mov  x1, #GIC_RD_OFFSET

	/* x2 = core number
	 * loop counter
	 */
2:
	cbz  x2, 1f
	add  x0, x0, x1
	sub  x2, x2, #1
	b	2b
1:
	ret
endfunc get_gic_rd_base

/* Function returns the redistributor base address for the core specified
 * in x0
 * in:  x0 - core mask lsb of specified core
 * out: x0 = redistributor sgi base address for specified core
 * uses x0, x1, x2
 */
func get_gic_sgi_base
	clz  w1, w0
	mov  w2, #0x20
	sub  w2, w2, w1
	sub  w2, w2, #1

	ldr  x0, =NXP_GICR_SGI_ADDR
	mov  x1, #GIC_SGI_OFFSET

	/* loop counter */
2:
	cbz  x2, 1f		/* x2 = core number */
	add  x0, x0, x1
	sub  x2, x2, #1
	b	2b
1:
	ret
endfunc get_gic_sgi_base

/* Function writes a register in the RESET block
 * in:  x0 = offset
 * in:  w1 = value to write
 * uses x0, x1, x2
 */
func _write_reg_reset
	ldr  x2, =NXP_RESET_ADDR
	str  w1, [x2, x0]
	ret
endfunc _write_reg_reset


/* Function reads a register in the RESET block
 * in:  x0 = offset
 * out: w0 = value read
 * uses x0, x1
 */
func _read_reg_reset
	ldr  x1, =NXP_RESET_ADDR
	ldr  w0, [x1, x0]
	ret
endfunc _read_reg_reset