/*
 * Copyright (c) 2014-2021, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <common/bl_common.h>
#include <common/debug.h>
#include <cpu_macros.S>
#include <lib/cpus/errata_report.h>
#include <lib/el3_runtime/cpu_data.h>

 /* A reset function is needed in any BL image that runs from the reset vector */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || (defined(IMAGE_BL2) && BL2_AT_EL3)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked.
	 * Clobbers: x0 - x19, x30
	 */
	.globl	reset_handler
func reset_handler
	mov	x19, x30

	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
	mov	x30, x19
	cbz	x2, 1f

	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
	br	x2
1:
	ret
endfunc reset_handler
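
	/*
	 * Illustrative only: the control flow above corresponds roughly to
	 * the following C sketch (names follow this file; this is not a
	 * real C function in the tree):
	 *
	 *	void reset_handler(void)
	 *	{
	 *		plat_reset_handler();
	 *		cpu_ops_t *ops = get_cpu_ops_ptr();
	 *		if (ops->reset_func != NULL)
	 *			ops->reset_func();	(tail-called via br x2,
	 *						 with x30 restored first)
	 *	}
	 */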

#endif

#ifdef IMAGE_BL31 /* Core and cluster power down is needed only in BL31 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare the CPU power down function for all platforms. The function
	 * takes the power domain level to be powered down as its parameter.
	 * After the cpu_ops pointer is retrieved from cpu_data, the handler
	 * for the requested power level is called.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the
	 * power down handler for the last power level
	 */
	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
	cmp	x0, x2
	csel	x2, x2, x0, hi

	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	x1, #CPU_PWR_DWN_OPS
	add	x1, x1, x2, lsl #3
	ldr	x1, [x0, x1]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	br	x1
endfunc prepare_cpu_pwr_dwn
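
	/*
	 * Illustrative only: the lookup above computes, in C-like terms
	 * (a sketch, not code in the tree):
	 *
	 *	lvl = min(power_level, CPU_MAX_PWR_DWN_OPS - 1);
	 *	handler = *(uintptr_t *)((char *)ops + CPU_PWR_DWN_OPS
	 *			+ (lvl << 3));
	 *	handler();	(tail-called via br x1)
	 *
	 * The "lsl #3" scales the level by 8, the size of one function
	 * pointer in the cpu_ops power down handler array.
	 */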


	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This can be called without a runtime stack, but may
	 * only be called after the MMU is enabled.
	 * Clobbers: x0 - x6, x10
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	cbnz	x0, 1f
	mov	x10, x30
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]!
	mov	x30, x10
1:
	ret
endfunc init_cpu_ops
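
	/*
	 * Illustrative only: a C-like sketch of the lazy initialization
	 * above (not code in the tree):
	 *
	 *	void init_cpu_ops(void)
	 *	{
	 *		cpu_data_t *data = tpidr_el3;
	 *		if (data->cpu_ops_ptr == NULL)
	 *			data->cpu_ops_ptr = get_cpu_ops_ptr();
	 *	}
	 */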
#endif /* IMAGE_BL31 */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	/*
	 * The cpu specific registers which need to be reported in a crash
	 * are reported via the cpu_ops cpu_reg_dump function. After a matching
	 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
	 * in the cpu_ops is invoked.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
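
	/*
	 * Illustrative only: unlike reset_handler, a missing cpu_ops entry
	 * or a NULL cpu_reg_dump member is tolerated here, and the function
	 * simply returns. In C-like terms (a sketch):
	 *
	 *	ops = get_cpu_ops_ptr();
	 *	if (ops != NULL && ops->cpu_reg_dump != NULL)
	 *		ops->cpu_reg_dump();
	 */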
#endif

	/*
	 * The function below returns the cpu_ops structure matching the
	 * MIDR of the core. It reads MIDR_EL1 and finds the matching
	 * entry in the list of cpu_ops entries. Only the implementer and
	 * part number fields are used to match the entries.
	 *
	 * If a cpu_ops entry for the MIDR_EL1 value cannot be found and
	 * SUPPORT_UNKNOWN_MPID is enabled, it will try to look for a
	 * default cpu_ops entry with an MIDR value of 0.
	 * (Implementer number 0x0 should be reserved for software use
	 * and therefore no clashes should happen with that default value.)
	 *
	 * Return:
	 *     x0 - The matching cpu_ops pointer on success
	 *     x0 - 0 on failure
	 * Clobbers: x0 - x5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementation and part number using the mask */
	and	w2, w2, w3

	/* Get the cpu_ops end location */
	adr	x5, (__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	x0, #0
1:
	/* Get the cpu_ops start location */
	adr	x4, (__CPU_OPS_START__ + CPU_MIDR)

2:
	/* Check if we have reached the end of the list */
	cmp	x4, x5
	b.eq	search_def_ptr

	/* Load the midr from the cpu_ops entry */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if the midr matches that of this core */
	cmp	w1, w2
	b.ne	2b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
#ifdef SUPPORT_UNKNOWN_MPID
	cbnz	x2, exit_mpid_found
	/* Mark the unsupported MPID flag */
	adrp	x1, unsupported_mpid_flag
	add	x1, x1, :lo12:unsupported_mpid_flag
	str	w2, [x1]
exit_mpid_found:
#endif
	ret

	/*
	 * Search again for a default pointer (MIDR = 0x0)
	 * or return an error if we have already searched.
	 */
search_def_ptr:
#ifdef SUPPORT_UNKNOWN_MPID
	cbz	x2, error_exit
	mov	x2, #0
	b	1b
error_exit:
#endif
	ret
endfunc get_cpu_ops_ptr
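
/*
 * Illustrative only: the search above is a linear scan of the cpu_ops
 * records that the linker places between __CPU_OPS_START__ and
 * __CPU_OPS_END__. In C-like terms (a sketch, not code in the tree):
 *
 *	target = midr_el1 & CPU_IMPL_PN_MASK;
 *	for (rec = start; rec != end; rec += CPU_OPS_SIZE)
 *		if ((rec->midr & CPU_IMPL_PN_MASK) == target)
 *			return rec;
 *	return NULL;	(or retry once with target = 0 when
 *			 SUPPORT_UNKNOWN_MPID is defined)
 */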

/*
 * Extract the CPU revision and variant, and combine them into a single
 * number for easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	mrs	x1, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
	 * as variant[7:4] and revision[3:0] of x0.
	 *
	 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
	 * extract x1[3:0] into x0[3:0] retaining other bits.
	 */
	ubfx	x0, x1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	ret
endfunc cpu_get_rev_var
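
/*
 * Worked example (hypothetical MIDR value, for illustration only):
 * MIDR_EL1 = 0x412FD081 has variant = 0x2 (bits [23:20]) and
 * revision = 0x1 (bits [3:0]), i.e. r2p1. The ubfx first yields
 * x0 = 0x2F (bits [23:16], including the architecture field), then the
 * bfxil overwrites the low nibble with the revision, giving x0 = 0x21.
 */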

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is less than or equal to the
 * given value, the errata applies; otherwise it does not.
 *
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, ls
	ret
endfunc cpu_rev_var_ls
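
/*
 * Usage sketch (illustrative): to check whether an erratum that affects
 * revisions up to and including r1p0 applies, an errata check would do:
 *
 *	bl	cpu_get_rev_var
 *	mov	x1, #0x10		(r1p0 packed as variant:revision)
 *	bl	cpu_rev_var_ls		(x0 = ERRATA_APPLIES iff
 *					 rev_var <= 0x10)
 */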

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is higher than or equal to the
 * given value, the errata applies; otherwise it does not.
 *
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, hs
	ret
endfunc cpu_rev_var_hs
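
/*
 * Usage sketch (illustrative): the mirror image of cpu_rev_var_ls. For an
 * erratum affecting r1p1 and later, pass x1 = #0x11; x0 then returns
 * ERRATA_APPLIES only when rev_var >= 0x11.
 */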

/*
 * Compare the CPU's revision-variant (x0) with a given range (x1 - x2), for
 * errata application purposes. If the revision-variant falls within the given
 * range (bounds included), the errata applies; otherwise it does not.
 *
 * Shall clobber: x0-x4
 */
	.globl	cpu_rev_var_range
func cpu_rev_var_range
	mov	x3, #ERRATA_APPLIES
	mov	x4, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x1, x3, x4, hs
	cbz	x1, 1f
	cmp	x0, x2
	csel	x1, x3, x4, ls
1:
	mov	x0, x1
	ret
endfunc cpu_rev_var_range
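
/*
 * Usage sketch (illustrative): for an erratum affecting r0p0 through r1p1
 * inclusive, pass x1 = #0x00 and x2 = #0x11; x0 then returns ERRATA_APPLIES
 * only when x1 <= rev_var <= x2.
 */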

#if REPORT_ERRATA
/*
 * void print_errata_status(void);
 *
 * Function to print the errata status for CPUs of its class. Must be called
 * only:
 *
 *   - with the MMU and data caches enabled;
 *   - after cpu_ops have been initialized in per-CPU data.
 */
	.globl print_errata_status
func print_errata_status
#ifdef IMAGE_BL1
	/*
	 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
	 * directly.
	 */
	stp	xzr, x30, [sp, #-16]!
	bl	get_cpu_ops_ptr
	ldp	xzr, x30, [sp], #16
	ldr	x1, [x0, #CPU_ERRATA_FUNC]
	cbnz	x1, .Lprint
#else
	/*
	 * Retrieve pointer to cpu_ops from per-CPU data, and further, the
	 * errata printing function. If it's non-NULL, jump to the function in
	 * turn.
	 */
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x1, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x1, #CPU_ERRATA_FUNC]
	cbz	x0, .Lnoprint

	/*
	 * Printing errata status requires atomically testing the printed flag.
	 */
	stp	x19, x30, [sp, #-16]!
	mov	x19, x0

	/*
	 * Load pointers to the errata lock and printed flag. Call
	 * errata_needs_reporting to check whether this CPU needs to report
	 * errata status pertaining to its class.
	 */
	ldr	x0, [x1, #CPU_ERRATA_LOCK]
	ldr	x1, [x1, #CPU_ERRATA_PRINTED]
	bl	errata_needs_reporting
	mov	x1, x19
	ldp	x19, x30, [sp], #16
	cbnz	x0, .Lprint
#endif
.Lnoprint:
	ret
.Lprint:
	/* Jump to errata reporting function for this CPU */
	br	x1
endfunc print_errata_status
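
/*
 * Illustrative only: the non-BL1 path above behaves like this C sketch
 * (member names follow this file's offsets; not code in the tree):
 *
 *	ops = cpu_data()->cpu_ops_ptr;
 *	if (ops->errata_func != NULL &&
 *	    errata_needs_reporting(ops->errata_lock, ops->errata_printed))
 *		ops->errata_func();	(tail-called via br x1)
 */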
#endif

/*
 * int check_wa_cve_2017_5715(void);
 *
 * This function returns:
 *  - ERRATA_APPLIES when firmware mitigation is required.
 *  - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
 *  - ERRATA_MISSING when firmware mitigation would be required but
 *    is not compiled in.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_wa_cve_2017_5715
func check_wa_cve_2017_5715
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA1_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2017-5715 so bail out.
	 */
	cmp	x0, #0
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_wa_cve_2017_5715
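
/*
 * Illustrative only: a caller sketch, e.g. for SMCCC_ARCH_WORKAROUND_1
 * feature discovery (hedged; actual call sites live elsewhere in the tree):
 *
 *	if (check_wa_cve_2017_5715() == ERRATA_APPLIES)
 *		... firmware mitigation is invoked on world switch ...
 */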

/*
 * void *wa_cve_2018_3639_get_disable_ptr(void);
 *
 * Returns a function pointer which is used to disable mitigation
 * for CVE-2018-3639.
 * The function pointer is only returned on cores that employ
 * dynamic mitigation. If the core uses static mitigation or is
 * unaffected by CVE-2018-3639 this function returns NULL.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	wa_cve_2018_3639_get_disable_ptr
func wa_cve_2018_3639_get_disable_ptr
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA2_FUNC]
	ret
endfunc wa_cve_2018_3639_get_disable_ptr
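
/*
 * Illustrative only: a caller sketch (assumption: the caller checks for
 * NULL before use, since only dynamically mitigated cores return a pointer):
 *
 *	void (*disable)(void) = wa_cve_2018_3639_get_disable_ptr();
 *	if (disable != NULL)
 *		disable();
 */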