/*
 * Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <common/bl_common.h>
#include <common/debug.h>
#include <cpu_macros.S>
#include <lib/cpus/cpu_ops.h>
#include <lib/cpus/errata.h>
#include <lib/el3_runtime/cpu_data.h>

 /* The reset function is needed only in BL images entered at the reset vector */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31) ||	\
	(defined(IMAGE_BL2) && RESET_TO_BL2)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked.
	 * Clobbers: x0 - x19, x30
	 */
	.globl	reset_handler
func reset_handler
	mov	x19, x30

	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr

#if ENABLE_ASSERTIONS
	/*
	 * Assert if an invalid cpu_ops pointer was obtained. A NULL pointer
	 * suggests that the proper CPU file has not been included in the build.
	 */
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
	mov	x30, x19
	cbz	x2, 1f

	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
	br	x2
1:
	ret
endfunc reset_handler
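
	/*
	 * For reference, the per-CPU reset_handler invoked above is wired up
	 * by each CPU library file through the declare_cpu_ops macro from
	 * cpu_macros.S. A minimal sketch of such a registration is shown
	 * below; the names are placeholders and the exact argument list
	 * depends on the cpu_macros.S revision in use:
	 *
	 *	declare_cpu_ops my_cpu, MY_CPU_MIDR, \
	 *		my_cpu_reset_func, \
	 *		my_cpu_core_pwr_dwn, \
	 *		my_cpu_cluster_pwr_dwn
	 */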

#endif

#ifdef IMAGE_BL31 /* CPU and cluster power down is needed only in BL31 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare the CPU for power down, for all platforms. The function
	 * takes the power domain level to be powered down as its parameter.
	 * After the cpu_ops pointer is retrieved from cpu_data, the handler
	 * for the requested power level is called.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS - 1, call the
	 * power down handler for the highest implemented power level.
	 */
	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
	cmp	x0, x2
	csel	x2, x2, x0, hi

	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	x1, #CPU_PWR_DWN_OPS
	add	x1, x1, x2, lsl #3
	ldr	x1, [x0, x1]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	br	x1
endfunc prepare_cpu_pwr_dwn
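
	/*
	 * Worked example of the handler selection above (illustrative): with
	 * 64-bit function pointers, the handler for power level 'n' lives at
	 * offset CPU_PWR_DWN_OPS + (n * 8) within cpu_ops, so level 0 picks
	 * the core power down handler and level 1 the cluster power down
	 * handler. Any level at or beyond CPU_MAX_PWR_DWN_OPS is first
	 * clamped to the last implemented handler.
	 */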


	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This can be called without a runtime stack, but may
	 * only be called after the MMU is enabled.
	 * Clobbers: x0 - x6, x10
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	cbnz	x0, 1f
	mov	x10, x30
	bl	get_cpu_ops_ptr
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]!
	mov	x30, x10
1:
	ret
endfunc init_cpu_ops
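
	/*
	 * Illustrative note: from C this helper is reached through the
	 * prototype 'void init_cpu_ops(void);' and is expected to run once
	 * per CPU early in the boot or power-on path, after the MMU is
	 * enabled, so that later lookups can use the cached cpu_ops pointer
	 * in cpu_data instead of rescanning the cpu_ops list.
	 */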
#endif /* IMAGE_BL31 */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	/*
	 * The cpu specific registers which need to be reported in a crash
	 * are reported via the cpu_ops cpu_reg_dump function. After a
	 * matching cpu_ops structure entry is found, the corresponding
	 * cpu_reg_dump in the cpu_ops is invoked.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
#endif

	/*
	 * The below function returns the cpu_ops structure matching the
	 * MIDR of the core. It reads MIDR_EL1 and finds the matching
	 * entry among the cpu_ops entries. Only the implementer and part
	 * number are used to match the entries.
	 *
	 * If cpu_ops for the MIDR_EL1 cannot be found and
	 * SUPPORT_UNKNOWN_MPID is enabled, it will try to look for a
	 * default cpu_ops with an MIDR value of 0.
	 * (Implementer code 0x0 is reserved for software use and therefore
	 * no clashes should happen with that default value).
	 *
	 * Return :
	 *     x0 - The matching cpu_ops pointer on success
	 *     x0 - 0 on failure.
	 * Clobbers : x0 - x5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementer and part number using the mask */
	and	w2, w2, w3

	/* Get the cpu_ops end location */
	adr_l	x5, (__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	x0, #0
1:
	/* Get the cpu_ops start location */
	adr_l	x4, (__CPU_OPS_START__ + CPU_MIDR)

2:
	/* Check if we have reached the end of the list */
	cmp	x4, x5
	b.eq	search_def_ptr

	/* Load the midr from the cpu_ops */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if the midr matches the midr of this core */
	cmp	w1, w2
	b.ne	2b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
#ifdef SUPPORT_UNKNOWN_MPID
	cbnz	x2, exit_mpid_found
	/* Mark the unsupported MPID flag */
	adrp	x1, unsupported_mpid_flag
	add	x1, x1, :lo12:unsupported_mpid_flag
	str	w2, [x1]
exit_mpid_found:
#endif
	ret

	/*
	 * Search again for a default pointer (MIDR = 0x0)
	 * or return an error if we have already searched.
	 */
search_def_ptr:
#ifdef SUPPORT_UNKNOWN_MPID
	cbz	x2, error_exit
	mov	x2, #0
	b	1b
error_exit:
#endif
	ret
endfunc get_cpu_ops_ptr
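
	/*
	 * Worked example of the matching above (illustrative values): a
	 * Cortex-A57 r1p1 core reads MIDR_EL1 = 0x411fd071. Masking with
	 * CPU_IMPL_PN_MASK keeps only the implementer and part number,
	 * giving 0x4100d070, which matches the masked MIDR recorded in the
	 * Cortex-A57 cpu_ops entry. The variant and revision fields are
	 * deliberately ignored so one entry covers every revision of a part.
	 */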

/*
 * Extract the CPU revision and variant, and combine them into a single
 * numeric value for easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	mrs	x1, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
	 * as variant[7:4] and revision[3:0] of x0.
	 *
	 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
	 * extract x1[3:0] into x0[3:0] retaining other bits.
	 */
	ubfx	x0, x1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	ret
endfunc cpu_get_rev_var
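
	/*
	 * Worked example (illustrative values): for MIDR_EL1 = 0x411fd071
	 * (Cortex-A57 r1p1), variant = 0x1 and revision = 0x1, so the packed
	 * result returned in x0 is 0x11. This is the value that the
	 * cpu_rev_var_* helpers below compare against.
	 */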

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is less than or equal to the
 * given value, the erratum applies; otherwise it does not.
 *
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, ls
	ret
endfunc cpu_rev_var_ls
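
	/*
	 * A minimal sketch of how a CPU errata file typically uses this
	 * helper (hypothetical erratum and cut-off revision, shown only for
	 * illustration): the caller puts the current revision-variant in x0
	 * (from cpu_get_rev_var) and the last affected revision-variant in
	 * x1, then tail-calls cpu_rev_var_ls:
	 *
	 *	func check_errata_xxxxxx
	 *		mov	x1, #0x12	// affected up to r1p2 (assumed)
	 *		b	cpu_rev_var_ls
	 *	endfunc check_errata_xxxxxx
	 */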

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is higher than or equal to the
 * given value, the erratum applies; otherwise it does not.
 *
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, hs
	ret
endfunc cpu_rev_var_hs
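
	/*
	 * Worked example (illustrative): with x0 = 0x21 (r2p1) and
	 * x1 = 0x20 (r2p0), the 'hs' condition holds and ERRATA_APPLIES is
	 * returned; with x0 = 0x11 (r1p1) it does not, and
	 * ERRATA_NOT_APPLIES is returned.
	 */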

/*
 * Compare the CPU's revision-variant (x0) with a given range (x1 - x2), for
 * errata application purposes. If the revision-variant falls within the given
 * range (inclusive of both bounds), the erratum applies; otherwise it does not.
 *
 * Shall clobber: x0-x4
 */
	.globl	cpu_rev_var_range
func cpu_rev_var_range
	mov	x3, #ERRATA_APPLIES
	mov	x4, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x1, x3, x4, hs
	cbz	x1, 1f
	cmp	x0, x2
	csel	x1, x3, x4, ls
1:
	mov	x0, x1
	ret
endfunc cpu_rev_var_range
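
	/*
	 * Worked example (illustrative): for an affected range of r0p1 to
	 * r1p0, callers pass x1 = 0x01 and x2 = 0x10. A core at r0p2
	 * (x0 = 0x02) lies inside the range and gets ERRATA_APPLIES, while a
	 * core at r1p1 (x0 = 0x11) lies outside it and gets
	 * ERRATA_NOT_APPLIES.
	 */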

/*
 * int check_wa_cve_2017_5715(void);
 *
 * This function returns:
 *  - ERRATA_APPLIES when firmware mitigation is required.
 *  - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
 *  - ERRATA_MISSING when firmware mitigation would be required but
 *    is not compiled in.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_wa_cve_2017_5715
func check_wa_cve_2017_5715
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA1_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2017-5715 so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA1_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_wa_cve_2017_5715
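
	/*
	 * Illustrative note: this helper is what allows the runtime service
	 * implementing SMCCC_ARCH_WORKAROUND_1 to report, per CPU, whether
	 * the Spectre variant 2 (CVE-2017-5715) mitigation is required, not
	 * required, or missing from the build. The CPU_EXTRA1_FUNC slot it
	 * consults is populated by the individual CPU files.
	 */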

/*
 * void *wa_cve_2018_3639_get_disable_ptr(void);
 *
 * Returns a function pointer which is used to disable mitigation
 * for CVE-2018-3639.
 * The function pointer is only returned on cores that employ
 * dynamic mitigation.  If the core uses static mitigation or is
 * unaffected by CVE-2018-3639 this function returns NULL.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	wa_cve_2018_3639_get_disable_ptr
func wa_cve_2018_3639_get_disable_ptr
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA2_FUNC]
	ret
endfunc wa_cve_2018_3639_get_disable_ptr
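
	/*
	 * Illustrative note: callers use the returned pointer to temporarily
	 * turn off the CVE-2018-3639 (SSBD) mitigation on cores that
	 * implement it dynamically; a NULL return means there is nothing to
	 * disable, either because the core is unaffected or because the
	 * mitigation is static. The CPU_EXTRA2_FUNC slot read here is filled
	 * in by the affected CPU files.
	 */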

/*
 * int check_smccc_arch_wa3_applies(void);
 *
 * This function checks whether SMCCC_ARCH_WORKAROUND_3 is enabled to mitigate
 * CVE-2022-23960 for this CPU. It returns:
 *  - ERRATA_APPLIES when SMCCC_ARCH_WORKAROUND_3 can be invoked to mitigate
 *    the CVE.
 *  - ERRATA_NOT_APPLIES when SMCCC_ARCH_WORKAROUND_3 should not be invoked to
 *    mitigate the CVE.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_smccc_arch_wa3_applies
func check_smccc_arch_wa3_applies
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA3_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2022-23960 so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA3_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_smccc_arch_wa3_applies
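
	/*
	 * Illustrative note: as with the CVE-2017-5715 check above, the
	 * CPU_EXTRA3_FUNC slot is filled in by the individual CPU files, and
	 * this helper lets the SMCCC_ARCH_WORKAROUND_3 feature query report
	 * per CPU whether invoking the workaround is useful for
	 * CVE-2022-23960.
	 */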