/*
 * Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <context.h>
#include <el3_common_macros.S>

#if CTX_INCLUDE_FPREGS
	.global	fpregs_context_save
	.global	fpregs_context_restore
#endif /* CTX_INCLUDE_FPREGS */
	.global	prepare_el3_entry
	.global	restore_gp_pmcr_pauth_regs
	.global	save_and_update_ptw_el1_sys_regs
	.global	el3_exit
/* ------------------------------------------------------------------
 * The following function strictly follows the AAPCS64 calling
 * convention, using only x9-x17 (temporary caller-saved registers)
 * to save the floating point register context. It assumes that 'x0'
 * points to a 'fp_regs' structure where the register context will
 * be saved.
 *
 * Access to VFP registers will trap if CPTR_EL3.TFP is set.
 * However, Trusted Firmware currently neither uses VFP registers
 * nor sets this trap, so the bit is assumed to be cleared.
 *
 * TODO: Revisit when VFP is used in the secure world.
 * ------------------------------------------------------------------
 */
#if CTX_INCLUDE_FPREGS
func fpregs_context_save
	stp	q0, q1, [x0, #CTX_FP_Q0]
	stp	q2, q3, [x0, #CTX_FP_Q2]
	stp	q4, q5, [x0, #CTX_FP_Q4]
	stp	q6, q7, [x0, #CTX_FP_Q6]
	stp	q8, q9, [x0, #CTX_FP_Q8]
	stp	q10, q11, [x0, #CTX_FP_Q10]
	stp	q12, q13, [x0, #CTX_FP_Q12]
	stp	q14, q15, [x0, #CTX_FP_Q14]
	stp	q16, q17, [x0, #CTX_FP_Q16]
	stp	q18, q19, [x0, #CTX_FP_Q18]
	stp	q20, q21, [x0, #CTX_FP_Q20]
	stp	q22, q23, [x0, #CTX_FP_Q22]
	stp	q24, q25, [x0, #CTX_FP_Q24]
	stp	q26, q27, [x0, #CTX_FP_Q26]
	stp	q28, q29, [x0, #CTX_FP_Q28]
	stp	q30, q31, [x0, #CTX_FP_Q30]

	mrs	x9, fpsr
	str	x9, [x0, #CTX_FP_FPSR]

	mrs	x10, fpcr
	str	x10, [x0, #CTX_FP_FPCR]

#if CTX_INCLUDE_AARCH32_REGS
	mrs	x11, fpexc32_el2
	str	x11, [x0, #CTX_FP_FPEXC32_EL2]
#endif /* CTX_INCLUDE_AARCH32_REGS */
	ret
endfunc fpregs_context_save

/* ------------------------------------------------------------------
 * The following function strictly follows the AAPCS64 calling
 * convention, using only x9-x17 (temporary caller-saved registers)
 * to restore the floating point register context. It assumes that
 * 'x0' points to a 'fp_regs' structure from where the register
 * context will be restored.
 *
 * Access to VFP registers will trap if CPTR_EL3.TFP is set.
 * However, Trusted Firmware currently neither uses VFP registers
 * nor sets this trap, so the bit is assumed to be cleared.
 *
 * TODO: Revisit when VFP is used in the secure world.
 * ------------------------------------------------------------------
 */
func fpregs_context_restore
	ldp	q0, q1, [x0, #CTX_FP_Q0]
	ldp	q2, q3, [x0, #CTX_FP_Q2]
	ldp	q4, q5, [x0, #CTX_FP_Q4]
	ldp	q6, q7, [x0, #CTX_FP_Q6]
	ldp	q8, q9, [x0, #CTX_FP_Q8]
	ldp	q10, q11, [x0, #CTX_FP_Q10]
	ldp	q12, q13, [x0, #CTX_FP_Q12]
	ldp	q14, q15, [x0, #CTX_FP_Q14]
	ldp	q16, q17, [x0, #CTX_FP_Q16]
	ldp	q18, q19, [x0, #CTX_FP_Q18]
	ldp	q20, q21, [x0, #CTX_FP_Q20]
	ldp	q22, q23, [x0, #CTX_FP_Q22]
	ldp	q24, q25, [x0, #CTX_FP_Q24]
	ldp	q26, q27, [x0, #CTX_FP_Q26]
	ldp	q28, q29, [x0, #CTX_FP_Q28]
	ldp	q30, q31, [x0, #CTX_FP_Q30]

	ldr	x9, [x0, #CTX_FP_FPSR]
	msr	fpsr, x9

	ldr	x10, [x0, #CTX_FP_FPCR]
	msr	fpcr, x10

#if CTX_INCLUDE_AARCH32_REGS
	ldr	x11, [x0, #CTX_FP_FPEXC32_EL2]
	msr	fpexc32_el2, x11
#endif /* CTX_INCLUDE_AARCH32_REGS */

	/*
	 * No explicit ISB is required here, as the ERET used to
	 * switch to secure EL1 or the non-secure world covers it.
	 */

	ret
endfunc fpregs_context_restore
#endif /* CTX_INCLUDE_FPREGS */
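
/*
 * Note: an illustrative (not authoritative) invocation from the context
 * management code would be along the lines of
 *	fpregs_context_save(get_fpregs_ctx(ctx));
 *	...
 *	fpregs_context_restore(get_fpregs_ctx(ctx));
 * where 'ctx' is a cpu_context_t pointer and get_fpregs_ctx() is the
 * accessor assumed to be provided by context.h.
 */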

	/*
	 * Set SCR_EL3.EA bit to enable SErrors at EL3
	 */
	.macro enable_serror_at_el3
	mrs	x8, scr_el3
	orr	x8, x8, #SCR_EA_BIT
	msr	scr_el3, x8
	.endm

	/*
	 * Set the PSTATE bits not set when the exception was taken as
	 * described in the AArch64.TakeException() pseudocode function
	 * in ARM DDI 0487F.c page J1-7635 to a default value.
	 */
	.macro set_unset_pstate_bits
	/*
	 * If Data Independent Timing (DIT) functionality is implemented,
	 * always enable DIT in EL3.
	 */
#if ENABLE_FEAT_DIT
#if ENABLE_FEAT_DIT == 2
	mrs	x8, id_aa64pfr0_el1
	and	x8, x8, #(ID_AA64PFR0_DIT_MASK << ID_AA64PFR0_DIT_SHIFT)
	cbz	x8, 1f
#endif
	mov	x8, #DIT_BIT
	msr	DIT, x8
1:
#endif /* ENABLE_FEAT_DIT */
	.endm /* set_unset_pstate_bits */
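
/*
 * Note: in keeping with the ENABLE_FEAT_* convention used elsewhere in
 * this file (see ENABLE_FEAT_MPAM below), ENABLE_FEAT_DIT == 1 assumes
 * the feature is present, while ENABLE_FEAT_DIT == 2 detects it at
 * runtime from ID_AA64PFR0_EL1.DIT before setting PSTATE.DIT.
 */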

/*-------------------------------------------------------------------------
 * This macro checks the ENABLE_FEAT_MPAM state, performs an ID register
 * check to see if the platform supports the MPAM extension, and restores
 * the MPAM3_EL3 register value if the option is FEAT_STATE_ENABLED or
 * FEAT_STATE_CHECKED.
 *
 * This is more involved than for other extensions because MPAM support
 * cannot be determined by checking the status of a particular bit in the
 * MDCR_EL3 or CPTR_EL3 register.
 * ------------------------------------------------------------------------
 */

	.macro	restore_mpam3_el3
#if ENABLE_FEAT_MPAM
#if ENABLE_FEAT_MPAM == 2

	mrs	x8, id_aa64pfr0_el1
	lsr	x8, x8, #(ID_AA64PFR0_MPAM_SHIFT)
	and	x8, x8, #(ID_AA64PFR0_MPAM_MASK)
	mrs	x7, id_aa64pfr1_el1
	lsr	x7, x7, #(ID_AA64PFR1_MPAM_FRAC_SHIFT)
	and	x7, x7, #(ID_AA64PFR1_MPAM_FRAC_MASK)
	orr	x7, x7, x8
	cbz	x7, no_mpam
#endif
	/* -----------------------------------------------------------
	 * Restore the MPAM3_EL3 register as per the context state.
	 * Currently we only enable MPAM for the NS world and trap to
	 * EL3 on MPAM accesses from lower ELs of the Secure and Realm
	 * worlds. x9 holds the address of the per_world context.
	 * -----------------------------------------------------------
	 */

	ldr	x17, [x9, #CTX_MPAM3_EL3]
	msr	S3_6_C10_C5_0, x17 /* mpam3_el3 */

no_mpam:
#endif
	.endm /* restore_mpam3_el3 */
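
/*
 * Note: the ENABLE_FEAT_MPAM == 2 path above combines
 * ID_AA64PFR0_EL1.MPAM with ID_AA64PFR1_EL1.MPAM_frac, so any non-zero
 * result indicates that some version of FEAT_MPAM is present; MPAM3_EL3
 * is only restored in that case.
 */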

/* ------------------------------------------------------------------
 * The following macro is used to save and restore all the general
 * purpose and ARMv8.3-PAuth (if enabled) registers.
 * It also checks if the Secure Cycle Counter (PMCCNTR_EL0) is
 * disabled in EL3/Secure (ARMv8.5-PMU), in which case PMCCNTR_EL0
 * need not be saved/restored during a world switch.
 *
 * Ideally we would only save and restore the callee-saved registers
 * when a world switch occurs, but that type of implementation is more
 * complex. So currently we always save and restore these registers on
 * entry to and exit from EL3.
 * clobbers: x18
 * ------------------------------------------------------------------
 */
	.macro save_gp_pmcr_pauth_regs
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	mrs	x18, sp_el0
	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]

	/* PMUv3 is presumed to be always present */
	mrs	x9, pmcr_el0
	str	x9, [sp, #CTX_EL3STATE_OFFSET + CTX_PMCR_EL0]
	/* Disable cycle counter when event counting is prohibited */
	orr	x9, x9, #PMCR_EL0_DP_BIT
	msr	pmcr_el0, x9
	isb
#if CTX_INCLUDE_PAUTH_REGS
	/* ----------------------------------------------------------
	 * Save the ARMv8.3-PAuth keys as they are not banked
	 * by exception level
	 * ----------------------------------------------------------
	 */
	add	x19, sp, #CTX_PAUTH_REGS_OFFSET

	mrs	x20, APIAKeyLo_EL1	/* x21:x20 = APIAKey */
	mrs	x21, APIAKeyHi_EL1
	mrs	x22, APIBKeyLo_EL1	/* x23:x22 = APIBKey */
	mrs	x23, APIBKeyHi_EL1
	mrs	x24, APDAKeyLo_EL1	/* x25:x24 = APDAKey */
	mrs	x25, APDAKeyHi_EL1
	mrs	x26, APDBKeyLo_EL1	/* x27:x26 = APDBKey */
	mrs	x27, APDBKeyHi_EL1
	mrs	x28, APGAKeyLo_EL1	/* x29:x28 = APGAKey */
	mrs	x29, APGAKeyHi_EL1

	stp	x20, x21, [x19, #CTX_PACIAKEY_LO]
	stp	x22, x23, [x19, #CTX_PACIBKEY_LO]
	stp	x24, x25, [x19, #CTX_PACDAKEY_LO]
	stp	x26, x27, [x19, #CTX_PACDBKEY_LO]
	stp	x28, x29, [x19, #CTX_PACGAKEY_LO]
#endif /* CTX_INCLUDE_PAUTH_REGS */
	.endm /* save_gp_pmcr_pauth_regs */

/* -----------------------------------------------------------------
 * This function saves the context and sets the PSTATE to a known
 * state, preparing entry to EL3.
 * Save all the general purpose and ARMv8.3-PAuth (if enabled)
 * registers.
 * Then set any of the PSTATE bits that are not set by hardware
 * according to the AArch64.TakeException pseudocode in the Arm
 * Architecture Reference Manual to a default value for EL3.
 * clobbers: x17
 * -----------------------------------------------------------------
 */
func prepare_el3_entry
	save_gp_pmcr_pauth_regs
	enable_serror_at_el3
	/*
	 * Set the PSTATE bits not described in the AArch64.TakeException
	 * pseudocode to their default values.
	 */
	set_unset_pstate_bits
	ret
endfunc prepare_el3_entry

/* ------------------------------------------------------------------
 * This function restores ARMv8.3-PAuth (if enabled), PMCR_EL0 and all
 * general purpose registers except x30 from the CPU context.
 * The x30 register must be explicitly restored by the caller.
 * ------------------------------------------------------------------
 */
func restore_gp_pmcr_pauth_regs
#if CTX_INCLUDE_PAUTH_REGS
	/* Restore the ARMv8.3 PAuth keys */
	add	x10, sp, #CTX_PAUTH_REGS_OFFSET

	ldp	x0, x1, [x10, #CTX_PACIAKEY_LO]	/* x1:x0 = APIAKey */
	ldp	x2, x3, [x10, #CTX_PACIBKEY_LO]	/* x3:x2 = APIBKey */
	ldp	x4, x5, [x10, #CTX_PACDAKEY_LO]	/* x5:x4 = APDAKey */
	ldp	x6, x7, [x10, #CTX_PACDBKEY_LO]	/* x7:x6 = APDBKey */
	ldp	x8, x9, [x10, #CTX_PACGAKEY_LO]	/* x9:x8 = APGAKey */

	msr	APIAKeyLo_EL1, x0
	msr	APIAKeyHi_EL1, x1
	msr	APIBKeyLo_EL1, x2
	msr	APIBKeyHi_EL1, x3
	msr	APDAKeyLo_EL1, x4
	msr	APDAKeyHi_EL1, x5
	msr	APDBKeyLo_EL1, x6
	msr	APDBKeyHi_EL1, x7
	msr	APGAKeyLo_EL1, x8
	msr	APGAKeyHi_EL1, x9
#endif /* CTX_INCLUDE_PAUTH_REGS */

	/* PMUv3 is presumed to be always present */
	ldr	x0, [sp, #CTX_EL3STATE_OFFSET + CTX_PMCR_EL0]
	msr	pmcr_el0, x0
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	ldp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	ldp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	ldp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	ldp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	ldp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	ldp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	ldp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	ldp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	ldp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	ldp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	ldp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	ldr	x28, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
	msr	sp_el0, x28
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ret
endfunc restore_gp_pmcr_pauth_regs

/*
 * In case of ERRATA_SPECULATIVE_AT, save the SCTLR_EL1 and TCR_EL1
 * registers and update the EL1 registers to disable the stage 1 and
 * stage 2 page table walks.
 */
func save_and_update_ptw_el1_sys_regs
	/* ----------------------------------------------------------
	 * Save only the sctlr_el1 and tcr_el1 registers
	 * ----------------------------------------------------------
	 */
	mrs	x29, sctlr_el1
	str	x29, [sp, #(CTX_EL1_SYSREGS_OFFSET + CTX_SCTLR_EL1)]
	mrs	x29, tcr_el1
	str	x29, [sp, #(CTX_EL1_SYSREGS_OFFSET + CTX_TCR_EL1)]

	/* ------------------------------------------------------------
	 * The steps below must be performed in this order to disable
	 * the page table walk for the lower ELs (EL1 and EL0). The
	 * first step disables the stage 1 page table walk, and the
	 * second step forces the page table walker to honour the
	 * TCR_EL1.EPDx bits when performing address translation. The
	 * ISB ensures that the CPU performs these two steps in order.
	 *
	 * 1. Update the TCR_EL1.EPDx bits to disable the stage 1 page
	 *    table walk.
	 * 2. Set the MMU enable bit to avoid identity mapping via
	 *    stage 2 and force the page table walker to use the
	 *    TCR_EL1.EPDx bits.
	 * ------------------------------------------------------------
	 */
	orr	x29, x29, #(TCR_EPD0_BIT)
	orr	x29, x29, #(TCR_EPD1_BIT)
	msr	tcr_el1, x29
	isb
	mrs	x29, sctlr_el1
	orr	x29, x29, #SCTLR_M_BIT
	msr	sctlr_el1, x29
	isb

	ret
endfunc save_and_update_ptw_el1_sys_regs
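
/*
 * Note: the values saved above are expected to be put back by the
 * complementary restore_ptw_el1_sys_regs macro, which el3_exit invokes
 * further down in this file before the exception return.
 */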

/* -----------------------------------------------------------------
 * The macro below returns the address of the per_world context for
 * the security state, retrieved through the "get_security_state"
 * macro. The per_world context address is returned in the register
 * argument.
 * Clobbers: x9, x10
 * ------------------------------------------------------------------
 */

.macro get_per_world_context _reg:req
	ldr	x10, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
	get_security_state x9, x10
	mov_imm	x10, (CTX_PERWORLD_EL3STATE_END - CTX_CPTR_EL3)
	mul	x9, x9, x10
	adrp	x10, per_world_context
	add	x10, x10, :lo12:per_world_context
	add	x9, x9, x10
	mov	\_reg, x9
.endm
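
/*
 * In other words, the address computed above is
 *	per_world_context + state * (CTX_PERWORLD_EL3STATE_END - CTX_CPTR_EL3)
 * where 'state' is the security state index extracted from SCR_EL3 by
 * get_security_state and the second term is the stride between
 * consecutive per-world entries.
 */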

/* ------------------------------------------------------------------
 * This routine assumes that SP_EL3 is pointing to a valid
 * context structure from where the gp regs and other special
 * registers can be retrieved.
 * ------------------------------------------------------------------
 */
func el3_exit
#if ENABLE_ASSERTIONS
	/* el3_exit assumes SP_EL0 on entry */
	mrs	x17, spsel
	cmp	x17, #MODE_SP_EL0
	ASM_ASSERT(eq)
#endif /* ENABLE_ASSERTIONS */

	/* ----------------------------------------------------------
	 * Save the current SP_EL0, i.e. the EL3 runtime stack, which
	 * will be used for handling the next SMC.
	 * Then switch to SP_EL3.
	 * ----------------------------------------------------------
	 */
	mov	x17, sp
	msr	spsel, #MODE_SP_ELX
	str	x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/* ----------------------------------------------------------
	 * Restore CPTR_EL3.
	 * ZCR is only restored if SVE is supported and enabled.
	 * Synchronization is required before zcr_el3 is addressed.
	 * ----------------------------------------------------------
	 */

	/* The address of the per_world context is stored in x9 */
	get_per_world_context x9

	ldp	x19, x20, [x9, #CTX_CPTR_EL3]
	msr	cptr_el3, x19

#if IMAGE_BL31
	ands	x19, x19, #CPTR_EZ_BIT
	beq	sve_not_enabled

	isb
	msr	S3_6_C1_C2_0, x20 /* zcr_el3 */
sve_not_enabled:

	restore_mpam3_el3

#endif /* IMAGE_BL31 */

#if IMAGE_BL31 && DYNAMIC_WORKAROUND_CVE_2018_3639
	/* ----------------------------------------------------------
	 * Restore mitigation state as it was on entry to EL3
	 * ----------------------------------------------------------
	 */
	ldr	x17, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]
	cbz	x17, 1f
	blr	x17
1:
#endif /* IMAGE_BL31 && DYNAMIC_WORKAROUND_CVE_2018_3639 */

#if IMAGE_BL31
	synchronize_errors
#endif /* IMAGE_BL31 */

	/* ----------------------------------------------------------
	 * Restore SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET
	 * ----------------------------------------------------------
	 */
	ldr	x18, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
	ldp	x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	msr	scr_el3, x18
	msr	spsr_el3, x16
	msr	elr_el3, x17

	restore_ptw_el1_sys_regs

	/* ----------------------------------------------------------
	 * Restore general purpose (including x30), PMCR_EL0 and
	 * ARMv8.3-PAuth registers.
	 * Exit EL3 via ERET to a lower exception level.
	 * ----------------------------------------------------------
	 */
	bl	restore_gp_pmcr_pauth_regs
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

#ifdef IMAGE_BL31
	/* Clear the EL3 flag as we are exiting EL3 */
	str	xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
#endif /* IMAGE_BL31 */

	exception_return

endfunc el3_exit
