1/*
2 * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7#include <platform_def.h>
8
9#include <arch.h>
10#include <asm_macros.S>
11#include <bl32/tsp/tsp.h>
12#include <lib/xlat_tables/xlat_tables_defs.h>
13#include <smccc_helpers.h>
14
15#include "../tsp_private.h"
16
17
18	.globl	tsp_entrypoint
19	.globl  tsp_vector_table
20#if SPMC_AT_EL3
21	.globl tsp_cpu_on_entry
22#endif
23
24
25
26	/* ---------------------------------------------
27	 * Populate the params in x0-x7 from the pointer
28	 * to the smc args structure in x0.
29	 * ---------------------------------------------
30	 */
	.macro restore_args_call_smc
	/*
	 * x0 holds the pointer to the saved SMC-args structure. Load the
	 * argument pairs from highest offset downwards so that x0 itself
	 * (the base pointer) is overwritten last, by the SMC_ARG0 load.
	 * Do not reorder these loads.
	 */
	ldp	x6, x7, [x0, #SMC_ARG6]
	ldp	x4, x5, [x0, #SMC_ARG4]
	ldp	x2, x3, [x0, #SMC_ARG2]
	ldp	x0, x1, [x0, #SMC_ARG0]
	/* Return control to the TSPD at EL3 with args in x0-x7 */
	smc	#0
	.endm
38
	/*
	 * Save the EL1 exception-return state (ELR_EL1/SPSR_EL1) plus the
	 * link register on the stack, so that C code may be entered and an
	 * ERET performed later. Uses \reg1/\reg2 as scratch. Each stp
	 * pushes 16 bytes, keeping sp 16-byte aligned as AArch64 requires.
	 * Must be unwound with restore_eret_context (same register pair).
	 */
	.macro	save_eret_context reg1 reg2
	mrs	\reg1, elr_el1
	mrs	\reg2, spsr_el1
	stp	\reg1, \reg2, [sp, #-0x10]!
	/*
	 * x30 (LR) must survive the C runtime; x18 is paired with it —
	 * presumably because x18 is the AAPCS64 platform register that
	 * intervening code may clobber (TODO confirm against callers).
	 */
	stp	x30, x18, [sp, #-0x10]!
	.endm
45
	/*
	 * Inverse of save_eret_context: pop x30/x18 and the saved
	 * ELR_EL1/SPSR_EL1 values (in reverse push order) and write them
	 * back to the system registers, ready for an ERET or SMC return.
	 * \reg1/\reg2 are scratch and must match the pair used on save.
	 */
	.macro restore_eret_context reg1 reg2
	ldp	x30, x18, [sp], #0x10
	ldp	\reg1, \reg2, [sp], #0x10
	msr	elr_el1, \reg1
	msr	spsr_el1, \reg2
	.endm
52
/*
 * Cold-boot entrypoint of the TSP, entered from the TSPD on the primary
 * CPU with caches/MMU off. Sets up EL1 state, clears memory, allocates
 * a stack and runs tsp_main(), then reports completion to EL3 via SMC.
 */
func tsp_entrypoint _align=3

#if ENABLE_PIE
		/*
		 * ------------------------------------------------------------
		 * If PIE is enabled fixup the Global descriptor Table only
		 * once during primary core cold boot path.
		 *
		 * Compile time base address, required for fixup, is calculated
		 * using "pie_fixup" label present within first page.
		 * ------------------------------------------------------------
		 */
	pie_fixup:
		ldr	x0, =pie_fixup			/* compile-time address within first page */
		and	x0, x0, #~(PAGE_SIZE_MASK)	/* x0 = compile-time image base */
		mov_imm	x1, (BL32_LIMIT - BL32_BASE)	/* image size */
		add	x1, x1, x0			/* x1 = compile-time image limit */
		bl	fixup_gdt_reloc
#endif /* ENABLE_PIE */

	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* ---------------------------------------------
	 * Enable the SError interrupt now that the
	 * exception vectors have been setup.
	 * ---------------------------------------------
	 */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks and disable
	 * speculative loads.
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	bic	x0, x0, #SCTLR_DSSBS_BIT	/* disable speculative loads/stores */
	msr	sctlr_el1, x0
	isb

	/* ---------------------------------------------
	 * Invalidate the RW memory used by the BL32
	 * image. This includes the data and NOBITS
	 * sections. This is done to safeguard against
	 * possible corruption of this memory by dirty
	 * cache lines in a system cache as a result of
	 * use by an earlier boot loader stage. If PIE
	 * is enabled however, RO sections including the
	 * GOT may be modified during pie fixup.
	 * Therefore, to be on the safe side, invalidate
	 * the entire image region if PIE is enabled.
	 * ---------------------------------------------
	 */
#if ENABLE_PIE
#if SEPARATE_CODE_AND_RODATA
	adrp	x0, __TEXT_START__
	add	x0, x0, :lo12:__TEXT_START__
#else
	adrp	x0, __RO_START__
	add	x0, x0, :lo12:__RO_START__
#endif /* SEPARATE_CODE_AND_RODATA */
#else
	adrp	x0, __RW_START__
	add	x0, x0, :lo12:__RW_START__
#endif /* ENABLE_PIE */
	adrp	x1, __RW_END__
	add     x1, x1, :lo12:__RW_END__
	sub	x1, x1, x0			/* x1 = length, as inv_dcache_range expects */
	bl	inv_dcache_range

	/* ---------------------------------------------
	 * Zero out NOBITS sections. There are 2 of them:
	 *   - the .bss section;
	 *   - the coherent memory section.
	 * ---------------------------------------------
	 */
	adrp	x0, __BSS_START__
	add	x0, x0, :lo12:__BSS_START__
	adrp	x1, __BSS_END__
	add	x1, x1, :lo12:__BSS_END__
	sub	x1, x1, x0
	bl	zeromem

#if USE_COHERENT_MEM
	adrp	x0, __COHERENT_RAM_START__
	add	x0, x0, :lo12:__COHERENT_RAM_START__
	adrp	x1, __COHERENT_RAM_END_UNALIGNED__
	add	x1, x1, :lo12:__COHERENT_RAM_END_UNALIGNED__
	sub	x1, x1, x0
	bl	zeromem
#endif

	/* --------------------------------------------
	 * Allocate a stack whose memory will be marked
	 * as Normal-IS-WBWA when the MMU is enabled.
	 * There is no risk of reading stale stack
	 * memory after enabling the MMU as only the
	 * primary cpu is running at the moment.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* ---------------------------------------------
	 * Initialize the stack protector canary before
	 * any C code is called.
	 * ---------------------------------------------
	 */
#if STACK_PROTECTOR_ENABLED
	bl	update_stack_protector_canary
#endif

	/* ---------------------------------------------
	 * Perform TSP setup
	 * ---------------------------------------------
	 */
	bl	tsp_setup

#if ENABLE_PAUTH
	/* ---------------------------------------------
	 * Program APIAKey_EL1
	 * and enable pointer authentication
	 * ---------------------------------------------
	 */
	bl	pauth_init_enable_el1
#endif /* ENABLE_PAUTH */

	/* ---------------------------------------------
	 * Jump to main function.
	 * ---------------------------------------------
	 */
	bl	tsp_main

	/* ---------------------------------------------
	 * Tell TSPD that we are done initialising
	 * ---------------------------------------------
	 */
	mov	x1, x0				/* x1 = tsp_main() result */
	mov	x0, #TSP_ENTRY_DONE
	smc	#0

	/* The SMC above should not return; park the CPU if it does */
tsp_entrypoint_panic:
	b	tsp_entrypoint_panic
endfunc tsp_entrypoint
204
205
206	/* -------------------------------------------
207	 * Table of entrypoint vectors provided to the
208	 * TSPD for the various entrypoints
209	 * -------------------------------------------
210	 */
/*
 * NOTE(review): the entry order below is an ABI with the TSPD — it
 * presumably must match the vector structure the TSPD indexes into
 * (see tsp.h); confirm before reordering.
 */
vector_base tsp_vector_table
	b	tsp_yield_smc_entry		/* yielding SMC service */
	b	tsp_fast_smc_entry		/* fast SMC service */
	b	tsp_cpu_on_entry		/* CPU_ON warm boot */
	b	tsp_cpu_off_entry		/* CPU_OFF notification */
	b	tsp_cpu_resume_entry		/* resume after CPU_SUSPEND */
	b	tsp_cpu_suspend_entry		/* CPU_SUSPEND notification */
	b	tsp_sel1_intr_entry		/* synchronous S-EL1 interrupt */
	b	tsp_system_off_entry		/* SYSTEM_OFF notification */
	b	tsp_system_reset_entry		/* SYSTEM_RESET notification */
	b	tsp_abort_yield_smc_entry	/* abort a preempted yielding SMC */
222
223	/*---------------------------------------------
224	 * This entrypoint is used by the TSPD when this
225	 * cpu is to be turned off through a CPU_OFF
226	 * psci call to ask the TSP to perform any
	 * bookkeeping necessary. In the current
228	 * implementation, the TSPD expects the TSP to
229	 * re-initialise its state so nothing is done
230	 * here except for acknowledging the request.
231	 * ---------------------------------------------
232	 */
func tsp_cpu_off_entry
	bl	tsp_cpu_off_main
	/* x0 points at the SMC args to hand back; returns to TSPD via SMC */
	restore_args_call_smc
endfunc tsp_cpu_off_entry
237
238	/*---------------------------------------------
239	 * This entrypoint is used by the TSPD when the
240	 * system is about to be switched off (through
241	 * a SYSTEM_OFF psci call) to ask the TSP to
242	 * perform any necessary bookkeeping.
243	 * ---------------------------------------------
244	 */
func tsp_system_off_entry
	bl	tsp_system_off_main
	/* x0 points at the SMC args to hand back; returns to TSPD via SMC */
	restore_args_call_smc
endfunc tsp_system_off_entry
249
250	/*---------------------------------------------
251	 * This entrypoint is used by the TSPD when the
252	 * system is about to be reset (through a
253	 * SYSTEM_RESET psci call) to ask the TSP to
254	 * perform any necessary bookkeeping.
255	 * ---------------------------------------------
256	 */
func tsp_system_reset_entry
	bl	tsp_system_reset_main
	/* x0 points at the SMC args to hand back; returns to TSPD via SMC */
	restore_args_call_smc
endfunc tsp_system_reset_entry
261
262	/*---------------------------------------------
263	 * This entrypoint is used by the TSPD when this
264	 * cpu is turned on using a CPU_ON psci call to
265	 * ask the TSP to initialise itself i.e. setup
266	 * the mmu, stacks etc. Minimal architectural
267	 * state will be initialised by the TSPD when
268	 * this function is entered i.e. Caches and MMU
269	 * will be turned off, the execution state
270	 * will be aarch64 and exceptions masked.
271	 * ---------------------------------------------
272	 */
/*
 * Warm-boot entrypoint: re-initialises EL1 state on a CPU entered with
 * caches/MMU off, then runs the C-level CPU_ON handler and returns to
 * the TSPD via SMC. Mirrors the cold-boot sequence in tsp_entrypoint,
 * minus the one-time work (PIE fixup, BSS clear, tsp_setup).
 */
func tsp_cpu_on_entry
	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* Enable the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	msr	sctlr_el1, x0
	isb

	/* --------------------------------------------
	 * Give ourselves a stack whose memory will be
	 * marked as Normal-IS-WBWA when the MMU is
	 * enabled.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* --------------------------------------------
	 * Enable MMU and D-caches together.
	 * --------------------------------------------
	 */
	mov	x0, #0			/* flags argument for bl32_plat_enable_mmu */
	bl	bl32_plat_enable_mmu

#if ENABLE_PAUTH
	/* ---------------------------------------------
	 * Program APIAKey_EL1
	 * and enable pointer authentication
	 * ---------------------------------------------
	 */
	bl	pauth_init_enable_el1
#endif /* ENABLE_PAUTH */

	/* ---------------------------------------------
	 * Enter C runtime to perform any remaining
	 * book keeping
	 * ---------------------------------------------
	 */
	bl	tsp_cpu_on_main
	restore_args_call_smc

	/* Should never reach here */
tsp_cpu_on_entry_panic:
	b	tsp_cpu_on_entry_panic
endfunc tsp_cpu_on_entry
332
333	/*---------------------------------------------
334	 * This entrypoint is used by the TSPD when this
335	 * cpu is to be suspended through a CPU_SUSPEND
336	 * psci call to ask the TSP to perform any
	 * bookkeeping necessary. In the current
338	 * implementation, the TSPD saves and restores
339	 * the EL1 state.
340	 * ---------------------------------------------
341	 */
func tsp_cpu_suspend_entry
	bl	tsp_cpu_suspend_main
	/* x0 points at the SMC args to hand back; returns to TSPD via SMC */
	restore_args_call_smc
endfunc tsp_cpu_suspend_entry
346
347	/*-------------------------------------------------
348	 * This entrypoint is used by the TSPD to pass
349	 * control for `synchronously` handling a S-EL1
350	 * Interrupt which was triggered while executing
351	 * in normal world. 'x0' contains a magic number
352	 * which indicates this. TSPD expects control to
353	 * be handed back at the end of interrupt
354	 * processing. This is done through an SMC.
355	 * The handover agreement is:
356	 *
357	 * 1. PSTATE.DAIF are set upon entry. 'x1' has
358	 *    the ELR_EL3 from the non-secure state.
359	 * 2. TSP has to preserve the callee saved
360	 *    general purpose registers, SP_EL1/EL0 and
361	 *    LR.
362	 * 3. TSP has to preserve the system and vfp
363	 *    registers (if applicable).
364	 * 4. TSP can use 'x0-x18' to enable its C
365	 *    runtime.
366	 * 5. TSP returns to TSPD using an SMC with
367	 *    'x0' = TSP_HANDLED_S_EL1_INTR
368	 * ------------------------------------------------
369	 */
/*
 * Synchronous S-EL1 interrupt handler, entered from the TSPD. In DEBUG
 * builds, verify the magic function id in x0 first. Saves the exception
 * return context, updates statistics, dispatches the interrupt and
 * returns to EL3 with x0 = TSP_HANDLED_S_EL1_INTR.
 */
func	tsp_sel1_intr_entry
#if DEBUG
	mov_imm	x2, TSP_HANDLE_SEL1_INTR_AND_RETURN
	cmp	x0, x2
	b.ne	tsp_sel1_int_entry_panic
#endif
	/*-------------------------------------------------
	 * Save any previous context needed to perform
	 * an exception return from S-EL1 e.g. context
	 * from a previous Non secure Interrupt.
	 * Update statistics and handle the S-EL1
	 * interrupt before returning to the TSPD.
	 * IRQ/FIQs are not enabled since that will
	 * complicate the implementation. Execution
	 * will be transferred back to the normal world
	 * in any case. The handler can return 0
	 * if the interrupt was handled or TSP_PREEMPTED
	 * if the expected interrupt was preempted
	 * by an interrupt that should be handled in EL3
	 * e.g. Group 0 interrupt in GICv3. In both
	 * the cases switch to EL3 using SMC with id
	 * TSP_HANDLED_S_EL1_INTR. Any other return value
	 * from the handler will result in panic.
	 * ------------------------------------------------
	 */
	save_eret_context x2 x3
	bl	tsp_update_sync_sel1_intr_stats
	bl	tsp_common_int_handler
	/* x0 == 0: interrupt handled, return to the TSPD */
	cbz	x0, tsp_sel1_intr_return
	/* Otherwise only TSP_PREEMPTED is a legitimate return value */
	mov_imm	x1, TSP_PREEMPTED
	cmp	x0, x1
	b.ne	tsp_sel1_int_entry_panic
tsp_sel1_intr_return:
	mov_imm	x0, TSP_HANDLED_S_EL1_INTR
	restore_eret_context x2 x3
	smc	#0

	/* Should never reach here */
tsp_sel1_int_entry_panic:
	no_ret	plat_panic_handler
endfunc tsp_sel1_intr_entry
415
416	/*---------------------------------------------
417	 * This entrypoint is used by the TSPD when this
418	 * cpu resumes execution after an earlier
419	 * CPU_SUSPEND psci call to ask the TSP to
420	 * restore its saved context. In the current
421	 * implementation, the TSPD saves and restores
422	 * EL1 state so nothing is done here apart from
423	 * acknowledging the request.
424	 * ---------------------------------------------
425	 */
func tsp_cpu_resume_entry
	bl	tsp_cpu_resume_main
	/* x0 points at the SMC args to hand back; returns to TSPD via SMC */
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_cpu_resume_entry
433
434	/*---------------------------------------------
435	 * This entrypoint is used by the TSPD to ask
436	 * the TSP to service a fast smc request.
437	 * ---------------------------------------------
438	 */
func tsp_fast_smc_entry
	/* Fast SMCs run with interrupts masked throughout */
	bl	tsp_smc_handler
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_fast_smc_entry
446
447	/*---------------------------------------------
448	 * This entrypoint is used by the TSPD to ask
449	 * the TSP to service a Yielding SMC request.
450	 * We will enable preemption during execution
451	 * of tsp_smc_handler.
452	 * ---------------------------------------------
453	 */
func tsp_yield_smc_entry
	/* Unmask IRQ/FIQ so the yielding SMC can be preempted */
	msr	daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
	bl	tsp_smc_handler
	/* Re-mask before handing control back to EL3 */
	msr	daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_yield_smc_entry
463
464	/*---------------------------------------------------------------------
465	 * This entrypoint is used by the TSPD to abort a pre-empted Yielding
466	 * SMC. It could be on behalf of non-secure world or because a CPU
467	 * suspend/CPU off request needs to abort the preempted SMC.
468	 * --------------------------------------------------------------------
469	 */
func tsp_abort_yield_smc_entry

	/*
	 * Exceptions masking is already done by the TSPD when entering this
	 * hook so there is no need to do it here.
	 */

	/* Reset the stack used by the pre-empted SMC */
	bl	plat_set_my_stack

	/*
	 * Allow some cleanup such as releasing locks.
	 */
	bl	tsp_abort_smc_handler

	/* x0 points at the SMC args to hand back; returns to TSPD via SMC */
	restore_args_call_smc

	/*
	 * Should never reach here. Use no_ret (as every other entrypoint in
	 * this file does) rather than a plain bl, since plat_panic_handler
	 * does not return.
	 */
	no_ret	plat_panic_handler
endfunc tsp_abort_yield_smc_entry
490