xref: /aosp_15_r20/external/arm-trusted-firmware/lib/aarch64/misc_helpers.S (revision 54fd6939e177f8ff529b10183254802c76df6d08)
/*
 * Copyright (c) 2013-2021, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <common/bl_common.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

	.globl	smc

	.globl	zero_normalmem
	.globl	zeromem
	.globl	memcpy16
	.globl	gpt_tlbi_by_pa

	.globl	disable_mmu_el1
	.globl	disable_mmu_el3
	.globl	disable_mmu_icache_el1
	.globl	disable_mmu_icache_el3
	.globl	fixup_gdt_reloc
#if SUPPORT_VFP
	.globl	enable_vfp
#endif

func smc
	smc	#0
endfunc smc

/* -----------------------------------------------------------------------
 * void zero_normalmem(void *mem, unsigned int length);
 *
 * Initialise a region in normal memory to 0. This function complies with the
 * AAPCS and can be called from C code.
 *
 * NOTE: MMU must be enabled when using this function as it can only operate
 *       on normal memory. It is mainly intended to be used from C code, where
 *       the MMU is usually enabled.
 * -----------------------------------------------------------------------
 */
.equ	zero_normalmem, zeromem_dczva
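
/*
 * Illustrative C usage (a sketch only; the buffer name is made up). The MMU
 * and data cache must already be enabled when this is called:
 *
 *	extern void zero_normalmem(void *mem, unsigned int length);
 *
 *	static unsigned char scratch[256];
 *	zero_normalmem(scratch, sizeof(scratch));
 */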

/* -----------------------------------------------------------------------
 * void zeromem(void *mem, unsigned int length);
 *
 * Initialise a region of device memory to 0. This function complies with the
 * AAPCS and can be called from C code.
 *
 * NOTE: When data caches and MMU are enabled, zero_normalmem can usually be
 *       used instead for faster zeroing.
 *
 * -----------------------------------------------------------------------
 */
func zeromem
	/* x2 is the address past the last zeroed address */
	add	x2, x0, x1
	/*
	 * Uses the fallback path that does not use the DC ZVA instruction and
	 * therefore does not need the MMU to be enabled.
	 */
	b	.Lzeromem_dczva_fallback_entry
endfunc zeromem
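
/*
 * Illustrative C usage (a sketch only; the buffer name is made up). Unlike
 * zero_normalmem, this path is safe to call before the MMU is enabled, e.g.
 * to clear a small buffer in early boot code:
 *
 *	extern void zeromem(void *mem, unsigned int length);
 *
 *	static unsigned char early_buf[64];
 *	zeromem(early_buf, sizeof(early_buf));
 */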

/* -----------------------------------------------------------------------
 * void zeromem_dczva(void *mem, unsigned int length);
 *
 * Fill a region of normal memory of size "length" in bytes with null bytes.
 * The MMU must be enabled and the memory must be of normal type. This is
 * because this function internally uses the DC ZVA instruction, which
 * generates an Alignment fault if used on any type of Device memory (see
 * section D3.4.9 of the ARMv8 ARM, issue k). When the MMU is disabled, all
 * memory behaves like Device-nGnRnE memory (see section D4.2.8), hence the
 * requirement on the MMU being enabled.
 * NOTE: The code assumes that the block size as defined in the DCZID_EL0
 *       register is at least 16 bytes.
 *
 * -----------------------------------------------------------------------
 */
func zeromem_dczva

	/*
	 * The function consists of a series of loops that zero memory one byte
	 * at a time, 16 bytes at a time or using the DC ZVA instruction to
	 * zero aligned blocks of bytes, whose size is assumed to be at least
	 * 16 bytes. In the case where the DC ZVA instruction cannot be used or
	 * if the first 16 bytes loop would overflow, there is a fallback path
	 * that does not use DC ZVA.
	 * Note: The fallback path is also used by the zeromem function, which
	 *       branches to it directly.
	 *
	 *              +---------+   zeromem_dczva
	 *              |  entry  |
	 *              +----+----+
	 *                   |
	 *                   v
	 *              +---------+
	 *              | checks  |>o-------+ (If any check fails, fallback)
	 *              +----+----+         |
	 *                   |              |---------------+
	 *                   v              | Fallback path |
	 *            +------+------+       |---------------+
	 *            | 1 byte loop |       |
	 *            +------+------+ .Lzeromem_dczva_initial_1byte_aligned_end
	 *                   |              |
	 *                   v              |
	 *           +-------+-------+      |
	 *           | 16 bytes loop |      |
	 *           +-------+-------+      |
	 *                   |              |
	 *                   v              |
	 *            +------+------+ .Lzeromem_dczva_blocksize_aligned
	 *            | DC ZVA loop |       |
	 *            +------+------+       |
	 *       +--------+  |              |
	 *       |        |  |              |
	 *       |        v  v              |
	 *       |   +-------+-------+ .Lzeromem_dczva_final_16bytes_aligned
	 *       |   | 16 bytes loop |      |
	 *       |   +-------+-------+      |
	 *       |           |              |
	 *       |           v              |
	 *       |    +------+------+ .Lzeromem_dczva_final_1byte_aligned
	 *       |    | 1 byte loop |       |
	 *       |    +-------------+       |
	 *       |           |              |
	 *       |           v              |
	 *       |       +---+--+           |
	 *       |       | exit |           |
	 *       |       +------+           |
	 *       |                          |
	 *       |           +--------------+    +------------------+ zeromem
	 *       |           |  +----------------| zeromem function |
	 *       |           |  |                +------------------+
	 *       |           v  v
	 *       |    +-------------+ .Lzeromem_dczva_fallback_entry
	 *       |    | 1 byte loop |
	 *       |    +------+------+
	 *       |           |
	 *       +-----------+
	 */
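
	/*
	 * Rough C model of the flow above (an illustrative sketch only: the
	 * overflow checks and the exact fallback conditions of the code below
	 * are omitted; zero1/zero16/dczva are made-up helpers that zero 1, 16
	 * or block_size bytes at "p" and advance it, and block_size/block_mask
	 * are derived from DCZID_EL0 as done below):
	 *
	 *	uintptr_t p = (uintptr_t)mem, stop = p + length;
	 *
	 *	if (length < block_size)
	 *		goto fallback;
	 *	while (p & 0xf)				zero1(&p);
	 *	while (p & block_mask)			zero16(&p);
	 *	while (p < (stop & ~block_mask))	dczva(&p);
	 *	while (p < (stop & ~(uintptr_t)0xf))	zero16(&p);
	 *	while (p < stop)			zero1(&p);
	 */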

	/*
	 * Readable names for registers
	 *
	 * Registers x0, x1 and x2 are also set by zeromem which
	 * branches into the fallback path directly, so cursor, length and
	 * stop_address should not be retargeted to other registers.
	 */
	cursor       .req x0 /* Start address and then current address */
	length       .req x1 /* Length in bytes of the region to zero out */
	/* Reusing x1 as length is never used after block_mask is set */
	block_mask   .req x1 /* Bitmask of the block size read in DCZID_EL0 */
	stop_address .req x2 /* Address past the last zeroed byte */
	block_size   .req x3 /* Size of a block in bytes as read in DCZID_EL0 */
	tmp1         .req x4
	tmp2         .req x5

#if ENABLE_ASSERTIONS
	/*
	 * Check the M bit (MMU enabled) of the current SCTLR_EL(1|3)
	 * register value and panic if the MMU is disabled.
	 */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || (defined(IMAGE_BL2) && \
	(BL2_AT_EL3 || ENABLE_RME))
	mrs	tmp1, sctlr_el3
#else
	mrs	tmp1, sctlr_el1
#endif

	tst	tmp1, #SCTLR_M_BIT
	ASM_ASSERT(ne)
#endif /* ENABLE_ASSERTIONS */

	/* stop_address is the address past the last byte to zero */
	add	stop_address, cursor, length

	/*
	 * Read the dczid_el0 register; its 4 lowest bits encode
	 * log2(<block size in words>) (see the encoding of the dczid_el0
	 * register).
	 */
	mrs	block_size, dczid_el0

	/*
	 * Select the 4 lowest bits and convert the extracted log2(<block size
	 * in words>) to <block size in bytes>
	 */
	ubfx	block_size, block_size, #0, #4
	mov	tmp2, #(1 << 2)
	lsl	block_size, tmp2, block_size
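
	/*
	 * Worked example: if DCZID_EL0[3:0] = 4, the zero block is 2^4 = 16
	 * words, i.e. block_size = (1 << 2) << 4 = 64 bytes.
	 */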

#if ENABLE_ASSERTIONS
	/*
	 * Assume the block size is at least 16 bytes to avoid manual
	 * realignment of the cursor at the end of the DC ZVA loop.
	 */
	cmp	block_size, #16
	ASM_ASSERT(hs)
#endif
	/*
	 * It is not worth doing all the setup for a region smaller than a
	 * block, and this check protects against zeroing a whole block when
	 * the area to zero is smaller than that. Also, as it is assumed that
	 * the block size is at least 16 bytes, this also protects the initial
	 * aligning loops from trying to zero 16 bytes when length is less
	 * than 16.
	 */
	cmp	length, block_size
	b.lo	.Lzeromem_dczva_fallback_entry

	/*
	 * Calculate the bitmask of the block alignment. It will never
	 * underflow as the block size is between 4 bytes and 2kB.
	 * block_mask = block_size - 1
	 */
	sub	block_mask, block_size, #1

	/*
	 * The length alias should not be used after this point unless it is
	 * defined as a register other than block_mask's.
	 */
	.unreq	length

	/*
	 * If the start address is already aligned to the zero block size, go
	 * straight to the cache zeroing loop. This is safe because at this
	 * point, the length cannot be smaller than a block size.
	 */
	tst	cursor, block_mask
	b.eq	.Lzeromem_dczva_blocksize_aligned

	/*
	 * Calculate the first block-size-aligned address. It is assumed that
	 * the zero block size is at least 16 bytes. This address is the last
	 * address of this initial loop.
	 */
	orr	tmp1, cursor, block_mask
	add	tmp1, tmp1, #1
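
	/*
	 * Example: with cursor = 0x1004 and block_size = 0x40 (so block_mask =
	 * 0x3f), tmp1 = (0x1004 | 0x3f) + 1 = 0x1040, the next block-aligned
	 * address.
	 */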

	/*
	 * If the addition overflows, skip the cache zeroing loops. This is
	 * quite unlikely however.
	 */
	cbz	tmp1, .Lzeromem_dczva_fallback_entry

	/*
	 * If the first block-size-aligned address is past the last address,
	 * fall back to the simpler code.
	 */
	cmp	tmp1, stop_address
	b.hi	.Lzeromem_dczva_fallback_entry

	/*
	 * If the start address is already aligned to 16 bytes, skip this loop.
	 * It is safe to do this because tmp1 (the stop address of the initial
	 * 16 bytes loop) will never be greater than the final stop address.
	 */
	tst	cursor, #0xf
	b.eq	.Lzeromem_dczva_initial_1byte_aligned_end

	/* Calculate the next address aligned to 16 bytes */
	orr	tmp2, cursor, #0xf
	add	tmp2, tmp2, #1
	/* If it overflows, fall back to the simple path (unlikely) */
	cbz	tmp2, .Lzeromem_dczva_fallback_entry
	/*
	 * The next aligned address cannot be after the stop address because
	 * the length cannot be smaller than 16 at this point.
	 */

	/* First loop: zero byte per byte */
1:
	strb	wzr, [cursor], #1
	cmp	cursor, tmp2
	b.ne	1b
.Lzeromem_dczva_initial_1byte_aligned_end:

	/*
	 * Second loop: we need to zero 16 bytes at a time from cursor to tmp1
	 * before being able to use the code that deals with block-size-aligned
	 * addresses.
	 */
	cmp	cursor, tmp1
	b.hs	2f
1:
	stp	xzr, xzr, [cursor], #16
	cmp	cursor, tmp1
	b.lo	1b
2:

	/*
	 * Third loop: zero a block at a time using the DC ZVA cache block
	 * zeroing instruction.
	 */
.Lzeromem_dczva_blocksize_aligned:
	/*
	 * Calculate the last block-size-aligned address. If the result equals
	 * the start address, the loop will exit immediately.
	 */
	bic	tmp1, stop_address, block_mask

	cmp	cursor, tmp1
	b.hs	2f
1:
	/* Zero the block containing the cursor */
	dc	zva, cursor
	/* Increment the cursor by the size of a block */
	add	cursor, cursor, block_size
	cmp	cursor, tmp1
	b.lo	1b
2:

	/*
	 * Fourth loop: zero 16 bytes at a time, then the remaining area byte
	 * per byte.
	 */
.Lzeromem_dczva_final_16bytes_aligned:
	/*
	 * Calculate the last 16-byte-aligned address. It is assumed that the
	 * block size will never be smaller than 16 bytes, so the current
	 * cursor is aligned to at least a 16-byte boundary.
	 */
	bic	tmp1, stop_address, #15

	cmp	cursor, tmp1
	b.hs	2f
1:
	stp	xzr, xzr, [cursor], #16
	cmp	cursor, tmp1
	b.lo	1b
2:

	/* Fifth and final loop: zero byte per byte */
.Lzeromem_dczva_final_1byte_aligned:
	cmp	cursor, stop_address
	b.eq	2f
1:
	strb	wzr, [cursor], #1
	cmp	cursor, stop_address
	b.ne	1b
2:
	ret

	/* Fallback for unaligned start addresses */
.Lzeromem_dczva_fallback_entry:
	/*
	 * If the start address is already aligned to 16 bytes, skip this loop.
	 */
	tst	cursor, #0xf
	b.eq	.Lzeromem_dczva_final_16bytes_aligned

	/* Calculate the next address aligned to 16 bytes */
	orr	tmp1, cursor, #15
	add	tmp1, tmp1, #1
	/* If it overflows, fall back to byte per byte zeroing */
	cbz	tmp1, .Lzeromem_dczva_final_1byte_aligned
	/* If the next aligned address is after the stop address, fall back */
	cmp	tmp1, stop_address
	b.hs	.Lzeromem_dczva_final_1byte_aligned

	/* Fallback entry loop: zero byte per byte */
1:
	strb	wzr, [cursor], #1
	cmp	cursor, tmp1
	b.ne	1b

	b	.Lzeromem_dczva_final_16bytes_aligned

	.unreq	cursor
	/*
	 * length is already unreq'ed to reuse the register for another
	 * variable.
	 */
	.unreq	stop_address
	.unreq	block_size
	.unreq	block_mask
	.unreq	tmp1
	.unreq	tmp2
endfunc zeromem_dczva

/* --------------------------------------------------------------------------
 * void memcpy16(void *dest, const void *src, unsigned int length)
 *
 * Copy length bytes from memory area src to memory area dest.
 * The memory areas should not overlap.
 * Destination and source addresses must be 16-byte aligned.
 * --------------------------------------------------------------------------
 */
func memcpy16
#if ENABLE_ASSERTIONS
	orr	x3, x0, x1
	tst	x3, #0xf
	ASM_ASSERT(eq)
#endif
/* copy 16 bytes at a time */
m_loop16:
	cmp	x2, #16
	b.lo	m_loop1
	ldp	x3, x4, [x1], #16
	stp	x3, x4, [x0], #16
	sub	x2, x2, #16
	b	m_loop16
/* copy byte per byte */
m_loop1:
	cbz	x2, m_end
	ldrb	w3, [x1], #1
	strb	w3, [x0], #1
	subs	x2, x2, #1
	b.ne	m_loop1
m_end:
	ret
endfunc memcpy16
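
/*
 * Illustrative C usage (a sketch only; the buffer names are made up). Both
 * addresses must be 16-byte aligned as documented above:
 *
 *	extern void memcpy16(void *dest, const void *src, unsigned int length);
 *
 *	static unsigned long long dst[8] __attribute__((aligned(16)));
 *	static const unsigned long long src_data[8]
 *		__attribute__((aligned(16))) = { 1, 2, 3, 4, 5, 6, 7, 8 };
 *
 *	memcpy16(dst, src_data, sizeof(dst));
 */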

/* ---------------------------------------------------------------------------
 * Disable the MMU at EL3
 * ---------------------------------------------------------------------------
 */

func disable_mmu_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu_el3:
	mrs	x0, sctlr_el3
	bic	x0, x0, x1
	msr	sctlr_el3, x0
	isb	/* ensure MMU is off */
	dsb	sy
	ret
endfunc disable_mmu_el3


func disable_mmu_icache_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
	b	do_disable_mmu_el3
endfunc disable_mmu_icache_el3

/* ---------------------------------------------------------------------------
 * Disable the MMU at EL1
 * ---------------------------------------------------------------------------
 */

func disable_mmu_el1
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu_el1:
	mrs	x0, sctlr_el1
	bic	x0, x0, x1
	msr	sctlr_el1, x0
	isb	/* ensure MMU is off */
	dsb	sy
	ret
endfunc disable_mmu_el1


func disable_mmu_icache_el1
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
	b	do_disable_mmu_el1
endfunc disable_mmu_icache_el1

/* ---------------------------------------------------------------------------
 * Enable the use of VFP at EL3
 * ---------------------------------------------------------------------------
 */
#if SUPPORT_VFP
func enable_vfp
	mrs	x0, cpacr_el1
	orr	x0, x0, #CPACR_VFP_BITS
	msr	cpacr_el1, x0
	mrs	x0, cptr_el3
	mov	x1, #AARCH64_CPTR_TFP
	bic	x0, x0, x1
	msr	cptr_el3, x0
	isb
	ret
endfunc enable_vfp
#endif

/* ---------------------------------------------------------------------------
 * Helper to fixup the Global Offset Table (GOT) and dynamic relocations
 * (.rela.dyn) at runtime.
 *
 * This function is meant to be used when the firmware is compiled with -fpie
 * and linked with -pie options. We rely on the linker script exporting
 * appropriate markers for the start and end of the section. For the GOT, we
 * expect __GOT_START__ and __GOT_END__. Similarly for .rela.dyn, we expect
 * __RELA_START__ and __RELA_END__.
 *
 * The function takes the limits of the memory to apply fixups to as
 * arguments (which is usually the limits of the relocatable BL image).
 *   x0 -  the start of the fixup region
 *   x1 -  the limit of the fixup region
 * These addresses have to be 4KB page aligned.
 * ---------------------------------------------------------------------------
 */
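
/*
 * Rough C model of the GOT fixup applied below (an illustrative sketch only;
 * the variable names are made up and "diff" corresponds to Diff(S)):
 *
 *	uint64_t diff = runtime_base - start;	// start = x0, limit = x1
 *
 *	for (uint64_t *entry = got_start; entry < got_end; entry++)
 *		if (*entry >= start && *entry < limit)
 *			*entry += diff;
 *
 * The .rela.dyn entries are processed in a similar way, using the r_offset
 * and r_addend fields described further below.
 */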

/* Relocation codes */
#define	R_AARCH64_NONE		0
#define	R_AARCH64_RELATIVE	1027

func fixup_gdt_reloc
	mov	x6, x0
	mov	x7, x1

#if ENABLE_ASSERTIONS
	/* Test if the limits are 4KB aligned */
	orr	x0, x0, x1
	tst	x0, #(PAGE_SIZE_MASK)
	ASM_ASSERT(eq)
#endif
	/*
	 * Calculate the offset based on the return address in x30.
	 * Assume that this function is called from within the first page of
	 * the fixup region.
	 */
	and	x2, x30, #~(PAGE_SIZE_MASK)
	subs	x0, x2, x6	/* Diff(S) = Current Address - Compiled Address */
	b.eq	3f		/* Diff(S) = 0. No relocation needed */

	adrp	x1, __GOT_START__
	add	x1, x1, :lo12:__GOT_START__
	adrp	x2, __GOT_END__
	add	x2, x2, :lo12:__GOT_END__

	/*
	 * The GOT is an array of 64-bit addresses which must be fixed up as
	 * new_addr = old_addr + Diff(S).
	 * new_addr is the address the binary is currently executing from and
	 * old_addr is the address at compile time.
	 */
1:	ldr	x3, [x1]

	/* Skip adding the offset if the address is < lower limit */
	cmp	x3, x6
	b.lo	2f

	/* Skip adding the offset if the address is >= upper limit */
	cmp	x3, x7
	b.hs	2f
	add	x3, x3, x0
	str	x3, [x1]

2:	add	x1, x1, #8
	cmp	x1, x2
	b.lo	1b

	/* Starting dynamic relocations. Use adrp/add to get RELA_START and END */
3:	adrp	x1, __RELA_START__
	add	x1, x1, :lo12:__RELA_START__
	adrp	x2, __RELA_END__
	add	x2, x2, :lo12:__RELA_END__

	/*
	 * According to the ELF-64 specification, the RELA data structure is
	 * as follows:
	 *	typedef struct {
	 *		Elf64_Addr r_offset;
	 *		Elf64_Xword r_info;
	 *		Elf64_Sxword r_addend;
	 *	} Elf64_Rela;
	 *
	 * r_offset is the address of the reference.
	 * r_info is the symbol index and the type of relocation (in this case
	 * code 1027, which corresponds to R_AARCH64_RELATIVE).
	 * r_addend is the constant part of the expression.
	 *
	 * The size of the Elf64_Rela structure is 24 bytes.
	 */
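
	/*
	 * Worked example (made-up numbers): for an entry with
	 * r_offset = 0x2000, r_info = 1027 and r_addend = 0x4000, and with
	 * Diff(S) = 0x1000, the 64-bit word at address 0x2000 + 0x1000 is set
	 * to 0x4000 + 0x1000 = 0x5000, provided 0x4000 lies within the fixup
	 * region limits.
	 */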

	/* Skip R_AARCH64_NONE entries (code 0) */
1:	ldr	x3, [x1, #8]
	cbz	x3, 2f

#if ENABLE_ASSERTIONS
	/* Assert that the relocation type is R_AARCH64_RELATIVE */
	cmp	x3, #R_AARCH64_RELATIVE
	ASM_ASSERT(eq)
#endif
	ldr	x3, [x1]	/* r_offset */
	add	x3, x0, x3
	ldr	x4, [x1, #16]	/* r_addend */

	/* Skip adding the offset if r_addend is < lower limit */
	cmp	x4, x6
	b.lo	2f

	/* Skip adding the offset if r_addend is >= upper limit */
	cmp	x4, x7
	b.hs	2f

	add	x4, x0, x4	/* Diff(S) + r_addend */
	str	x4, [x3]

2:	add	x1, x1, #24
	cmp	x1, x2
	b.lo	1b
	ret
endfunc fixup_gdt_reloc

/*
 * TODO: Currently only supports a size of 4KB;
 * add support for other sizes as well.
 */
func gpt_tlbi_by_pa
#if ENABLE_ASSERTIONS
	cmp	x1, #PAGE_SIZE_4KB
	ASM_ASSERT(eq)
	tst	x0, #(PAGE_SIZE_MASK)
	ASM_ASSERT(eq)
#endif
	lsr	x0, x0, #FOUR_KB_SHIFT	/* 4KB size encoding is zero */
	sys	#6, c8, c4, #3, x0	/* TLBI RPAOS, <Xt> */
	dsb	sy
	ret
endfunc gpt_tlbi_by_pa
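
/*
 * Illustrative usage (a sketch only; the exact C prototype is an assumption,
 * along the lines of void gpt_tlbi_by_pa(uint64_t pa, size_t size)):
 *
 *	gpt_tlbi_by_pa(pa, PAGE_SIZE_4KB);
 */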