1/*
2 * Copyright (c) 2016-2023, ARM Limited and Contributors. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6#ifndef ASM_MACROS_S
7#define ASM_MACROS_S
8
9#include <arch.h>
10#include <common/asm_macros_common.S>
11#include <lib/cpus/cpu_ops.h>
12#include <lib/spinlock.h>
13
/*
 * TLBI instruction with type specifier that implements the workaround for
 * errata 813419 of Cortex-A57.
 *
 * With the erratum present a single TLB invalidate may be insufficient,
 * so the maintenance operation is issued twice with a DSB (inner
 * shareable) in between; otherwise a single TLBI is emitted.
 */
#if ERRATA_A57_813419
#define TLB_INVALIDATE(_reg, _coproc) \
	stcopr	_reg, _coproc; \
	dsb	ish; \
	stcopr	_reg, _coproc
#else
#define TLB_INVALIDATE(_reg, _coproc) \
	stcopr	_reg, _coproc
#endif
27
	/*
	 * Coprocessor register accessors
	 */

	/* Read a 32-bit coprocessor register into \reg (MRC encoding). */
	.macro ldcopr reg, coproc, opc1, CRn, CRm, opc2
	mrc	\coproc, \opc1, \reg, \CRn, \CRm, \opc2
	.endm
34
	/*
	 * Read a 64-bit coprocessor register (MRRC encoding).
	 * \reg1 receives the low word, \reg2 the high word.
	 */
	.macro ldcopr16 reg1, reg2, coproc, opc1, CRm
	mrrc	\coproc, \opc1, \reg1, \reg2, \CRm
	.endm
38
	/* Write \reg to a 32-bit coprocessor register (MCR encoding). */
	.macro stcopr reg, coproc, opc1, CRn, CRm, opc2
	mcr	\coproc, \opc1, \reg, \CRn, \CRm, \opc2
	.endm
42
	/*
	 * Write a 64-bit coprocessor register (MCRR encoding).
	 * \reg1 supplies the low word, \reg2 the high word.
	 */
	.macro stcopr16 reg1, reg2, coproc, opc1, CRm
	mcrr	\coproc, \opc1, \reg1, \reg2, \CRm
	.endm
46
	/* Cache line size helpers */

	/*
	 * Compute the data cache line size in bytes.
	 * Out:     \reg = CPU_WORD_SIZE << CTR.DminLine
	 * Clobber: \tmp
	 */
	.macro	dcache_line_size  reg, tmp
	ldcopr	\tmp, CTR	/* Cache Type Register */
	ubfx	\tmp, \tmp, #CTR_DMINLINE_SHIFT, #CTR_DMINLINE_WIDTH
	mov	\reg, #CPU_WORD_SIZE
	lsl	\reg, \reg, \tmp	/* words-per-line * word size */
	.endm
54
	/*
	 * Compute the instruction cache line size in bytes.
	 * Out:     \reg = CPU_WORD_SIZE << CTR.IminLine
	 * Clobber: \tmp
	 * Note: IminLine occupies the low bits of CTR, so a mask
	 * suffices here (no shift needed, unlike DminLine above).
	 */
	.macro	icache_line_size  reg, tmp
	ldcopr	\tmp, CTR	/* Cache Type Register */
	and	\tmp, \tmp, #CTR_IMINLINE_MASK
	mov	\reg, #CPU_WORD_SIZE
	lsl	\reg, \reg, \tmp	/* words-per-line * word size */
	.endm
61
	/*
	 * Declare the exception vector table, enforcing it is aligned on a
	 * 32 byte boundary.
	 */
	.macro vector_base  label
	.section .vectors, "ax"	/* dedicated executable section */
	.align 5	/* 2^5 = 32-byte alignment */
	\label:
	.endm
71
	/*
	 * This macro calculates the base address of the current CPU's multi
	 * processor(MP) stack using the plat_my_core_pos() index, the name of
	 * the stack storage and the size of each stack.
	 * Out: r0 = physical address of stack base
	 * Clobber: r14, r1, r2
	 * (r14/lr is clobbered by the 'bl' below; stacks descend, so the
	 * returned address is the highest address of this CPU's stack.)
	 */
	.macro get_my_mp_stack _name, _size
	bl	plat_my_core_pos	/* r0 = this CPU's linear index */
	ldr r2, =(\_name + \_size)	/* r2 = top of the first stack */
	mov r1, #\_size
	mla r0, r0, r1, r2	/* r0 = _name + (index + 1) * _size */
	.endm
85
	/*
	 * This macro calculates the base address of a uniprocessor(UP) stack
	 * using the name of the stack storage and the size of the stack
	 * Out: r0 = physical address of stack base
	 * (The stack descends from _name + _size.)
	 */
	.macro get_up_stack _name, _size
	ldr r0, =(\_name + \_size)
	.endm
94
95#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_VIRTUALIZATION)
	/*
	 * Macro for mitigating against speculative execution.
	 * ARMv7 cores without Virtualization extension do not support the
	 * eret instruction.
	 */
	.macro exception_return
	movs	pc, lr
	/*
	 * The barriers below are never executed architecturally (they sit
	 * after the return); they only prevent the CPU from speculating
	 * past the exception return.
	 */
	dsb	nsh
	isb
	.endm
106
107#else
	/*
	 * Macro for mitigating against speculative execution beyond ERET. Uses the
	 * speculation barrier instruction introduced by FEAT_SB, if it's enabled.
	 */
	.macro exception_return
	eret
	/*
	 * Instructions below are architecturally unreachable; they only
	 * block speculative execution past the exception return. 'sb' is
	 * the dedicated barrier when FEAT_SB is enabled, otherwise fall
	 * back to dsb nsh + isb.
	 */
#if ENABLE_FEAT_SB
	sb
#else
	dsb	nsh
	isb
#endif
	.endm
121#endif
122
	/* Macro for error synchronization */
	/*
	 * Force recognition of any pending asynchronous aborts before
	 * continuing (e.g. prior to an exception return).
	 */
	.macro synchronize_errors
	/* Complete any stores that may return an abort */
	dsb	sy
	/* Synchronise the CPU context with the completion of the dsb */
	isb
	.endm
130
131#if (ARM_ARCH_MAJOR == 7)
	/* ARMv7 does not support stl instruction */
	/*
	 * Emulated store-release: the DMB orders all prior accesses before
	 * the store; the trailing DSB (full system, the default scope)
	 * ensures the store has completed.
	 */
	.macro stl _reg, _write_lock
	dmb
	str	\_reg, \_write_lock
	dsb
	.endm
138#endif
139
	/*
	 * Helper macro to generate the best mov/movw/movt combinations
	 * according to the value to be moved.
	 *
	 * _reg: destination register
	 * _val: assemble-time constant expression
	 */
	.macro mov_imm _reg, _val
		.if ((\_val) & 0xffff0000) == 0
			/* High half is zero: a single mov suffices */
			mov	\_reg, #(\_val)
		.else
			/* Build the value in two 16-bit halves (ARMv7+ movw/movt) */
			movw	\_reg, #((\_val) & 0xffff)
			movt	\_reg, #((\_val) >> 16)
		.endif
	.endm
152
	/*
	 * Macro to mark instances where we're jumping to a function and don't
	 * expect a return. To provide the function being jumped to with
	 * additional information, we use 'bl' instruction to jump rather than
	 * 'b'.
	 *
	 * Debuggers infer the location of a call from where LR points to, which
	 * is usually the instruction after 'bl'. If this macro expansion
	 * happens to be the last location in a function, that'll cause the LR
	 * to point a location beyond the function, thereby misleading debugger
	 * back trace. We therefore insert a 'nop' after the function call for
	 * debug builds, unless 'skip_nop' parameter is non-zero.
	 *
	 * _func:    function to jump to (required)
	 * skip_nop: when non-zero, suppress the debug-build nop
	 */
	.macro no_ret _func:req, skip_nop=0
	bl	\_func
#if DEBUG
	.ifeq \skip_nop
	nop	/* keeps LR pointing inside the caller for backtraces */
	.endif
#endif
	.endm
174
	/*
	 * Reserve space for a spin lock in assembly file.
	 * Alignment and size constants come from lib/spinlock.h.
	 */
	.macro define_asm_spinlock _name:req
	.align	SPINLOCK_ASM_ALIGN
	\_name:
	.space	SPINLOCK_ASM_SIZE	/* .space zero-fills the storage */
	.endm
183
	/*
	 * Helper macro to OR the bottom 32 bits of `_val` into `_reg_l`
	 * and the top 32 bits of `_val` into `_reg_h`.  If either the bottom
	 * or top word of `_val` is zero, the corresponding OR operation
	 * is skipped.
	 *
	 * NOTE(review): each non-zero 32-bit half must be encodable as an
	 * ARM modified immediate, otherwise assembly fails at the orr.
	 */
	.macro orr64_imm _reg_l, _reg_h, _val
		.if (\_val >> 32)
			orr \_reg_h, \_reg_h, #(\_val >> 32)
		.endif
		.if (\_val & 0xffffffff)
			orr \_reg_l, \_reg_l, #(\_val & 0xffffffff)
		.endif
	.endm
198
	/*
	 * Helper macro to bitwise-clear bits in `_reg_l` and
	 * `_reg_h` given a 64 bit immediate `_val`.  The set bits
	 * in the bottom word of `_val` dictate which bits from
	 * `_reg_l` should be cleared.  Similarly, the set bits in
	 * the top word of `_val` dictate which bits from `_reg_h`
	 * should be cleared.  If either the bottom or top word of
	 * `_val` is zero, the corresponding BIC operation is skipped.
	 *
	 * NOTE(review): each non-zero 32-bit half must be encodable as an
	 * ARM modified immediate, otherwise assembly fails at the bic.
	 */
	.macro bic64_imm _reg_l, _reg_h, _val
		.if (\_val >> 32)
			bic \_reg_h, \_reg_h, #(\_val >> 32)
		.endif
		.if (\_val & 0xffffffff)
			bic \_reg_l, \_reg_l, #(\_val & 0xffffffff)
		.endif
	.endm
216
217	/*
218	 * Helper macro for carrying out division in software when
219	 * hardware division is not suported. \top holds the dividend
220	 * in the function call and the remainder after
221	 * the function is executed. \bot holds the divisor. \div holds
222	 * the quotient and \temp is a temporary registed used in calcualtion.
223	 * The division algorithm has been obtained from:
224	 * http://www.keil.com/support/man/docs/armasm/armasm_dom1359731155623.htm
225	 */
226	.macro	softudiv	div:req,top:req,bot:req,temp:req
227
228	mov     \temp, \bot
229	cmp     \temp, \top, lsr #1
230div1:
231	movls   \temp, \temp, lsl #1
232	cmp     \temp, \top, lsr #1
233	bls     div1
234	mov     \div, #0
235
236div2:
237	cmp     \top, \temp
238	subcs   \top, \top,\temp
239	ADC     \div, \div, \div
240	mov     \temp, \temp, lsr #1
241	cmp     \temp, \bot
242	bhs     div2
243	.endm
244
245	/*
246	 * Helper macro to instruction adr <reg>, <symbol> where <symbol> is
247	 * within the range +/- 4 GB.
248	 */
249	.macro adr_l, dst, sym
250	adrp	\dst, \sym
251	add	\dst, \dst, :lo12:\sym
252	.endm
253#endif /* ASM_MACROS_S */
254