/*
 * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <asm_macros.S>

	.globl	spin_lock
	.globl	spin_unlock

#if USE_SPINLOCK_CAS
#if !ARM_ARCH_AT_LEAST(8, 1)
#error USE_SPINLOCK_CAS option requires at least an ARMv8.1 platform
#endif

/*
 * When compiled for ARMv8.1 or later, implement spin locks using the Compare
 * and Swap (CAS) instruction.
 */

/*
 * Acquire the lock using the Compare and Swap instruction.
 *
 * Compare the lock value against 0 with acquire semantics and, if it matches,
 * swap in 1. If the lock could not be acquired, use load-exclusive semantics
 * to monitor the lock address and enter WFE until it is released.
 *
 * void spin_lock(spinlock_t *lock);
 */
func spin_lock
	mov	w2, #1			/* w2 holds the "locked" value */
1:	mov	w1, wzr			/* w1 = 0, the expected "free" value */
2:	casa	w1, w2, [x0]		/* if *lock == 0 then *lock = 1; w1 = old value */
	cbz	w1, 3f			/* old value was 0: lock acquired */
	ldxr	w1, [x0]		/* set the exclusive monitor on the lock */
	cbz	w1, 2b			/* lock was just freed: retry the CAS */
	wfe				/* sleep until the monitored address is written */
	b	1b
3:
	ret
endfunc spin_lock
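
/*
 * For reference, a rough C equivalent of the CAS acquire above, sketched with
 * the generic __atomic builtins. It assumes the usual TF-A spinlock_t layout,
 * i.e. a single 32-bit word named "lock", and is not part of this build:
 *
 *	void spin_lock(spinlock_t *l)
 *	{
 *		uint32_t expected;
 *
 *		do {
 *			expected = 0;
 *		} while (!__atomic_compare_exchange_n(&l->lock, &expected, 1,
 *				false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
 *	}
 *
 * The assembly additionally parks the core in WFE between failed attempts,
 * instead of spinning on CAS, so a contended lock line does not keep bouncing
 * between caches.
 */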

#else /* !USE_SPINLOCK_CAS */

/*
 * Acquire the lock using a load-/store-exclusive instruction pair, waiting
 * in WFE while the lock is held.
 *
 * void spin_lock(spinlock_t *lock);
 */
func spin_lock
	mov	w2, #1			/* w2 holds the "locked" value */
	sevl				/* set a local event so the first WFE falls through */
l1:	wfe				/* wait for an event before re-checking the lock */
l2:	ldaxr	w1, [x0]		/* load-acquire exclusive of the lock value */
	cbnz	w1, l1			/* lock held: go back to waiting */
	stxr	w1, w2, [x0]		/* try to claim the lock; w1 = 0 on success */
	cbnz	w1, l2			/* exclusive store failed: re-read and retry */
	ret
endfunc spin_lock
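
/*
 * For reference, this variant behaves like a test-and-set loop. A rough C
 * sketch of the same acquire using the generic __atomic builtins (again
 * assuming a spinlock_t wrapping a single 32-bit word named "lock"; not part
 * of this build):
 *
 *	void spin_lock(spinlock_t *l)
 *	{
 *		while (__atomic_exchange_n(&l->lock, 1, __ATOMIC_ACQUIRE) != 0)
 *			;
 *	}
 *
 * Unlike the plain exchange loop, the assembly only attempts the store after
 * observing the lock to be free, and waits in WFE in between.
 */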

#endif /* USE_SPINLOCK_CAS */

/*
 * Release a lock previously acquired by spin_lock.
 *
 * Use a store-release to unconditionally clear the spinlock variable. The
 * store also generates an event for any core waiting in WFE with the lock
 * address monitored by the global monitor.
 *
 * void spin_unlock(spinlock_t *lock);
 */
func spin_unlock
	stlr	wzr, [x0]		/* store-release of 0: lock is now free */
	ret
endfunc spin_unlock
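
/*
 * Typical usage from C, as a minimal sketch (the lock and function names
 * below are hypothetical; the prototypes come from the matching spinlock
 * header):
 *
 *	static spinlock_t console_lock;	// zero-initialised, i.e. free
 *
 *	void emit(const char *msg)
 *	{
 *		spin_lock(&console_lock);
 *		// ... critical section touching shared state ...
 *		spin_unlock(&console_lock);
 *	}
 */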