// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Note: some of these functions are semantically inlined
// by the compiler (in src/cmd/compile/internal/gc/ssa.go).

#include "textflag.h"

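// The typed load wrappers below tail-jump to Load and Load64: on amd64,
// uint, uintptr, and int64 are 8 bytes and int32 is 4 bytes, so the argument
// frames match the untyped implementations exactly. A Go-level sketch of one
// of them (illustration only, not how the wrapper is actually built):
//	func Loaduintptr(ptr *uintptr) uintptr {
//		return uintptr(Load64((*uint64)(unsafe.Pointer(ptr))))
//	}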
TEXT ·Loaduintptr(SB), NOSPLIT, $0-16
	JMP	·Load64(SB)

TEXT ·Loaduint(SB), NOSPLIT, $0-16
	JMP	·Load64(SB)

TEXT ·Loadint32(SB), NOSPLIT, $0-12
	JMP	·Load(SB)

TEXT ·Loadint64(SB), NOSPLIT, $0-16
	JMP	·Load64(SB)

// bool Cas(int32 *val, int32 old, int32 new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	} else
//		return 0;
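//
// A typical caller retries in a loop until the swap succeeds. A minimal Go
// sketch of that pattern (illustration only, not part of this file):
//	for {
//		old := Load(&n)
//		if Cas(&n, old, old+1) {
//			break
//		}
//	}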
TEXT ·Cas(SB),NOSPLIT,$0-17
	MOVQ	ptr+0(FP), BX
	MOVL	old+8(FP), AX
	MOVL	new+12(FP), CX
	LOCK
	CMPXCHGL	CX, 0(BX)
	SETEQ	ret+16(FP)
	RET

// bool	·Cas64(uint64 *val, uint64 old, uint64 new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	} else {
//		return 0;
//	}
TEXT ·Cas64(SB), NOSPLIT, $0-25
	MOVQ	ptr+0(FP), BX
	MOVQ	old+8(FP), AX
	MOVQ	new+16(FP), CX
	LOCK
	CMPXCHGQ	CX, 0(BX)
	SETEQ	ret+24(FP)
	RET

// bool Casp1(void **val, void *old, void *new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	} else
//		return 0;
TEXT ·Casp1(SB), NOSPLIT, $0-25
	MOVQ	ptr+0(FP), BX
	MOVQ	old+8(FP), AX
	MOVQ	new+16(FP), CX
	LOCK
	CMPXCHGQ	CX, 0(BX)
	SETEQ	ret+24(FP)
	RET

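// The typed compare-and-swap wrappers below alias Cas and Cas64. CasRel is
// Cas with release ordering; on amd64, LOCK CMPXCHG is already sequentially
// consistent, so the plain Cas suffices.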
TEXT ·Casint32(SB), NOSPLIT, $0-17
	JMP	·Cas(SB)

TEXT ·Casint64(SB), NOSPLIT, $0-25
	JMP	·Cas64(SB)

TEXT ·Casuintptr(SB), NOSPLIT, $0-25
	JMP	·Cas64(SB)

TEXT ·CasRel(SB), NOSPLIT, $0-17
	JMP	·Cas(SB)

// uint32 Xadd(uint32 volatile *val, int32 delta)
// Atomically:
//	*val += delta;
//	return *val;
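//
// XADDL leaves the previous value of *val in AX; the copy of delta saved in
// CX is added back so that the new value is what gets returned.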
TEXT ·Xadd(SB), NOSPLIT, $0-20
	MOVQ	ptr+0(FP), BX
	MOVL	delta+8(FP), AX
	MOVL	AX, CX
	LOCK
	XADDL	AX, 0(BX)
	ADDL	CX, AX
	MOVL	AX, ret+16(FP)
	RET

// uint64 Xadd64(uint64 volatile *val, int64 delta)
// Atomically:
//	*val += delta;
//	return *val;
TEXT ·Xadd64(SB), NOSPLIT, $0-24
	MOVQ	ptr+0(FP), BX
	MOVQ	delta+8(FP), AX
	MOVQ	AX, CX
	LOCK
	XADDQ	AX, 0(BX)
	ADDQ	CX, AX
	MOVQ	AX, ret+16(FP)
	RET

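// The typed Xadd wrappers below alias Xadd and Xadd64; on amd64 the argument
// frames are laid out identically.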
TEXT ·Xaddint32(SB), NOSPLIT, $0-20
	JMP	·Xadd(SB)

TEXT ·Xaddint64(SB), NOSPLIT, $0-24
	JMP	·Xadd64(SB)

TEXT ·Xadduintptr(SB), NOSPLIT, $0-24
	JMP	·Xadd64(SB)

// uint32 Xchg(ptr *uint32, new uint32)
// Atomically:
//	old := *ptr;
//	*ptr = new;
//	return old;
TEXT ·Xchg(SB), NOSPLIT, $0-20
	MOVQ	ptr+0(FP), BX
	MOVL	new+8(FP), AX
	XCHGL	AX, 0(BX)
	MOVL	AX, ret+16(FP)
	RET

// uint64 Xchg64(ptr *uint64, new uint64)
// Atomically:
//	old := *ptr;
//	*ptr = new;
//	return old;
TEXT ·Xchg64(SB), NOSPLIT, $0-24
	MOVQ	ptr+0(FP), BX
	MOVQ	new+8(FP), AX
	XCHGQ	AX, 0(BX)
	MOVQ	AX, ret+16(FP)
	RET

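// The typed Xchg wrappers below alias Xchg and Xchg64.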
TEXT ·Xchgint32(SB), NOSPLIT, $0-20
	JMP	·Xchg(SB)

TEXT ·Xchgint64(SB), NOSPLIT, $0-24
	JMP	·Xchg64(SB)

TEXT ·Xchguintptr(SB), NOSPLIT, $0-24
	JMP	·Xchg64(SB)

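// void StorepNoWB(void **ptr, void *val)
// Atomically stores val into *ptr without performing a write barrier; any GC
// bookkeeping is left to the caller. The XCHGQ result is discarded: an
// exchange with a memory operand is implicitly locked and doubles as a full
// memory barrier, making the store sequentially consistent.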
TEXT ·StorepNoWB(SB), NOSPLIT, $0-16
	MOVQ	ptr+0(FP), BX
	MOVQ	val+8(FP), AX
	XCHGQ	AX, 0(BX)
	RET

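// Store, Store8, and Store64 use XCHG rather than a plain MOV so that the
// store is sequentially consistent: the implicit LOCK on the exchange acts
// as a full memory barrier.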
TEXT ·Store(SB), NOSPLIT, $0-12
	MOVQ	ptr+0(FP), BX
	MOVL	val+8(FP), AX
	XCHGL	AX, 0(BX)
	RET

TEXT ·Store8(SB), NOSPLIT, $0-9
	MOVQ	ptr+0(FP), BX
	MOVB	val+8(FP), AX
	XCHGB	AX, 0(BX)
	RET

TEXT ·Store64(SB), NOSPLIT, $0-16
	MOVQ	ptr+0(FP), BX
	MOVQ	val+8(FP), AX
	XCHGQ	AX, 0(BX)
	RET

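// The typed store wrappers below alias Store and Store64. StoreRel,
// StoreRel64, and StoreReluintptr require only release ordering, which the
// sequentially consistent stores above already provide.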
TEXT ·Storeint32(SB), NOSPLIT, $0-12
	JMP	·Store(SB)

TEXT ·Storeint64(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)

TEXT ·Storeuintptr(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)

TEXT ·StoreRel(SB), NOSPLIT, $0-12
	JMP	·Store(SB)

TEXT ·StoreRel64(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)

TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)

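// Or8, And8, Or, and And do not return the old value, so a single
// LOCK-prefixed OR/AND on memory is sufficient; no CMPXCHG loop is needed.
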
// void	·Or8(byte volatile*, byte);
TEXT ·Or8(SB), NOSPLIT, $0-9
	MOVQ	ptr+0(FP), AX
	MOVB	val+8(FP), BX
	LOCK
	ORB	BX, (AX)
	RET

// void	·And8(byte volatile*, byte);
TEXT ·And8(SB), NOSPLIT, $0-9
	MOVQ	ptr+0(FP), AX
	MOVB	val+8(FP), BX
	LOCK
	ANDB	BX, (AX)
	RET

// func Or(addr *uint32, v uint32)
TEXT ·Or(SB), NOSPLIT, $0-12
	MOVQ	ptr+0(FP), AX
	MOVL	val+8(FP), BX
	LOCK
	ORL	BX, (AX)
	RET

// func And(addr *uint32, v uint32)
TEXT ·And(SB), NOSPLIT, $0-12
	MOVQ	ptr+0(FP), AX
	MOVL	val+8(FP), BX
	LOCK
	ANDL	BX, (AX)
	RET

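// Or32, And32, Or64, and And64 return the old value, which a LOCK OR/AND
// cannot produce, so they retry with CMPXCHG until the update lands on an
// unchanged word. A Go sketch of the same loop (illustration only):
//	for {
//		old := Load(addr)
//		if Cas(addr, old, old|v) {
//			return old
//		}
//	}
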
// func Or32(addr *uint32, v uint32) old uint32
TEXT ·Or32(SB), NOSPLIT, $0-20
	MOVQ	ptr+0(FP), BX
	MOVL	val+8(FP), CX
casloop:
	MOVL	CX, DX
	MOVL	(BX), AX
	ORL	AX, DX
	LOCK
	CMPXCHGL	DX, (BX)
	JNZ	casloop
	MOVL	AX, ret+16(FP)
	RET

// func And32(addr *uint32, v uint32) old uint32
TEXT ·And32(SB), NOSPLIT, $0-20
	MOVQ	ptr+0(FP), BX
	MOVL	val+8(FP), CX
casloop:
	MOVL	CX, DX
	MOVL	(BX), AX
	ANDL	AX, DX
	LOCK
	CMPXCHGL	DX, (BX)
	JNZ	casloop
	MOVL	AX, ret+16(FP)
	RET

// func Or64(addr *uint64, v uint64) old uint64
TEXT ·Or64(SB), NOSPLIT, $0-24
	MOVQ	ptr+0(FP), BX
	MOVQ	val+8(FP), CX
casloop:
	MOVQ	CX, DX
	MOVQ	(BX), AX
	ORQ	AX, DX
	LOCK
	CMPXCHGQ	DX, (BX)
	JNZ	casloop
	MOVQ	AX, ret+16(FP)
	RET

// func And64(addr *uint64, v uint64) old uint64
TEXT ·And64(SB), NOSPLIT, $0-24
	MOVQ	ptr+0(FP), BX
	MOVQ	val+8(FP), CX
casloop:
	MOVQ	CX, DX
	MOVQ	(BX), AX
	ANDQ	AX, DX
	LOCK
	CMPXCHGQ	DX, (BX)
	JNZ	casloop
	MOVQ	AX, ret+16(FP)
	RET

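// On amd64, uintptr is 64 bits wide, so the uintptr forms alias the 64-bit
// implementations.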
// func Anduintptr(addr *uintptr, v uintptr) old uintptr
TEXT ·Anduintptr(SB), NOSPLIT, $0-24
	JMP	·And64(SB)

// func Oruintptr(addr *uintptr, v uintptr) old uintptr
TEXT ·Oruintptr(SB), NOSPLIT, $0-24
	JMP	·Or64(SB)