/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright IBM Corp. 1999, 2011
 *
 * Author(s): Martin Schwidefsky <[email protected]>,
 */

#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/mmdebug.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <asm/asm.h>

void __cmpxchg_called_with_bad_pointer(void);

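/*
 * 32-bit COMPARE AND SWAP: atomically replace the word at @ptr with @new
 * if it still contains @old, and return the value found at @ptr.
 */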
static __always_inline u32 __cs_asm(u64 ptr, u32 old, u32 new)
{
	asm volatile(
		"	cs	%[old],%[new],%[ptr]\n"
		: [old] "+d" (old), [ptr] "+Q" (*(u32 *)ptr)
		: [new] "d" (new)
		: "memory", "cc");
	return old;
}

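/*
 * 64-bit COMPARE AND SWAP (CSG): same as __cs_asm(), but operates on the
 * doubleword at @ptr.
 */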
static __always_inline u64 __csg_asm(u64 ptr, u64 old, u64 new)
{
	asm volatile(
		"	csg	%[old],%[new],%[ptr]\n"
		: [old] "+d" (old), [ptr] "+QS" (*(u64 *)ptr)
		: [new] "d" (new)
		: "memory", "cc");
	return old;
}

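/*
 * Emulate a 1-byte cmpxchg with the word-sized CS instruction: operate on
 * the aligned 4-byte word containing the byte and retry until either the
 * CS succeeds or the byte no longer matches @old. The byte index within
 * the word follows the big-endian byte order of s390.
 */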
static inline u8 __arch_cmpxchg1(u64 ptr, u8 old, u8 new)
{
	union {
		u8 b[4];
		u32 w;
	} old32, new32;
	u32 prev;
	int i;

	i = ptr & 3;
	ptr &= ~0x3;
	prev = READ_ONCE(*(u32 *)ptr);
	do {
		old32.w = prev;
		if (old32.b[i] != old)
			return old32.b[i];
		new32.w = old32.w;
		new32.b[i] = new;
		prev = __cs_asm(ptr, old32.w, new32.w);
	} while (prev != old32.w);
	return old;
}

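/*
 * Emulate a 2-byte cmpxchg the same way as __arch_cmpxchg1(), using the
 * halfword index within the aligned 4-byte word.
 */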
static inline u16 __arch_cmpxchg2(u64 ptr, u16 old, u16 new)
{
	union {
		u16 b[2];
		u32 w;
	} old32, new32;
	u32 prev;
	int i;

	i = (ptr & 3) >> 1;
	ptr &= ~0x3;
	prev = READ_ONCE(*(u32 *)ptr);
	do {
		old32.w = prev;
		if (old32.b[i] != old)
			return old32.b[i];
		new32.w = old32.w;
		new32.b[i] = new;
		prev = __cs_asm(ptr, old32.w, new32.w);
	} while (prev != old32.w);
	return old;
}

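/*
 * Size dispatcher: mask the comparison and replacement values to the
 * operand size and pick the matching compare-and-swap implementation.
 * An unsupported size calls __cmpxchg_called_with_bad_pointer(), which
 * is intentionally never defined so the error shows up at link time.
 */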
static __always_inline u64 __arch_cmpxchg(u64 ptr, u64 old, u64 new, int size)
{
	switch (size) {
	case 1: return __arch_cmpxchg1(ptr, old & 0xff, new & 0xff);
	case 2: return __arch_cmpxchg2(ptr, old & 0xffff, new & 0xffff);
	case 4: return __cs_asm(ptr, old & 0xffffffff, new & 0xffffffff);
	case 8: return __csg_asm(ptr, old, new);
	default: __cmpxchg_called_with_bad_pointer();
	}
	return old;
}

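/*
 * arch_cmpxchg() is the architecture hook behind cmpxchg(): it returns
 * the value found at @ptr, which equals @o iff the swap succeeded. The
 * same implementation also backs the 64-bit and _local variants below.
 */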
#define arch_cmpxchg(ptr, o, n)						\
({									\
	(__typeof__(*(ptr)))__arch_cmpxchg((unsigned long)(ptr),	\
					   (unsigned long)(o),		\
					   (unsigned long)(n),		\
					   sizeof(*(ptr)));		\
})

#define arch_cmpxchg64		arch_cmpxchg
#define arch_cmpxchg_local	arch_cmpxchg
#define arch_cmpxchg64_local	arch_cmpxchg

#ifdef __HAVE_ASM_FLAG_OUTPUTS__

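/*
 * With compiler flag-output support the condition code set by CS/CSG is
 * read directly via the "=@cc" operand and used as the boolean result,
 * instead of comparing the returned value with the expected one.
 */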
#define arch_try_cmpxchg(ptr, oldp, new)				\
({									\
	__typeof__(ptr) __oldp = (__typeof__(ptr))(oldp);		\
	__typeof__(*(ptr)) __old = *__oldp;				\
	__typeof__(*(ptr)) __new = (new);				\
	__typeof__(*(ptr)) __prev;					\
	int __cc;							\
									\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
	case 2: {							\
		__prev = arch_cmpxchg((ptr), (__old), (__new));		\
		__cc = (__prev != __old);				\
		if (unlikely(__cc))					\
			*__oldp = __prev;				\
		break;							\
	}								\
	case 4: {							\
		asm volatile(						\
			"	cs	%[__old],%[__new],%[__ptr]\n"	\
			: [__old] "+d" (*__oldp),			\
			  [__ptr] "+Q" (*(ptr)),			\
			  "=@cc" (__cc)					\
			: [__new] "d" (__new)				\
			: "memory");					\
		break;							\
	}								\
	case 8: {							\
		asm volatile(						\
			"	csg	%[__old],%[__new],%[__ptr]\n"	\
			: [__old] "+d" (*__oldp),			\
			  [__ptr] "+QS" (*(ptr)),			\
			  "=@cc" (__cc)					\
			: [__new] "d" (__new)				\
			: "memory");					\
		break;							\
	}								\
	default:							\
		__cmpxchg_called_with_bad_pointer();			\
	}								\
	likely(__cc == 0);						\
})

#else /* __HAVE_ASM_FLAG_OUTPUTS__ */

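/*
 * Fallback for compilers without flag-output support: derive the boolean
 * result by comparing the previous value returned by arch_cmpxchg() with
 * the expected one, and update *@oldp on failure.
 */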
#define arch_try_cmpxchg(ptr, oldp, new)				\
({									\
	__typeof__((ptr)) __oldp = (__typeof__(ptr))(oldp);		\
	__typeof__(*(ptr)) __old = *__oldp;				\
	__typeof__(*(ptr)) __new = (new);				\
	__typeof__(*(ptr)) __prev;					\
									\
	__prev = arch_cmpxchg((ptr), (__old), (__new));			\
	if (unlikely(__prev != __old))					\
		*__oldp = __prev;					\
	likely(__prev == __old);					\
})

#endif /* __HAVE_ASM_FLAG_OUTPUTS__ */

#define arch_try_cmpxchg64		arch_try_cmpxchg
#define arch_try_cmpxchg_local		arch_try_cmpxchg
#define arch_try_cmpxchg64_local	arch_try_cmpxchg

void __xchg_called_with_bad_pointer(void);

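/*
 * Emulate a 1-byte xchg with a 4-byte try_cmpxchg() retry loop on the
 * aligned word containing the byte. The shift places the byte at its
 * big-endian position within the word, e.g. the byte at offset 0 lives
 * in the most significant eight bits.
 */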
static inline u8 __arch_xchg1(u64 ptr, u8 x)
{
	int shift = (3 ^ (ptr & 3)) << 3;
	u32 mask, old, new;

	ptr &= ~0x3;
	mask = ~(0xff << shift);
	old = READ_ONCE(*(u32 *)ptr);
	do {
		new = old & mask;
		new |= x << shift;
	} while (!arch_try_cmpxchg((u32 *)ptr, &old, new));
	return old >> shift;
}

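/*
 * Emulate a 2-byte xchg the same way as __arch_xchg1(), shifting the
 * halfword to its big-endian position within the aligned word.
 */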
static inline u16 __arch_xchg2(u64 ptr, u16 x)
{
	int shift = (2 ^ (ptr & 2)) << 3;
	u32 mask, old, new;

	ptr &= ~0x3;
	mask = ~(0xffff << shift);
	old = READ_ONCE(*(u32 *)ptr);
	do {
		new = old & mask;
		new |= x << shift;
	} while (!arch_try_cmpxchg((u32 *)ptr, &old, new));
	return old >> shift;
}

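/*
 * Size dispatcher for xchg(): sub-word sizes go through the emulation
 * helpers above, word and doubleword sizes use a plain try_cmpxchg()
 * retry loop. The previous contents are returned.
 */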
static __always_inline u64 __arch_xchg(u64 ptr, u64 x, int size)
{
	switch (size) {
	case 1:
		return __arch_xchg1(ptr, x & 0xff);
	case 2:
		return __arch_xchg2(ptr, x & 0xffff);
	case 4: {
		u32 old = READ_ONCE(*(u32 *)ptr);

		do {
		} while (!arch_try_cmpxchg((u32 *)ptr, &old, x & 0xffffffff));
		return old;
	}
	case 8: {
		u64 old = READ_ONCE(*(u64 *)ptr);

		do {
		} while (!arch_try_cmpxchg((u64 *)ptr, &old, x));
		return old;
	}
	}
	__xchg_called_with_bad_pointer();
	return x;
}

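/*
 * arch_xchg() is the architecture hook behind xchg(): it atomically
 * stores @x at @ptr and returns the previous contents.
 */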
#define arch_xchg(ptr, x)						\
({									\
	(__typeof__(*(ptr)))__arch_xchg((unsigned long)(ptr),		\
					(unsigned long)(x),		\
					sizeof(*(ptr)));		\
})

#define system_has_cmpxchg128()		1

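/*
 * 128-bit cmpxchg via COMPARE DOUBLE AND SWAP (CDSG). The instruction is
 * part of the base 64-bit architecture, which is why
 * system_has_cmpxchg128() is unconditionally 1. @ptr must be aligned to
 * 16 bytes.
 */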
static __always_inline u128 arch_cmpxchg128(volatile u128 *ptr, u128 old, u128 new)
{
	asm volatile(
		"	cdsg	%[old],%[new],%[ptr]\n"
		: [old] "+d" (old), [ptr] "+QS" (*ptr)
		: [new] "d" (new)
		: "memory", "cc");
	return old;
}

#define arch_cmpxchg128		arch_cmpxchg128
#define arch_cmpxchg128_local	arch_cmpxchg128

#ifdef __HAVE_ASM_FLAG_OUTPUTS__

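/*
 * try_cmpxchg128 variant that reads the condition code via the "=@cc"
 * flag output; on failure *@oldp is updated with the current contents,
 * which CDSG leaves in the first-operand register pair.
 */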
static __always_inline bool arch_try_cmpxchg128(volatile u128 *ptr, u128 *oldp, u128 new)
{
	int cc;

	asm volatile(
		"	cdsg	%[old],%[new],%[ptr]\n"
		: [old] "+d" (*oldp), [ptr] "+QS" (*ptr), "=@cc" (cc)
		: [new] "d" (new)
		: "memory");
	return likely(cc == 0);
}

#define arch_try_cmpxchg128		arch_try_cmpxchg128
#define arch_try_cmpxchg128_local	arch_try_cmpxchg128

#endif /* __HAVE_ASM_FLAG_OUTPUTS__ */

#endif /* __ASM_CMPXCHG_H */