xref: /nrf52832-nimble/nordic/nrfx/soc/nrfx_atomic_internal.h (revision 150812a83cab50279bd772ef6db1bfaf255f2c5b)
1 /*
2  * Copyright (c) 2016 - 2018, Nordic Semiconductor ASA
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice, this
9  *    list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * 3. Neither the name of the copyright holder nor the names of its
16  *    contributors may be used to endorse or promote products derived from this
17  *    software without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #ifndef NRFX_ATOMIC_INTERNAL_H__
33 #define NRFX_ATOMIC_INTERNAL_H__
34 
35 #include <nrfx.h>
36 
37 #ifdef __cplusplus
38 extern "C" {
39 #endif
40 
41 /* Only Cortex-M cores version 3 or higher (and SecurCore SC300 or higher) support LDREX/STREX instructions. */
42 #if ((__CORTEX_M >= 0x03U) || (__CORTEX_SC >= 300U)) == 0
43 #error "Unsupported core version"
44 #endif
45 
46 #if defined ( __CC_ARM )
/**
 * @brief Atomically replace the value of @p *p_ptr with @p value
 *        (Keil armcc embedded-assembler LDREX/STREX retry loop).
 *
 * @param[in]  p_ptr  Pointer to the atomic object (passed in r0).
 * @param[in]  value  New value to store (passed in r1).
 * @param[out] p_new  Receives the value actually written (passed in r2).
 *
 * @return Value of the atomic object before the store (returned in r0).
 */
static __asm uint32_t nrfx_atomic_internal_mov(nrfx_atomic_u32_t * p_ptr,
                                               uint32_t value,
                                               uint32_t * p_new)
{
    /* The base standard specifies that arguments are passed in core registers r0-r3 and on the stack.
     * Registers r4 and r5 must be saved on the stack. Note that only even number of register pushes are
     * allowed. This is a requirement of the Procedure Call Standard for the ARM Architecture [AAPCS].
     */
    push  {r4, r5}
    mov   r4, r0        ; keep object address; r0 will carry the old value out

loop_mov
    ldrex r0, [r4]      ; exclusive-load the current (old) value
    mov   r5, r1        ; r5 = new value to write
    strex r3, r5, [r4]  ; attempt exclusive store; r3 = 0 on success
    cmp   r3, #0
    bne   loop_mov      ; lost exclusivity - retry

    str   r5, [r2]      ; *p_new = value written
    pop   {r4, r5}
    bx    lr
}
69 
70 
/**
 * @brief Atomically OR @p value into @p *p_ptr
 *        (Keil armcc embedded-assembler LDREX/STREX retry loop).
 *
 * @param[in]  p_ptr  Pointer to the atomic object.
 * @param[in]  value  Operand to OR in.
 * @param[out] p_new  Receives the value after the operation.
 *
 * @return Value of the atomic object before the operation.
 */
static __asm uint32_t nrfx_atomic_internal_orr(nrfx_atomic_u32_t * p_ptr,
                                              uint32_t value,
                                              uint32_t * p_new)
{
    push  {r4, r5}
    mov   r4, r0

loop_orr
    ldrex r0, [r4]      ; r0 = old value (also the return value)
    orr   r5, r0, r1    ; r5 = old | value
    strex r3, r5, [r4]  ; attempt exclusive store; r3 = 0 on success
    cmp   r3, #0
    bne   loop_orr

    str   r5, [r2]      ; *p_new = new value
    pop   {r4, r5}
    bx    lr
}
89 
/**
 * @brief Atomically AND @p value into @p *p_ptr
 *        (Keil armcc embedded-assembler LDREX/STREX retry loop).
 *
 * @param[in]  p_ptr  Pointer to the atomic object.
 * @param[in]  value  Operand to AND with.
 * @param[out] p_new  Receives the value after the operation.
 *
 * @return Value of the atomic object before the operation.
 */
static __asm uint32_t nrfx_atomic_internal_and(nrfx_atomic_u32_t * p_ptr,
                                              uint32_t value,
                                              uint32_t * p_new)
{
    push  {r4, r5}
    mov   r4, r0

loop_and
    ldrex r0, [r4]      ; r0 = old value (also the return value)
    and   r5, r0, r1    ; r5 = old & value
    strex r3, r5, [r4]  ; attempt exclusive store; r3 = 0 on success
    cmp   r3, #0
    bne   loop_and

    str   r5, [r2]      ; *p_new = new value
    pop   {r4, r5}
    bx    lr
}
108 
/**
 * @brief Atomically XOR @p value into @p *p_ptr
 *        (Keil armcc embedded-assembler LDREX/STREX retry loop).
 *
 * @param[in]  p_ptr  Pointer to the atomic object.
 * @param[in]  value  Operand to XOR with.
 * @param[out] p_new  Receives the value after the operation.
 *
 * @return Value of the atomic object before the operation.
 */
static __asm uint32_t nrfx_atomic_internal_eor(nrfx_atomic_u32_t * p_ptr,
                                              uint32_t value,
                                              uint32_t * p_new)
{
    push  {r4, r5}
    mov   r4, r0

loop_eor
    ldrex r0, [r4]      ; r0 = old value (also the return value)
    eor   r5, r0, r1    ; r5 = old ^ value
    strex r3, r5, [r4]  ; attempt exclusive store; r3 = 0 on success
    cmp   r3, #0
    bne   loop_eor

    str   r5, [r2]      ; *p_new = new value
    pop   {r4, r5}
    bx    lr
}
127 
/**
 * @brief Atomically add @p value to @p *p_ptr
 *        (Keil armcc embedded-assembler LDREX/STREX retry loop).
 *
 * @param[in]  p_ptr  Pointer to the atomic object.
 * @param[in]  value  Operand to add (unsigned, wraps on overflow).
 * @param[out] p_new  Receives the value after the operation.
 *
 * @return Value of the atomic object before the operation.
 */
static __asm uint32_t nrfx_atomic_internal_add(nrfx_atomic_u32_t * p_ptr,
                                              uint32_t value,
                                              uint32_t * p_new)
{
    push  {r4, r5}
    mov   r4, r0

loop_add
    ldrex r0, [r4]      ; r0 = old value (also the return value)
    add   r5, r0, r1    ; r5 = old + value
    strex r3, r5, [r4]  ; attempt exclusive store; r3 = 0 on success
    cmp   r3, #0
    bne   loop_add

    str   r5, [r2]      ; *p_new = new value
    pop   {r4, r5}
    bx    lr
}
146 
/**
 * @brief Atomically subtract @p value from @p *p_ptr
 *        (Keil armcc embedded-assembler LDREX/STREX retry loop).
 *
 * @param[in]  p_ptr  Pointer to the atomic object.
 * @param[in]  value  Operand to subtract (unsigned, wraps on underflow).
 * @param[out] p_new  Receives the value after the operation.
 *
 * @return Value of the atomic object before the operation.
 */
static __asm uint32_t nrfx_atomic_internal_sub(nrfx_atomic_u32_t * p_ptr,
                                              uint32_t value,
                                              uint32_t * p_new)
{
    push  {r4, r5}
    mov   r4, r0

loop_sub
    ldrex r0, [r4]      ; r0 = old value (also the return value)
    sub   r5, r0, r1    ; r5 = old - value
    strex r3, r5, [r4]  ; attempt exclusive store; r3 = 0 on success
    cmp   r3, #0
    bne   loop_sub

    str   r5, [r2]      ; *p_new = new value
    pop   {r4, r5}
    bx    lr
}
165 
/**
 * @brief Atomic compare-and-exchange (Keil armcc embedded assembler).
 *
 * If @p *p_data equals @p *p_expected, @p value is stored into @p *p_data
 * and true is returned. Otherwise the actual value of @p *p_data is written
 * back into @p *p_expected and false is returned.
 *
 * @param[in]     p_data     Pointer to the atomic object.
 * @param[in,out] p_expected Expected value; updated with the actual value on
 *                           mismatch.
 * @param[in]     value      Value to store on a successful comparison.
 *
 * @return true if the exchange took place, false otherwise.
 */
static __asm bool nrfx_atomic_internal_cmp_exch(nrfx_atomic_u32_t * p_data,
                                               uint32_t *         p_expected,
                                               uint32_t           value)
{
/* Readable aliases for the registers used below (undefined at the end). */
#define RET_REG  r0
#define P_EXPC   r1
#define VALUE    r2
#define STR_RES  r3
#define P_DATA   r4
#define EXPC_VAL r5
#define ACT_VAL  r6

    push {r4-r6}
    mov  P_DATA, r0     ; keep object address; r0 becomes the bool result
    mov  RET_REG, #0    ; default result: exchange not performed

loop_cmp_exch
    ldrex   ACT_VAL, [P_DATA]           ; exclusive-load the current value
    ldr     EXPC_VAL, [P_EXPC]          ; load the expected value
    cmp     ACT_VAL, EXPC_VAL
    ittee   eq
    strexeq STR_RES, VALUE, [P_DATA]    ; equal: try to store the new value
    moveq   RET_REG, #1                 ; equal: tentatively report success
    strexne STR_RES, ACT_VAL, [P_DATA]  ; not equal: store back unchanged value
    strne   ACT_VAL, [P_EXPC]           ; not equal: *p_expected = actual value
    cmp     STR_RES, #0                 ; did the exclusive store succeed?
    itt     ne
    movne   RET_REG, #0                 ; store failed: reset result and retry
    bne     loop_cmp_exch

    pop {r4-r6}
    bx    lr

#undef RET_REG
#undef P_EXPC
#undef VALUE
#undef STR_RES
#undef P_DATA
#undef EXPC_VAL
#undef ACT_VAL
}
207 
/**
 * @brief Atomically subtract @p value from @p *p_ptr only if
 *        @p *p_ptr >= @p value (unsigned "higher or same"); otherwise the
 *        object is left unchanged. Keil armcc embedded assembler.
 *
 * @param[in]  p_ptr  Pointer to the atomic object.
 * @param[in]  value  Operand to conditionally subtract.
 * @param[out] p_new  Receives the value after the operation (old value if
 *                    no subtraction was performed).
 *
 * @return Value of the atomic object before the operation.
 */
static __asm uint32_t nrfx_atomic_internal_sub_hs(nrfx_atomic_u32_t * p_ptr,
                                                 uint32_t value,
                                                 uint32_t * p_new)
{
    push  {r4, r5}
    mov   r4, r0

loop_sub_ge
    ldrex r0, [r4]      ; r0 = old value (also the return value)
    cmp   r0, r1        ; old >= value ?
    ite   hs
    subhs r5, r0, r1    ; yes: r5 = old - value
    movlo r5, r0        ; no:  r5 = old (unchanged)
    strex r3, r5, [r4]  ; attempt exclusive store; r3 = 0 on success
    cmp   r3, #0
    bne   loop_sub_ge

    str   r5, [r2]      ; *p_new = resulting value
    pop   {r4, r5}
    bx    lr
}
229 
230 
/* Dispatch to the nrfx_atomic_internal_<asm_op> routine above: old_val
 * receives the pre-operation value, new_val the post-operation value. */
#define NRFX_ATOMIC_OP(asm_op, old_val, new_val, ptr, value)          \
        old_val = nrfx_atomic_internal_##asm_op(ptr, value, &new_val)
233 
234 #elif defined ( __ICCARM__ ) || defined ( __GNUC__ )
235 
236 /**
237  * @brief Atomic operation generic macro.
238  *
239  * @param[in]  asm_op   Operation: mov, orr, and, eor, add, sub.
240  * @param[out] old_val  Atomic object output (uint32_t), value before operation.
241  * @param[out] new_val  Atomic object output (uint32_t), value after operation.
 * @param[in]  ptr      Pointer to the atomic object (nrfx_atomic_u32_t *).
 * @param[in]  value    Atomic operation operand.
243  */
/* Generic LDREX/STREX retry loop: NRFX_ATOMIC_OP_<asm_op> (defined below)
 * expands to the instruction sequence that derives new_val from old_val and
 * value inside the exclusive section; the loop repeats until STREX succeeds.
 * NOTE(review): the clobber list names only "cc"; the store to *ptr is
 * conveyed to the compiler solely via the volatile qualifier — confirm no
 * caller relies on a full "memory" compiler barrier here. */
#define NRFX_ATOMIC_OP(asm_op, old_val, new_val, ptr, value)    \
{                                                               \
    uint32_t tmp_reg;                                           \
            __ASM volatile(                                     \
    "1:     ldrex   %["#old_val"], [%["#ptr"]]\n"               \
    NRFX_ATOMIC_OP_##asm_op(new_val, old_val, value)            \
    "       strex   %[tmp_reg], %["#new_val"], [%["#ptr"]]\n"   \
    "       teq     %[tmp_reg], #0\n"                           \
    "       bne.n     1b"                                       \
            :                                                   \
        [old_val] "=&r" (old_val),                              \
        [new_val] "=&r" (new_val),                              \
        [tmp_reg] "=&r" (tmp_reg)                               \
            :                                                   \
        [ptr]   "r" (ptr),                                      \
        [value] "r" (value)                                     \
            : "cc");                                            \
    (void)tmp_reg;                                              \
}

/* Per-operation assembly snippets spliced into the template above;
 * each computes new_val from old_val and value. */
#define NRFX_ATOMIC_OP_mov(new_val, old_val, value) "mov %["#new_val"], %["#value"]\n"
#define NRFX_ATOMIC_OP_orr(new_val, old_val, value) "orr %["#new_val"], %["#old_val"], %["#value"]\n"
#define NRFX_ATOMIC_OP_and(new_val, old_val, value) "and %["#new_val"], %["#old_val"], %["#value"]\n"
#define NRFX_ATOMIC_OP_eor(new_val, old_val, value) "eor %["#new_val"], %["#old_val"], %["#value"]\n"
#define NRFX_ATOMIC_OP_add(new_val, old_val, value) "add %["#new_val"], %["#old_val"], %["#value"]\n"
#define NRFX_ATOMIC_OP_sub(new_val, old_val, value) "sub %["#new_val"], %["#old_val"], %["#value"]\n"
/* Conditional subtract: new_val = (old_val >= value) ? old_val - value
 *                                                    : old_val
 * ("hs" = unsigned higher-or-same, "lo" = unsigned lower). */
#define NRFX_ATOMIC_OP_sub_hs(new_val, old_val, value)    \
    "cmp %["#old_val"], %["#value"]\n "                   \
    "ite hs\n"                                            \
    "subhs %["#new_val"], %["#old_val"], %["#value"]\n"   \
    "movlo %["#new_val"], %["#old_val"]\n"
275 
/**
 * @brief Atomic compare-and-exchange (GCC/IAR inline-assembly implementation).
 *
 * If @p *p_data equals @p *p_expected, @p value is stored into @p *p_data
 * and true is returned. Otherwise the actual value of @p *p_data is written
 * back into @p *p_expected and false is returned.
 *
 * @param[in]     p_data     Pointer to the atomic object.
 * @param[in,out] p_expected Expected value; updated with the actual value on
 *                           mismatch.
 * @param[in]     value      Value to store on a successful comparison.
 *
 * @return true if the exchange took place, false otherwise.
 */
static inline bool nrfx_atomic_internal_cmp_exch(nrfx_atomic_u32_t * p_data,
                                                uint32_t *         p_expected,
                                                uint32_t           value)
{
    bool res = false;
    /* Temporary register used in the inline asm code for getting the result
     * of the strex* operations (no need to initialize it).
     */
    uint32_t tmp_reg;
    uint32_t act_val = 0;
    uint32_t exp_val = 0;
    /* Mirrors the Keil implementation above: on mismatch the unchanged value
     * is stored back (strexne) so the exclusive monitor result can be checked,
     * and the whole sequence retries whenever the exclusive store fails. */
    __ASM volatile(
    "1:     ldrex   %[act_val], [%[ptr]]\n"
    "       ldr     %[exp_val], [%[expc]]\n"
    "       cmp     %[act_val], %[exp_val]\n"
    "       ittee   eq\n"
    "       strexeq %[tmp_reg], %[value], [%[ptr]]\n"
    "       moveq   %[res], #1\n"
    "       strexne %[tmp_reg], %[act_val], [%[ptr]]\n"
    "       strne   %[act_val], [%[expc]]\n"
    "       cmp     %[tmp_reg], #0\n"
    "       itt     ne\n"
    "       movne   %[res], #0\n"
    "       bne.n   1b"
            :
        [res]     "=&r" (res),
        [exp_val] "=&r" (exp_val),
        [act_val] "=&r" (act_val),
        [tmp_reg] "=&r" (tmp_reg)
            :
                "0" (res),
                "1" (exp_val),
                "2" (act_val),
        [expc]  "r" (p_expected),
        [ptr]   "r" (p_data),
        [value] "r" (value)
            : "cc");
    (void)tmp_reg;
    return res;
}
316 
317 #else
318 #error "Unsupported compiler"
319 #endif
320 
321 #ifdef __cplusplus
322 }
323 #endif
324 
325 #endif // NRFX_ATOMIC_INTERNAL_H__
326