1*150812a8SEvalZero /**************************************************************************//**
2*150812a8SEvalZero * @file cmsis_armcc_V6.h
3*150812a8SEvalZero * @brief CMSIS Cortex-M Core Function/Instruction Header File
4*150812a8SEvalZero * @version V4.30
5*150812a8SEvalZero * @date 20. October 2015
6*150812a8SEvalZero ******************************************************************************/
7*150812a8SEvalZero /* Copyright (c) 2009 - 2015 ARM LIMITED
8*150812a8SEvalZero
9*150812a8SEvalZero All rights reserved.
10*150812a8SEvalZero Redistribution and use in source and binary forms, with or without
11*150812a8SEvalZero modification, are permitted provided that the following conditions are met:
12*150812a8SEvalZero - Redistributions of source code must retain the above copyright
13*150812a8SEvalZero notice, this list of conditions and the following disclaimer.
14*150812a8SEvalZero - Redistributions in binary form must reproduce the above copyright
15*150812a8SEvalZero notice, this list of conditions and the following disclaimer in the
16*150812a8SEvalZero documentation and/or other materials provided with the distribution.
17*150812a8SEvalZero - Neither the name of ARM nor the names of its contributors may be used
18*150812a8SEvalZero to endorse or promote products derived from this software without
19*150812a8SEvalZero specific prior written permission.
20*150812a8SEvalZero *
21*150812a8SEvalZero THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22*150812a8SEvalZero AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23*150812a8SEvalZero IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24*150812a8SEvalZero ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
25*150812a8SEvalZero LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26*150812a8SEvalZero CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27*150812a8SEvalZero SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28*150812a8SEvalZero INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29*150812a8SEvalZero CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30*150812a8SEvalZero ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31*150812a8SEvalZero POSSIBILITY OF SUCH DAMAGE.
32*150812a8SEvalZero ---------------------------------------------------------------------------*/
33*150812a8SEvalZero
34*150812a8SEvalZero
35*150812a8SEvalZero #ifndef __CMSIS_ARMCC_V6_H
36*150812a8SEvalZero #define __CMSIS_ARMCC_V6_H
37*150812a8SEvalZero
38*150812a8SEvalZero
39*150812a8SEvalZero /* ########################### Core Function Access ########################### */
40*150812a8SEvalZero /** \ingroup CMSIS_Core_FunctionInterface
41*150812a8SEvalZero \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions
42*150812a8SEvalZero @{
43*150812a8SEvalZero */
44*150812a8SEvalZero
/**
  \brief   Enable IRQ Interrupts
  \details Enables IRQ interrupts by clearing the I-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__attribute__((always_inline)) __STATIC_INLINE void __enable_irq(void)
{
  /* "memory" clobber = compiler barrier: memory accesses are not reordered
     across the point where interrupts become enabled. */
  __ASM volatile ("cpsie i" : : : "memory");
}
54*150812a8SEvalZero
55*150812a8SEvalZero
/**
  \brief   Disable IRQ Interrupts
  \details Disables IRQ interrupts by setting the I-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__attribute__((always_inline)) __STATIC_INLINE void __disable_irq(void)
{
  /* "memory" clobber = compiler barrier: memory accesses are not reordered
     across the point where interrupts become disabled. */
  __ASM volatile ("cpsid i" : : : "memory");
}
65*150812a8SEvalZero
66*150812a8SEvalZero
/**
  \brief   Get Control Register
  \details Returns the content of the Control Register.
  \return               Control Register value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __get_CONTROL(void)
{
  uint32_t result;

  /* MRS: copy the CONTROL special register into a general-purpose register. */
  __ASM volatile ("MRS %0, control" : "=r" (result) );
  return(result);
}
79*150812a8SEvalZero
80*150812a8SEvalZero
81*150812a8SEvalZero #if (__ARM_FEATURE_CMSE == 3U)
/**
  \brief   Get Control Register (non-secure)
  \details Returns the content of the non-secure Control Register when in secure mode.
  \return               non-secure Control Register value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __TZ_get_CONTROL_NS(void)
{
  uint32_t result;

  /* control_ns: non-secure alias of CONTROL, accessible from secure state. */
  __ASM volatile ("MRS %0, control_ns" : "=r" (result) );
  return(result);
}
94*150812a8SEvalZero #endif
95*150812a8SEvalZero
96*150812a8SEvalZero
/**
  \brief   Set Control Register
  \details Writes the given value to the Control Register.
  \param [in]    control  Control Register value to set
 */
__attribute__((always_inline)) __STATIC_INLINE void __set_CONTROL(uint32_t control)
{
  /* "memory" clobber: prevents the compiler from moving memory accesses
     across the CONTROL write (may change stack/privilege configuration). */
  __ASM volatile ("MSR control, %0" : : "r" (control) : "memory");
}
106*150812a8SEvalZero
107*150812a8SEvalZero
108*150812a8SEvalZero #if (__ARM_FEATURE_CMSE == 3U)
/**
  \brief   Set Control Register (non-secure)
  \details Writes the given value to the non-secure Control Register when in secure state.
  \param [in]    control  Control Register value to set
 */
__attribute__((always_inline)) __STATIC_INLINE void __TZ_set_CONTROL_NS(uint32_t control)
{
  /* control_ns: non-secure alias of CONTROL, writable from secure state. */
  __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory");
}
118*150812a8SEvalZero #endif
119*150812a8SEvalZero
120*150812a8SEvalZero
/**
  \brief   Get IPSR Register
  \details Returns the content of the IPSR Register.
  \return               IPSR Register value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __get_IPSR(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, ipsr" : "=r" (result) );
  return(result);
}
133*150812a8SEvalZero
134*150812a8SEvalZero
135*150812a8SEvalZero #if (__ARM_FEATURE_CMSE == 3U)
/**
  \brief   Get IPSR Register (non-secure)
  \details Returns the content of the non-secure IPSR Register when in secure state.
  \return               IPSR Register value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __TZ_get_IPSR_NS(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, ipsr_ns" : "=r" (result) );
  return(result);
}
148*150812a8SEvalZero #endif
149*150812a8SEvalZero
150*150812a8SEvalZero
/**
  \brief   Get APSR Register
  \details Returns the content of the APSR Register.
  \return               APSR Register value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __get_APSR(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, apsr" : "=r" (result) );
  return(result);
}
163*150812a8SEvalZero
164*150812a8SEvalZero
165*150812a8SEvalZero #if (__ARM_FEATURE_CMSE == 3U)
/**
  \brief   Get APSR Register (non-secure)
  \details Returns the content of the non-secure APSR Register when in secure state.
  \return               APSR Register value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __TZ_get_APSR_NS(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, apsr_ns" : "=r" (result) );
  return(result);
}
178*150812a8SEvalZero #endif
179*150812a8SEvalZero
180*150812a8SEvalZero
/**
  \brief   Get xPSR Register
  \details Returns the content of the xPSR Register.
  \return               xPSR Register value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __get_xPSR(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, xpsr" : "=r" (result) );
  return(result);
}
193*150812a8SEvalZero
194*150812a8SEvalZero
195*150812a8SEvalZero #if (__ARM_FEATURE_CMSE == 3U)
/**
  \brief   Get xPSR Register (non-secure)
  \details Returns the content of the non-secure xPSR Register when in secure state.
  \return               xPSR Register value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __TZ_get_xPSR_NS(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, xpsr_ns" : "=r" (result) );
  return(result);
}
208*150812a8SEvalZero #endif
209*150812a8SEvalZero
210*150812a8SEvalZero
/**
  \brief   Get Process Stack Pointer
  \details Returns the current value of the Process Stack Pointer (PSP).
  \return               PSP Register value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __get_PSP(void)
{
  register uint32_t result;

  __ASM volatile ("MRS %0, psp"  : "=r" (result) );
  return(result);
}
223*150812a8SEvalZero
224*150812a8SEvalZero
225*150812a8SEvalZero #if (__ARM_FEATURE_CMSE == 3U)
/**
  \brief   Get Process Stack Pointer (non-secure)
  \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state.
  \return               PSP Register value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __TZ_get_PSP_NS(void)
{
  register uint32_t result;

  __ASM volatile ("MRS %0, psp_ns"  : "=r" (result) );
  return(result);
}
238*150812a8SEvalZero #endif
239*150812a8SEvalZero
240*150812a8SEvalZero
241*150812a8SEvalZero /**
242*150812a8SEvalZero \brief Set Process Stack Pointer
243*150812a8SEvalZero \details Assigns the given value to the Process Stack Pointer (PSP).
244*150812a8SEvalZero \param [in] topOfProcStack Process Stack Pointer value to set
245*150812a8SEvalZero */
__set_PSP(uint32_t topOfProcStack)246*150812a8SEvalZero __attribute__((always_inline)) __STATIC_INLINE void __set_PSP(uint32_t topOfProcStack)
247*150812a8SEvalZero {
248*150812a8SEvalZero __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : "sp");
249*150812a8SEvalZero }
250*150812a8SEvalZero
251*150812a8SEvalZero
252*150812a8SEvalZero #if (__ARM_FEATURE_CMSE == 3U)
253*150812a8SEvalZero /**
254*150812a8SEvalZero \brief Set Process Stack Pointer (non-secure)
255*150812a8SEvalZero \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state.
256*150812a8SEvalZero \param [in] topOfProcStack Process Stack Pointer value to set
257*150812a8SEvalZero */
__TZ_set_PSP_NS(uint32_t topOfProcStack)258*150812a8SEvalZero __attribute__((always_inline)) __STATIC_INLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack)
259*150812a8SEvalZero {
260*150812a8SEvalZero __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : "sp");
261*150812a8SEvalZero }
262*150812a8SEvalZero #endif
263*150812a8SEvalZero
264*150812a8SEvalZero
/**
  \brief   Get Main Stack Pointer
  \details Returns the current value of the Main Stack Pointer (MSP).
  \return               MSP Register value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __get_MSP(void)
{
  register uint32_t result;

  __ASM volatile ("MRS %0, msp" : "=r" (result) );
  return(result);
}
277*150812a8SEvalZero
278*150812a8SEvalZero
279*150812a8SEvalZero #if (__ARM_FEATURE_CMSE == 3U)
/**
  \brief   Get Main Stack Pointer (non-secure)
  \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state.
  \return               MSP Register value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __TZ_get_MSP_NS(void)
{
  register uint32_t result;

  __ASM volatile ("MRS %0, msp_ns" : "=r" (result) );
  return(result);
}
292*150812a8SEvalZero #endif
293*150812a8SEvalZero
294*150812a8SEvalZero
295*150812a8SEvalZero /**
296*150812a8SEvalZero \brief Set Main Stack Pointer
297*150812a8SEvalZero \details Assigns the given value to the Main Stack Pointer (MSP).
298*150812a8SEvalZero \param [in] topOfMainStack Main Stack Pointer value to set
299*150812a8SEvalZero */
__set_MSP(uint32_t topOfMainStack)300*150812a8SEvalZero __attribute__((always_inline)) __STATIC_INLINE void __set_MSP(uint32_t topOfMainStack)
301*150812a8SEvalZero {
302*150812a8SEvalZero __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : "sp");
303*150812a8SEvalZero }
304*150812a8SEvalZero
305*150812a8SEvalZero
306*150812a8SEvalZero #if (__ARM_FEATURE_CMSE == 3U)
307*150812a8SEvalZero /**
308*150812a8SEvalZero \brief Set Main Stack Pointer (non-secure)
309*150812a8SEvalZero \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state.
310*150812a8SEvalZero \param [in] topOfMainStack Main Stack Pointer value to set
311*150812a8SEvalZero */
__TZ_set_MSP_NS(uint32_t topOfMainStack)312*150812a8SEvalZero __attribute__((always_inline)) __STATIC_INLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack)
313*150812a8SEvalZero {
314*150812a8SEvalZero __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : "sp");
315*150812a8SEvalZero }
316*150812a8SEvalZero #endif
317*150812a8SEvalZero
318*150812a8SEvalZero
/**
  \brief   Get Priority Mask
  \details Returns the current state of the priority mask bit from the Priority Mask Register.
  \return               Priority Mask value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __get_PRIMASK(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, primask" : "=r" (result) );
  return(result);
}
331*150812a8SEvalZero
332*150812a8SEvalZero
333*150812a8SEvalZero #if (__ARM_FEATURE_CMSE == 3U)
/**
  \brief   Get Priority Mask (non-secure)
  \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state.
  \return               Priority Mask value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __TZ_get_PRIMASK_NS(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, primask_ns" : "=r" (result) );
  return(result);
}
346*150812a8SEvalZero #endif
347*150812a8SEvalZero
348*150812a8SEvalZero
/**
  \brief   Set Priority Mask
  \details Assigns the given value to the Priority Mask Register.
  \param [in]    priMask  Priority Mask
 */
__attribute__((always_inline)) __STATIC_INLINE void __set_PRIMASK(uint32_t priMask)
{
  /* "memory" clobber: acts as a compiler barrier around the interrupt-masking
     change, like __enable_irq/__disable_irq. */
  __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory");
}
358*150812a8SEvalZero
359*150812a8SEvalZero
360*150812a8SEvalZero #if (__ARM_FEATURE_CMSE == 3U)
/**
  \brief   Set Priority Mask (non-secure)
  \details Assigns the given value to the non-secure Priority Mask Register when in secure state.
  \param [in]    priMask  Priority Mask
 */
__attribute__((always_inline)) __STATIC_INLINE void __TZ_set_PRIMASK_NS(uint32_t priMask)
{
  __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory");
}
370*150812a8SEvalZero #endif
371*150812a8SEvalZero
372*150812a8SEvalZero
373*150812a8SEvalZero #if ((__ARM_ARCH_7M__ == 1U) || (__ARM_ARCH_7EM__ == 1U) || (__ARM_ARCH_8M__ == 1U)) /* ToDo: ARMCC_V6: check if this is ok for cortex >=3 */
374*150812a8SEvalZero
/**
  \brief   Enable FIQ
  \details Enables FIQ interrupts by clearing the F-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__attribute__((always_inline)) __STATIC_INLINE void __enable_fault_irq(void)
{
  /* "memory" clobber = compiler barrier around the fault-interrupt enable. */
  __ASM volatile ("cpsie f" : : : "memory");
}
384*150812a8SEvalZero
385*150812a8SEvalZero
/**
  \brief   Disable FIQ
  \details Disables FIQ interrupts by setting the F-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__attribute__((always_inline)) __STATIC_INLINE void __disable_fault_irq(void)
{
  /* "memory" clobber = compiler barrier around the fault-interrupt disable. */
  __ASM volatile ("cpsid f" : : : "memory");
}
395*150812a8SEvalZero
396*150812a8SEvalZero
/**
  \brief   Get Base Priority
  \details Returns the current value of the Base Priority register.
  \return               Base Priority register value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __get_BASEPRI(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, basepri" : "=r" (result) );
  return(result);
}
409*150812a8SEvalZero
410*150812a8SEvalZero
411*150812a8SEvalZero #if (__ARM_FEATURE_CMSE == 3U)
/**
  \brief   Get Base Priority (non-secure)
  \details Returns the current value of the non-secure Base Priority register when in secure state.
  \return               Base Priority register value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __TZ_get_BASEPRI_NS(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) );
  return(result);
}
424*150812a8SEvalZero #endif
425*150812a8SEvalZero
426*150812a8SEvalZero
/**
  \brief   Set Base Priority
  \details Assigns the given value to the Base Priority register.
  \param [in]    value  Base Priority value to set
 */
__attribute__((always_inline)) __STATIC_INLINE void __set_BASEPRI(uint32_t value)
{
  /* NOTE(doc): \param name corrected from "basePri" to match the actual
     parameter name "value". */
  __ASM volatile ("MSR basepri, %0" : : "r" (value) : "memory");
}
436*150812a8SEvalZero
437*150812a8SEvalZero
438*150812a8SEvalZero #if (__ARM_FEATURE_CMSE == 3U)
/**
  \brief   Set Base Priority (non-secure)
  \details Assigns the given value to the non-secure Base Priority register when in secure state.
  \param [in]    value  Base Priority value to set
 */
__attribute__((always_inline)) __STATIC_INLINE void __TZ_set_BASEPRI_NS(uint32_t value)
{
  /* NOTE(doc): \param name corrected from "basePri" to match the actual
     parameter name "value". */
  __ASM volatile ("MSR basepri_ns, %0" : : "r" (value) : "memory");
}
448*150812a8SEvalZero #endif
449*150812a8SEvalZero
450*150812a8SEvalZero
/**
  \brief   Set Base Priority with condition
  \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled,
           or the new value increases the BASEPRI priority level.
  \param [in]    value  Base Priority value to set
 */
__attribute__((always_inline)) __STATIC_INLINE void __set_BASEPRI_MAX(uint32_t value)
{
  /* basepri_max: conditional-write alias of BASEPRI (hardware performs the
     "only raise priority" check). \param name corrected to "value". */
  __ASM volatile ("MSR basepri_max, %0" : : "r" (value) : "memory");
}
461*150812a8SEvalZero
462*150812a8SEvalZero
463*150812a8SEvalZero #if (__ARM_FEATURE_CMSE == 3U)
/**
  \brief   Set Base Priority with condition (non-secure)
  \details Assigns the given value to the non-secure Base Priority register when in secure state only if BASEPRI masking is disabled,
           or the new value increases the BASEPRI priority level.
  \param [in]    value  Base Priority value to set
 */
__attribute__((always_inline)) __STATIC_INLINE void __TZ_set_BASEPRI_MAX_NS(uint32_t value)
{
  /* NOTE(doc): \param name corrected from "basePri" to match the actual
     parameter name "value". */
  __ASM volatile ("MSR basepri_max_ns, %0" : : "r" (value) : "memory");
}
474*150812a8SEvalZero #endif
475*150812a8SEvalZero
476*150812a8SEvalZero
/**
  \brief   Get Fault Mask
  \details Returns the current value of the Fault Mask register.
  \return               Fault Mask register value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __get_FAULTMASK(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, faultmask" : "=r" (result) );
  return(result);
}
489*150812a8SEvalZero
490*150812a8SEvalZero
491*150812a8SEvalZero #if (__ARM_FEATURE_CMSE == 3U)
/**
  \brief   Get Fault Mask (non-secure)
  \details Returns the current value of the non-secure Fault Mask register when in secure state.
  \return               Fault Mask register value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __TZ_get_FAULTMASK_NS(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) );
  return(result);
}
504*150812a8SEvalZero #endif
505*150812a8SEvalZero
506*150812a8SEvalZero
/**
  \brief   Set Fault Mask
  \details Assigns the given value to the Fault Mask register.
  \param [in]    faultMask  Fault Mask value to set
 */
__attribute__((always_inline)) __STATIC_INLINE void __set_FAULTMASK(uint32_t faultMask)
{
  /* "memory" clobber: compiler barrier around the fault-mask change. */
  __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory");
}
516*150812a8SEvalZero
517*150812a8SEvalZero
518*150812a8SEvalZero #if (__ARM_FEATURE_CMSE == 3U)
/**
  \brief   Set Fault Mask (non-secure)
  \details Assigns the given value to the non-secure Fault Mask register when in secure state.
  \param [in]    faultMask  Fault Mask value to set
 */
__attribute__((always_inline)) __STATIC_INLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask)
{
  __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory");
}
528*150812a8SEvalZero #endif
529*150812a8SEvalZero
530*150812a8SEvalZero
#endif /* ((__ARM_ARCH_7M__ == 1U) || (__ARM_ARCH_7EM__ == 1U) || (__ARM_ARCH_8M__ == 1U)) */
532*150812a8SEvalZero
533*150812a8SEvalZero
534*150812a8SEvalZero #if (__ARM_ARCH_8M__ == 1U)
535*150812a8SEvalZero
/**
  \brief   Get Process Stack Pointer Limit
  \details Returns the current value of the Process Stack Pointer Limit (PSPLIM).
  \return               PSPLIM Register value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __get_PSPLIM(void)
{
  register uint32_t result;

  __ASM volatile ("MRS %0, psplim"  : "=r" (result) );
  return(result);
}
548*150812a8SEvalZero
549*150812a8SEvalZero
550*150812a8SEvalZero #if (__ARM_FEATURE_CMSE == 3U) && (__ARM_ARCH_PROFILE == 'M') /* ToDo: ARMCC_V6: check predefined macro for mainline */
/**
  \brief   Get Process Stack Pointer Limit (non-secure)
  \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state.
  \return               PSPLIM Register value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __TZ_get_PSPLIM_NS(void)
{
  register uint32_t result;

  __ASM volatile ("MRS %0, psplim_ns"  : "=r" (result) );
  return(result);
}
563*150812a8SEvalZero #endif
564*150812a8SEvalZero
565*150812a8SEvalZero
/**
  \brief   Set Process Stack Pointer Limit
  \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM).
  \param [in]    ProcStackPtrLimit  Process Stack Pointer Limit value to set
 */
__attribute__((always_inline)) __STATIC_INLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit)
{
  __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit));
}
575*150812a8SEvalZero
576*150812a8SEvalZero
577*150812a8SEvalZero #if (__ARM_FEATURE_CMSE == 3U) && (__ARM_ARCH_PROFILE == 'M') /* ToDo: ARMCC_V6: check predefined macro for mainline */
578*150812a8SEvalZero /**
579*150812a8SEvalZero \brief Set Process Stack Pointer (non-secure)
580*150812a8SEvalZero \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state.
581*150812a8SEvalZero \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set
582*150812a8SEvalZero */
__TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit)583*150812a8SEvalZero __attribute__((always_inline)) __STATIC_INLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit)
584*150812a8SEvalZero {
585*150812a8SEvalZero __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit));
586*150812a8SEvalZero }
587*150812a8SEvalZero #endif
588*150812a8SEvalZero
589*150812a8SEvalZero
/**
  \brief   Get Main Stack Pointer Limit
  \details Returns the current value of the Main Stack Pointer Limit (MSPLIM).
  \return               MSPLIM Register value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __get_MSPLIM(void)
{
  register uint32_t result;

  __ASM volatile ("MRS %0, msplim" : "=r" (result) );

  return(result);
}
603*150812a8SEvalZero
604*150812a8SEvalZero
605*150812a8SEvalZero #if (__ARM_FEATURE_CMSE == 3U) && (__ARM_ARCH_PROFILE == 'M') /* ToDo: ARMCC_V6: check predefined macro for mainline */
/**
  \brief   Get Main Stack Pointer Limit (non-secure)
  \details Returns the current value of the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state.
  \return               MSPLIM Register value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __TZ_get_MSPLIM_NS(void)
{
  register uint32_t result;

  __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) );
  return(result);
}
618*150812a8SEvalZero #endif
619*150812a8SEvalZero
620*150812a8SEvalZero
621*150812a8SEvalZero /**
622*150812a8SEvalZero \brief Set Main Stack Pointer Limit
623*150812a8SEvalZero \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM).
624*150812a8SEvalZero \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set
625*150812a8SEvalZero */
__set_MSPLIM(uint32_t MainStackPtrLimit)626*150812a8SEvalZero __attribute__((always_inline)) __STATIC_INLINE void __set_MSPLIM(uint32_t MainStackPtrLimit)
627*150812a8SEvalZero {
628*150812a8SEvalZero __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit));
629*150812a8SEvalZero }
630*150812a8SEvalZero
631*150812a8SEvalZero
632*150812a8SEvalZero #if (__ARM_FEATURE_CMSE == 3U) && (__ARM_ARCH_PROFILE == 'M') /* ToDo: ARMCC_V6: check predefined macro for mainline */
633*150812a8SEvalZero /**
634*150812a8SEvalZero \brief Set Main Stack Pointer Limit (non-secure)
635*150812a8SEvalZero \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state.
636*150812a8SEvalZero \param [in] MainStackPtrLimit Main Stack Pointer value to set
637*150812a8SEvalZero */
__TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit)638*150812a8SEvalZero __attribute__((always_inline)) __STATIC_INLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit)
639*150812a8SEvalZero {
640*150812a8SEvalZero __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit));
641*150812a8SEvalZero }
642*150812a8SEvalZero #endif
643*150812a8SEvalZero
644*150812a8SEvalZero #endif /* (__ARM_ARCH_8M__ == 1U) */
645*150812a8SEvalZero
646*150812a8SEvalZero
647*150812a8SEvalZero #if ((__ARM_ARCH_7EM__ == 1U) || (__ARM_ARCH_8M__ == 1U)) /* ToDo: ARMCC_V6: check if this is ok for cortex >=4 */
648*150812a8SEvalZero
649*150812a8SEvalZero /**
650*150812a8SEvalZero \brief Get FPSCR
651*150812a8SEvalZero \details eturns the current value of the Floating Point Status/Control register.
652*150812a8SEvalZero \return Floating Point Status/Control register value
653*150812a8SEvalZero */
654*150812a8SEvalZero #define __get_FPSCR __builtin_arm_get_fpscr
655*150812a8SEvalZero #if 0
656*150812a8SEvalZero __attribute__((always_inline)) __STATIC_INLINE uint32_t __get_FPSCR(void)
657*150812a8SEvalZero {
658*150812a8SEvalZero #if (__FPU_PRESENT == 1U) && (__FPU_USED == 1U)
659*150812a8SEvalZero uint32_t result;
660*150812a8SEvalZero
661*150812a8SEvalZero __ASM volatile (""); /* Empty asm statement works as a scheduling barrier */
662*150812a8SEvalZero __ASM volatile ("VMRS %0, fpscr" : "=r" (result) );
663*150812a8SEvalZero __ASM volatile ("");
664*150812a8SEvalZero return(result);
665*150812a8SEvalZero #else
666*150812a8SEvalZero return(0);
667*150812a8SEvalZero #endif
668*150812a8SEvalZero }
669*150812a8SEvalZero #endif
670*150812a8SEvalZero
671*150812a8SEvalZero #if (__ARM_FEATURE_CMSE == 3U)
672*150812a8SEvalZero /**
673*150812a8SEvalZero \brief Get FPSCR (non-secure)
674*150812a8SEvalZero \details Returns the current value of the non-secure Floating Point Status/Control register when in secure state.
675*150812a8SEvalZero \return Floating Point Status/Control register value
676*150812a8SEvalZero */
__TZ_get_FPSCR_NS(void)677*150812a8SEvalZero __attribute__((always_inline)) __STATIC_INLINE uint32_t __TZ_get_FPSCR_NS(void)
678*150812a8SEvalZero {
679*150812a8SEvalZero #if (__FPU_PRESENT == 1U) && (__FPU_USED == 1U)
680*150812a8SEvalZero uint32_t result;
681*150812a8SEvalZero
682*150812a8SEvalZero __ASM volatile (""); /* Empty asm statement works as a scheduling barrier */
683*150812a8SEvalZero __ASM volatile ("VMRS %0, fpscr_ns" : "=r" (result) );
684*150812a8SEvalZero __ASM volatile ("");
685*150812a8SEvalZero return(result);
686*150812a8SEvalZero #else
687*150812a8SEvalZero return(0);
688*150812a8SEvalZero #endif
689*150812a8SEvalZero }
690*150812a8SEvalZero #endif
691*150812a8SEvalZero
692*150812a8SEvalZero
693*150812a8SEvalZero /**
694*150812a8SEvalZero \brief Set FPSCR
695*150812a8SEvalZero \details Assigns the given value to the Floating Point Status/Control register.
696*150812a8SEvalZero \param [in] fpscr Floating Point Status/Control value to set
697*150812a8SEvalZero */
698*150812a8SEvalZero #define __set_FPSCR __builtin_arm_set_fpscr
699*150812a8SEvalZero #if 0
700*150812a8SEvalZero __attribute__((always_inline)) __STATIC_INLINE void __set_FPSCR(uint32_t fpscr)
701*150812a8SEvalZero {
702*150812a8SEvalZero #if (__FPU_PRESENT == 1U) && (__FPU_USED == 1U)
703*150812a8SEvalZero __ASM volatile (""); /* Empty asm statement works as a scheduling barrier */
704*150812a8SEvalZero __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc");
705*150812a8SEvalZero __ASM volatile ("");
706*150812a8SEvalZero #endif
707*150812a8SEvalZero }
708*150812a8SEvalZero #endif
709*150812a8SEvalZero
710*150812a8SEvalZero #if (__ARM_FEATURE_CMSE == 3U)
711*150812a8SEvalZero /**
712*150812a8SEvalZero \brief Set FPSCR (non-secure)
713*150812a8SEvalZero \details Assigns the given value to the non-secure Floating Point Status/Control register when in secure state.
714*150812a8SEvalZero \param [in] fpscr Floating Point Status/Control value to set
715*150812a8SEvalZero */
__TZ_set_FPSCR_NS(uint32_t fpscr)716*150812a8SEvalZero __attribute__((always_inline)) __STATIC_INLINE void __TZ_set_FPSCR_NS(uint32_t fpscr)
717*150812a8SEvalZero {
718*150812a8SEvalZero #if (__FPU_PRESENT == 1U) && (__FPU_USED == 1U)
719*150812a8SEvalZero __ASM volatile (""); /* Empty asm statement works as a scheduling barrier */
720*150812a8SEvalZero __ASM volatile ("VMSR fpscr_ns, %0" : : "r" (fpscr) : "vfpcc");
721*150812a8SEvalZero __ASM volatile ("");
722*150812a8SEvalZero #endif
723*150812a8SEvalZero }
724*150812a8SEvalZero #endif
725*150812a8SEvalZero
726*150812a8SEvalZero #endif /* ((__ARM_ARCH_7EM__ == 1U) || (__ARM_ARCH_8M__ == 1U)) */
727*150812a8SEvalZero
728*150812a8SEvalZero
729*150812a8SEvalZero
730*150812a8SEvalZero /*@} end of CMSIS_Core_RegAccFunctions */
731*150812a8SEvalZero
732*150812a8SEvalZero
733*150812a8SEvalZero /* ########################## Core Instruction Access ######################### */
734*150812a8SEvalZero /** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface
735*150812a8SEvalZero Access to dedicated instructions
736*150812a8SEvalZero @{
737*150812a8SEvalZero */
738*150812a8SEvalZero
739*150812a8SEvalZero /* Define macros for porting to both thumb1 and thumb2.
740*150812a8SEvalZero * For thumb1, use low register (r0-r7), specified by constraint "l"
741*150812a8SEvalZero * Otherwise, use general registers, specified by constraint "r" */
742*150812a8SEvalZero #if defined (__thumb__) && !defined (__thumb2__)
743*150812a8SEvalZero #define __CMSIS_GCC_OUT_REG(r) "=l" (r)
744*150812a8SEvalZero #define __CMSIS_GCC_USE_REG(r) "l" (r)
745*150812a8SEvalZero #else
746*150812a8SEvalZero #define __CMSIS_GCC_OUT_REG(r) "=r" (r)
747*150812a8SEvalZero #define __CMSIS_GCC_USE_REG(r) "r" (r)
748*150812a8SEvalZero #endif
749*150812a8SEvalZero
750*150812a8SEvalZero /**
751*150812a8SEvalZero \brief No Operation
752*150812a8SEvalZero \details No Operation does nothing. This instruction can be used for code alignment purposes.
753*150812a8SEvalZero */
754*150812a8SEvalZero #define __NOP __builtin_arm_nop
755*150812a8SEvalZero
756*150812a8SEvalZero /**
757*150812a8SEvalZero \brief Wait For Interrupt
758*150812a8SEvalZero \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs.
759*150812a8SEvalZero */
760*150812a8SEvalZero #define __WFI __builtin_arm_wfi
761*150812a8SEvalZero
762*150812a8SEvalZero
763*150812a8SEvalZero /**
764*150812a8SEvalZero \brief Wait For Event
765*150812a8SEvalZero \details Wait For Event is a hint instruction that permits the processor to enter
766*150812a8SEvalZero a low-power state until one of a number of events occurs.
767*150812a8SEvalZero */
768*150812a8SEvalZero #define __WFE __builtin_arm_wfe
769*150812a8SEvalZero
770*150812a8SEvalZero
771*150812a8SEvalZero /**
772*150812a8SEvalZero \brief Send Event
773*150812a8SEvalZero \details Send Event is a hint instruction. It causes an event to be signaled to the CPU.
774*150812a8SEvalZero */
775*150812a8SEvalZero #define __SEV __builtin_arm_sev
776*150812a8SEvalZero
777*150812a8SEvalZero
778*150812a8SEvalZero /**
779*150812a8SEvalZero \brief Instruction Synchronization Barrier
780*150812a8SEvalZero \details Instruction Synchronization Barrier flushes the pipeline in the processor,
781*150812a8SEvalZero so that all instructions following the ISB are fetched from cache or memory,
782*150812a8SEvalZero after the instruction has been completed.
783*150812a8SEvalZero */
784*150812a8SEvalZero #define __ISB() __builtin_arm_isb(0xF);
785*150812a8SEvalZero
786*150812a8SEvalZero /**
787*150812a8SEvalZero \brief Data Synchronization Barrier
788*150812a8SEvalZero \details Acts as a special kind of Data Memory Barrier.
789*150812a8SEvalZero It completes when all explicit memory accesses before this instruction complete.
790*150812a8SEvalZero */
791*150812a8SEvalZero #define __DSB() __builtin_arm_dsb(0xF);
792*150812a8SEvalZero
793*150812a8SEvalZero
794*150812a8SEvalZero /**
795*150812a8SEvalZero \brief Data Memory Barrier
796*150812a8SEvalZero \details Ensures the apparent order of the explicit memory operations before
797*150812a8SEvalZero and after the instruction, without ensuring their completion.
798*150812a8SEvalZero */
799*150812a8SEvalZero #define __DMB() __builtin_arm_dmb(0xF);
800*150812a8SEvalZero
801*150812a8SEvalZero
802*150812a8SEvalZero /**
803*150812a8SEvalZero \brief Reverse byte order (32 bit)
804*150812a8SEvalZero \details Reverses the byte order in integer value.
805*150812a8SEvalZero \param [in] value Value to reverse
806*150812a8SEvalZero \return Reversed value
807*150812a8SEvalZero */
808*150812a8SEvalZero #define __REV __builtin_bswap32
809*150812a8SEvalZero
810*150812a8SEvalZero
811*150812a8SEvalZero /**
812*150812a8SEvalZero \brief Reverse byte order (16 bit)
813*150812a8SEvalZero \details Reverses the byte order in two unsigned short values.
814*150812a8SEvalZero \param [in] value Value to reverse
815*150812a8SEvalZero \return Reversed value
816*150812a8SEvalZero */
817*150812a8SEvalZero #define __REV16 __builtin_bswap16 /* ToDo: ARMCC_V6: check if __builtin_bswap16 could be used */
818*150812a8SEvalZero #if 0
819*150812a8SEvalZero __attribute__((always_inline)) __STATIC_INLINE uint32_t __REV16(uint32_t value)
820*150812a8SEvalZero {
821*150812a8SEvalZero uint32_t result;
822*150812a8SEvalZero
823*150812a8SEvalZero __ASM volatile ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
824*150812a8SEvalZero return(result);
825*150812a8SEvalZero }
826*150812a8SEvalZero #endif
827*150812a8SEvalZero
828*150812a8SEvalZero
829*150812a8SEvalZero /**
830*150812a8SEvalZero \brief Reverse byte order in signed short value
831*150812a8SEvalZero \details Reverses the byte order in a signed short value with sign extension to integer.
832*150812a8SEvalZero \param [in] value Value to reverse
833*150812a8SEvalZero \return Reversed value
834*150812a8SEvalZero */
835*150812a8SEvalZero /* ToDo: ARMCC_V6: check if __builtin_bswap16 could be used */
__REVSH(int32_t value)836*150812a8SEvalZero __attribute__((always_inline)) __STATIC_INLINE int32_t __REVSH(int32_t value)
837*150812a8SEvalZero {
838*150812a8SEvalZero int32_t result;
839*150812a8SEvalZero
840*150812a8SEvalZero __ASM volatile ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
841*150812a8SEvalZero return(result);
842*150812a8SEvalZero }
843*150812a8SEvalZero
844*150812a8SEvalZero
845*150812a8SEvalZero /**
846*150812a8SEvalZero \brief Rotate Right in unsigned value (32 bit)
847*150812a8SEvalZero \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits.
848*150812a8SEvalZero \param [in] op1 Value to rotate
849*150812a8SEvalZero \param [in] op2 Number of Bits to rotate
850*150812a8SEvalZero \return Rotated value
851*150812a8SEvalZero */
__ROR(uint32_t op1,uint32_t op2)852*150812a8SEvalZero __attribute__((always_inline)) __STATIC_INLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
853*150812a8SEvalZero {
854*150812a8SEvalZero return (op1 >> op2) | (op1 << (32U - op2));
855*150812a8SEvalZero }
856*150812a8SEvalZero
857*150812a8SEvalZero
858*150812a8SEvalZero /**
859*150812a8SEvalZero \brief Breakpoint
860*150812a8SEvalZero \details Causes the processor to enter Debug state.
861*150812a8SEvalZero Debug tools can use this to investigate system state when the instruction at a particular address is reached.
862*150812a8SEvalZero \param [in] value is ignored by the processor.
863*150812a8SEvalZero If required, a debugger can use it to store additional information about the breakpoint.
864*150812a8SEvalZero */
865*150812a8SEvalZero #define __BKPT(value) __ASM volatile ("bkpt "#value)
866*150812a8SEvalZero
867*150812a8SEvalZero
868*150812a8SEvalZero /**
869*150812a8SEvalZero \brief Reverse bit order of value
870*150812a8SEvalZero \details Reverses the bit order of the given value.
871*150812a8SEvalZero \param [in] value Value to reverse
872*150812a8SEvalZero \return Reversed value
873*150812a8SEvalZero */
874*150812a8SEvalZero /* ToDo: ARMCC_V6: check if __builtin_arm_rbit is supported */
__RBIT(uint32_t value)875*150812a8SEvalZero __attribute__((always_inline)) __STATIC_INLINE uint32_t __RBIT(uint32_t value)
876*150812a8SEvalZero {
877*150812a8SEvalZero uint32_t result;
878*150812a8SEvalZero
879*150812a8SEvalZero #if ((__ARM_ARCH_7M__ == 1U) || (__ARM_ARCH_7EM__ == 1U) || (__ARM_ARCH_8M__ == 1U)) /* ToDo: ARMCC_V6: check if this is ok for cortex >=3 */
880*150812a8SEvalZero __ASM volatile ("rbit %0, %1" : "=r" (result) : "r" (value) );
881*150812a8SEvalZero #else
882*150812a8SEvalZero int32_t s = 4 /*sizeof(v)*/ * 8 - 1; /* extra shift needed at end */
883*150812a8SEvalZero
884*150812a8SEvalZero result = value; /* r will be reversed bits of v; first get LSB of v */
885*150812a8SEvalZero for (value >>= 1U; value; value >>= 1U)
886*150812a8SEvalZero {
887*150812a8SEvalZero result <<= 1U;
888*150812a8SEvalZero result |= value & 1U;
889*150812a8SEvalZero s--;
890*150812a8SEvalZero }
891*150812a8SEvalZero result <<= s; /* shift when v's highest bits are zero */
892*150812a8SEvalZero #endif
893*150812a8SEvalZero return(result);
894*150812a8SEvalZero }
895*150812a8SEvalZero
896*150812a8SEvalZero
897*150812a8SEvalZero /**
898*150812a8SEvalZero \brief Count leading zeros
899*150812a8SEvalZero \details Counts the number of leading zeros of a data value.
900*150812a8SEvalZero \param [in] value Value to count the leading zeros
901*150812a8SEvalZero \return number of leading zeros in value
902*150812a8SEvalZero */
903*150812a8SEvalZero #define __CLZ __builtin_clz
904*150812a8SEvalZero
905*150812a8SEvalZero
906*150812a8SEvalZero #if ((__ARM_ARCH_7M__ == 1U) || (__ARM_ARCH_7EM__ == 1U) || (__ARM_ARCH_8M__ == 1U)) /* ToDo: ARMCC_V6: check if this is ok for cortex >=3 */
907*150812a8SEvalZero
908*150812a8SEvalZero /**
909*150812a8SEvalZero \brief LDR Exclusive (8 bit)
910*150812a8SEvalZero \details Executes a exclusive LDR instruction for 8 bit value.
911*150812a8SEvalZero \param [in] ptr Pointer to data
912*150812a8SEvalZero \return value of type uint8_t at (*ptr)
913*150812a8SEvalZero */
914*150812a8SEvalZero #define __LDREXB (uint8_t)__builtin_arm_ldrex
915*150812a8SEvalZero
916*150812a8SEvalZero
917*150812a8SEvalZero /**
918*150812a8SEvalZero \brief LDR Exclusive (16 bit)
919*150812a8SEvalZero \details Executes a exclusive LDR instruction for 16 bit values.
920*150812a8SEvalZero \param [in] ptr Pointer to data
921*150812a8SEvalZero \return value of type uint16_t at (*ptr)
922*150812a8SEvalZero */
923*150812a8SEvalZero #define __LDREXH (uint16_t)__builtin_arm_ldrex
924*150812a8SEvalZero
925*150812a8SEvalZero
926*150812a8SEvalZero /**
927*150812a8SEvalZero \brief LDR Exclusive (32 bit)
928*150812a8SEvalZero \details Executes a exclusive LDR instruction for 32 bit values.
929*150812a8SEvalZero \param [in] ptr Pointer to data
930*150812a8SEvalZero \return value of type uint32_t at (*ptr)
931*150812a8SEvalZero */
932*150812a8SEvalZero #define __LDREXW (uint32_t)__builtin_arm_ldrex
933*150812a8SEvalZero
934*150812a8SEvalZero
935*150812a8SEvalZero /**
936*150812a8SEvalZero \brief STR Exclusive (8 bit)
937*150812a8SEvalZero \details Executes a exclusive STR instruction for 8 bit values.
938*150812a8SEvalZero \param [in] value Value to store
939*150812a8SEvalZero \param [in] ptr Pointer to location
940*150812a8SEvalZero \return 0 Function succeeded
941*150812a8SEvalZero \return 1 Function failed
942*150812a8SEvalZero */
943*150812a8SEvalZero #define __STREXB (uint32_t)__builtin_arm_strex
944*150812a8SEvalZero
945*150812a8SEvalZero
946*150812a8SEvalZero /**
947*150812a8SEvalZero \brief STR Exclusive (16 bit)
948*150812a8SEvalZero \details Executes a exclusive STR instruction for 16 bit values.
949*150812a8SEvalZero \param [in] value Value to store
950*150812a8SEvalZero \param [in] ptr Pointer to location
951*150812a8SEvalZero \return 0 Function succeeded
952*150812a8SEvalZero \return 1 Function failed
953*150812a8SEvalZero */
954*150812a8SEvalZero #define __STREXH (uint32_t)__builtin_arm_strex
955*150812a8SEvalZero
956*150812a8SEvalZero
957*150812a8SEvalZero /**
958*150812a8SEvalZero \brief STR Exclusive (32 bit)
959*150812a8SEvalZero \details Executes a exclusive STR instruction for 32 bit values.
960*150812a8SEvalZero \param [in] value Value to store
961*150812a8SEvalZero \param [in] ptr Pointer to location
962*150812a8SEvalZero \return 0 Function succeeded
963*150812a8SEvalZero \return 1 Function failed
964*150812a8SEvalZero */
965*150812a8SEvalZero #define __STREXW (uint32_t)__builtin_arm_strex
966*150812a8SEvalZero
967*150812a8SEvalZero
968*150812a8SEvalZero /**
969*150812a8SEvalZero \brief Remove the exclusive lock
970*150812a8SEvalZero \details Removes the exclusive lock which is created by LDREX.
971*150812a8SEvalZero */
972*150812a8SEvalZero #define __CLREX __builtin_arm_clrex
973*150812a8SEvalZero
974*150812a8SEvalZero
975*150812a8SEvalZero /**
976*150812a8SEvalZero \brief Signed Saturate
977*150812a8SEvalZero \details Saturates a signed value.
978*150812a8SEvalZero \param [in] value Value to be saturated
979*150812a8SEvalZero \param [in] sat Bit position to saturate to (1..32)
980*150812a8SEvalZero \return Saturated value
981*150812a8SEvalZero */
982*150812a8SEvalZero /*#define __SSAT __builtin_arm_ssat*/
983*150812a8SEvalZero #define __SSAT(ARG1,ARG2) \
984*150812a8SEvalZero ({ \
985*150812a8SEvalZero int32_t __RES, __ARG1 = (ARG1); \
986*150812a8SEvalZero __ASM ("ssat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
987*150812a8SEvalZero __RES; \
988*150812a8SEvalZero })
989*150812a8SEvalZero
990*150812a8SEvalZero
991*150812a8SEvalZero /**
992*150812a8SEvalZero \brief Unsigned Saturate
993*150812a8SEvalZero \details Saturates an unsigned value.
994*150812a8SEvalZero \param [in] value Value to be saturated
995*150812a8SEvalZero \param [in] sat Bit position to saturate to (0..31)
996*150812a8SEvalZero \return Saturated value
997*150812a8SEvalZero */
998*150812a8SEvalZero #define __USAT __builtin_arm_usat
999*150812a8SEvalZero #if 0
1000*150812a8SEvalZero #define __USAT(ARG1,ARG2) \
1001*150812a8SEvalZero ({ \
1002*150812a8SEvalZero uint32_t __RES, __ARG1 = (ARG1); \
1003*150812a8SEvalZero __ASM ("usat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
1004*150812a8SEvalZero __RES; \
1005*150812a8SEvalZero })
1006*150812a8SEvalZero #endif
1007*150812a8SEvalZero
1008*150812a8SEvalZero
1009*150812a8SEvalZero /**
1010*150812a8SEvalZero \brief Rotate Right with Extend (32 bit)
1011*150812a8SEvalZero \details Moves each bit of a bitstring right by one bit.
1012*150812a8SEvalZero The carry input is shifted in at the left end of the bitstring.
1013*150812a8SEvalZero \param [in] value Value to rotate
1014*150812a8SEvalZero \return Rotated value
1015*150812a8SEvalZero */
__RRX(uint32_t value)1016*150812a8SEvalZero __attribute__((always_inline)) __STATIC_INLINE uint32_t __RRX(uint32_t value)
1017*150812a8SEvalZero {
1018*150812a8SEvalZero uint32_t result;
1019*150812a8SEvalZero
1020*150812a8SEvalZero __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
1021*150812a8SEvalZero return(result);
1022*150812a8SEvalZero }
1023*150812a8SEvalZero
1024*150812a8SEvalZero
1025*150812a8SEvalZero /**
1026*150812a8SEvalZero \brief LDRT Unprivileged (8 bit)
1027*150812a8SEvalZero \details Executes a Unprivileged LDRT instruction for 8 bit value.
1028*150812a8SEvalZero \param [in] ptr Pointer to data
1029*150812a8SEvalZero \return value of type uint8_t at (*ptr)
1030*150812a8SEvalZero */
__LDRBT(volatile uint8_t * ptr)1031*150812a8SEvalZero __attribute__((always_inline)) __STATIC_INLINE uint8_t __LDRBT(volatile uint8_t *ptr)
1032*150812a8SEvalZero {
1033*150812a8SEvalZero uint32_t result;
1034*150812a8SEvalZero
1035*150812a8SEvalZero __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) );
1036*150812a8SEvalZero return ((uint8_t) result); /* Add explicit type cast here */
1037*150812a8SEvalZero }
1038*150812a8SEvalZero
1039*150812a8SEvalZero
1040*150812a8SEvalZero /**
1041*150812a8SEvalZero \brief LDRT Unprivileged (16 bit)
1042*150812a8SEvalZero \details Executes a Unprivileged LDRT instruction for 16 bit values.
1043*150812a8SEvalZero \param [in] ptr Pointer to data
1044*150812a8SEvalZero \return value of type uint16_t at (*ptr)
1045*150812a8SEvalZero */
__LDRHT(volatile uint16_t * ptr)1046*150812a8SEvalZero __attribute__((always_inline)) __STATIC_INLINE uint16_t __LDRHT(volatile uint16_t *ptr)
1047*150812a8SEvalZero {
1048*150812a8SEvalZero uint32_t result;
1049*150812a8SEvalZero
1050*150812a8SEvalZero __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) );
1051*150812a8SEvalZero return ((uint16_t) result); /* Add explicit type cast here */
1052*150812a8SEvalZero }
1053*150812a8SEvalZero
1054*150812a8SEvalZero
1055*150812a8SEvalZero /**
1056*150812a8SEvalZero \brief LDRT Unprivileged (32 bit)
1057*150812a8SEvalZero \details Executes a Unprivileged LDRT instruction for 32 bit values.
1058*150812a8SEvalZero \param [in] ptr Pointer to data
1059*150812a8SEvalZero \return value of type uint32_t at (*ptr)
1060*150812a8SEvalZero */
__LDRT(volatile uint32_t * ptr)1061*150812a8SEvalZero __attribute__((always_inline)) __STATIC_INLINE uint32_t __LDRT(volatile uint32_t *ptr)
1062*150812a8SEvalZero {
1063*150812a8SEvalZero uint32_t result;
1064*150812a8SEvalZero
1065*150812a8SEvalZero __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) );
1066*150812a8SEvalZero return(result);
1067*150812a8SEvalZero }
1068*150812a8SEvalZero
1069*150812a8SEvalZero
1070*150812a8SEvalZero /**
1071*150812a8SEvalZero \brief STRT Unprivileged (8 bit)
1072*150812a8SEvalZero \details Executes a Unprivileged STRT instruction for 8 bit values.
1073*150812a8SEvalZero \param [in] value Value to store
1074*150812a8SEvalZero \param [in] ptr Pointer to location
1075*150812a8SEvalZero */
__STRBT(uint8_t value,volatile uint8_t * ptr)1076*150812a8SEvalZero __attribute__((always_inline)) __STATIC_INLINE void __STRBT(uint8_t value, volatile uint8_t *ptr)
1077*150812a8SEvalZero {
1078*150812a8SEvalZero __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) );
1079*150812a8SEvalZero }
1080*150812a8SEvalZero
1081*150812a8SEvalZero
1082*150812a8SEvalZero /**
1083*150812a8SEvalZero \brief STRT Unprivileged (16 bit)
1084*150812a8SEvalZero \details Executes a Unprivileged STRT instruction for 16 bit values.
1085*150812a8SEvalZero \param [in] value Value to store
1086*150812a8SEvalZero \param [in] ptr Pointer to location
1087*150812a8SEvalZero */
__STRHT(uint16_t value,volatile uint16_t * ptr)1088*150812a8SEvalZero __attribute__((always_inline)) __STATIC_INLINE void __STRHT(uint16_t value, volatile uint16_t *ptr)
1089*150812a8SEvalZero {
1090*150812a8SEvalZero __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) );
1091*150812a8SEvalZero }
1092*150812a8SEvalZero
1093*150812a8SEvalZero
1094*150812a8SEvalZero /**
1095*150812a8SEvalZero \brief STRT Unprivileged (32 bit)
1096*150812a8SEvalZero \details Executes a Unprivileged STRT instruction for 32 bit values.
1097*150812a8SEvalZero \param [in] value Value to store
1098*150812a8SEvalZero \param [in] ptr Pointer to location
1099*150812a8SEvalZero */
__STRT(uint32_t value,volatile uint32_t * ptr)1100*150812a8SEvalZero __attribute__((always_inline)) __STATIC_INLINE void __STRT(uint32_t value, volatile uint32_t *ptr)
1101*150812a8SEvalZero {
1102*150812a8SEvalZero __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) );
1103*150812a8SEvalZero }
1104*150812a8SEvalZero
1105*150812a8SEvalZero #endif /* ((__ARM_ARCH_7M__ == 1U) || (__ARM_ARCH_7EM__ == 1U) || (__ARM_ARCH_8M__ == 1U)) */
1106*150812a8SEvalZero
1107*150812a8SEvalZero
1108*150812a8SEvalZero #if (__ARM_ARCH_8M__ == 1U)
1109*150812a8SEvalZero
1110*150812a8SEvalZero /**
1111*150812a8SEvalZero \brief Load-Acquire (8 bit)
1112*150812a8SEvalZero \details Executes a LDAB instruction for 8 bit value.
1113*150812a8SEvalZero \param [in] ptr Pointer to data
1114*150812a8SEvalZero \return value of type uint8_t at (*ptr)
1115*150812a8SEvalZero */
__LDAB(volatile uint8_t * ptr)1116*150812a8SEvalZero __attribute__((always_inline)) __STATIC_INLINE uint8_t __LDAB(volatile uint8_t *ptr)
1117*150812a8SEvalZero {
1118*150812a8SEvalZero uint32_t result;
1119*150812a8SEvalZero
1120*150812a8SEvalZero __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) );
1121*150812a8SEvalZero return ((uint8_t) result);
1122*150812a8SEvalZero }
1123*150812a8SEvalZero
1124*150812a8SEvalZero
1125*150812a8SEvalZero /**
1126*150812a8SEvalZero \brief Load-Acquire (16 bit)
1127*150812a8SEvalZero \details Executes a LDAH instruction for 16 bit values.
1128*150812a8SEvalZero \param [in] ptr Pointer to data
1129*150812a8SEvalZero \return value of type uint16_t at (*ptr)
1130*150812a8SEvalZero */
__LDAH(volatile uint16_t * ptr)1131*150812a8SEvalZero __attribute__((always_inline)) __STATIC_INLINE uint16_t __LDAH(volatile uint16_t *ptr)
1132*150812a8SEvalZero {
1133*150812a8SEvalZero uint32_t result;
1134*150812a8SEvalZero
1135*150812a8SEvalZero __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) );
1136*150812a8SEvalZero return ((uint16_t) result);
1137*150812a8SEvalZero }
1138*150812a8SEvalZero
1139*150812a8SEvalZero
1140*150812a8SEvalZero /**
1141*150812a8SEvalZero \brief Load-Acquire (32 bit)
1142*150812a8SEvalZero \details Executes a LDA instruction for 32 bit values.
1143*150812a8SEvalZero \param [in] ptr Pointer to data
1144*150812a8SEvalZero \return value of type uint32_t at (*ptr)
1145*150812a8SEvalZero */
__LDA(volatile uint32_t * ptr)1146*150812a8SEvalZero __attribute__((always_inline)) __STATIC_INLINE uint32_t __LDA(volatile uint32_t *ptr)
1147*150812a8SEvalZero {
1148*150812a8SEvalZero uint32_t result;
1149*150812a8SEvalZero
1150*150812a8SEvalZero __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) );
1151*150812a8SEvalZero return(result);
1152*150812a8SEvalZero }
1153*150812a8SEvalZero
1154*150812a8SEvalZero
1155*150812a8SEvalZero /**
1156*150812a8SEvalZero \brief Store-Release (8 bit)
1157*150812a8SEvalZero \details Executes a STLB instruction for 8 bit values.
1158*150812a8SEvalZero \param [in] value Value to store
1159*150812a8SEvalZero \param [in] ptr Pointer to location
1160*150812a8SEvalZero */
__STLB(uint8_t value,volatile uint8_t * ptr)1161*150812a8SEvalZero __attribute__((always_inline)) __STATIC_INLINE void __STLB(uint8_t value, volatile uint8_t *ptr)
1162*150812a8SEvalZero {
1163*150812a8SEvalZero __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) );
1164*150812a8SEvalZero }
1165*150812a8SEvalZero
1166*150812a8SEvalZero
1167*150812a8SEvalZero /**
1168*150812a8SEvalZero \brief Store-Release (16 bit)
1169*150812a8SEvalZero \details Executes a STLH instruction for 16 bit values.
1170*150812a8SEvalZero \param [in] value Value to store
1171*150812a8SEvalZero \param [in] ptr Pointer to location
1172*150812a8SEvalZero */
__STLH(uint16_t value,volatile uint16_t * ptr)1173*150812a8SEvalZero __attribute__((always_inline)) __STATIC_INLINE void __STLH(uint16_t value, volatile uint16_t *ptr)
1174*150812a8SEvalZero {
1175*150812a8SEvalZero __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) );
1176*150812a8SEvalZero }
1177*150812a8SEvalZero
1178*150812a8SEvalZero
1179*150812a8SEvalZero /**
1180*150812a8SEvalZero \brief Store-Release (32 bit)
1181*150812a8SEvalZero \details Executes a STL instruction for 32 bit values.
1182*150812a8SEvalZero \param [in] value Value to store
1183*150812a8SEvalZero \param [in] ptr Pointer to location
1184*150812a8SEvalZero */
__STL(uint32_t value,volatile uint32_t * ptr)1185*150812a8SEvalZero __attribute__((always_inline)) __STATIC_INLINE void __STL(uint32_t value, volatile uint32_t *ptr)
1186*150812a8SEvalZero {
1187*150812a8SEvalZero __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) );
1188*150812a8SEvalZero }
1189*150812a8SEvalZero
1190*150812a8SEvalZero
1191*150812a8SEvalZero /**
1192*150812a8SEvalZero \brief Load-Acquire Exclusive (8 bit)
1193*150812a8SEvalZero \details Executes a LDAB exclusive instruction for 8 bit value.
1194*150812a8SEvalZero \param [in] ptr Pointer to data
1195*150812a8SEvalZero \return value of type uint8_t at (*ptr)
1196*150812a8SEvalZero */
1197*150812a8SEvalZero #define __LDAEXB (uint8_t)__builtin_arm_ldaex
1198*150812a8SEvalZero
1199*150812a8SEvalZero
1200*150812a8SEvalZero /**
1201*150812a8SEvalZero \brief Load-Acquire Exclusive (16 bit)
1202*150812a8SEvalZero \details Executes a LDAH exclusive instruction for 16 bit values.
1203*150812a8SEvalZero \param [in] ptr Pointer to data
1204*150812a8SEvalZero \return value of type uint16_t at (*ptr)
1205*150812a8SEvalZero */
1206*150812a8SEvalZero #define __LDAEXH (uint16_t)__builtin_arm_ldaex
1207*150812a8SEvalZero
1208*150812a8SEvalZero
1209*150812a8SEvalZero /**
1210*150812a8SEvalZero \brief Load-Acquire Exclusive (32 bit)
1211*150812a8SEvalZero \details Executes a LDA exclusive instruction for 32 bit values.
1212*150812a8SEvalZero \param [in] ptr Pointer to data
1213*150812a8SEvalZero \return value of type uint32_t at (*ptr)
1214*150812a8SEvalZero */
1215*150812a8SEvalZero #define __LDAEX (uint32_t)__builtin_arm_ldaex
1216*150812a8SEvalZero
1217*150812a8SEvalZero
1218*150812a8SEvalZero /**
1219*150812a8SEvalZero \brief Store-Release Exclusive (8 bit)
1220*150812a8SEvalZero \details Executes a STLB exclusive instruction for 8 bit values.
1221*150812a8SEvalZero \param [in] value Value to store
1222*150812a8SEvalZero \param [in] ptr Pointer to location
1223*150812a8SEvalZero \return 0 Function succeeded
1224*150812a8SEvalZero \return 1 Function failed
1225*150812a8SEvalZero */
1226*150812a8SEvalZero #define __STLEXB (uint32_t)__builtin_arm_stlex
1227*150812a8SEvalZero
1228*150812a8SEvalZero
1229*150812a8SEvalZero /**
1230*150812a8SEvalZero \brief Store-Release Exclusive (16 bit)
1231*150812a8SEvalZero \details Executes a STLH exclusive instruction for 16 bit values.
1232*150812a8SEvalZero \param [in] value Value to store
1233*150812a8SEvalZero \param [in] ptr Pointer to location
1234*150812a8SEvalZero \return 0 Function succeeded
1235*150812a8SEvalZero \return 1 Function failed
1236*150812a8SEvalZero */
1237*150812a8SEvalZero #define __STLEXH (uint32_t)__builtin_arm_stlex
1238*150812a8SEvalZero
1239*150812a8SEvalZero
1240*150812a8SEvalZero /**
1241*150812a8SEvalZero \brief Store-Release Exclusive (32 bit)
1242*150812a8SEvalZero \details Executes a STL exclusive instruction for 32 bit values.
1243*150812a8SEvalZero \param [in] value Value to store
1244*150812a8SEvalZero \param [in] ptr Pointer to location
1245*150812a8SEvalZero \return 0 Function succeeded
1246*150812a8SEvalZero \return 1 Function failed
1247*150812a8SEvalZero */
1248*150812a8SEvalZero #define __STLEX (uint32_t)__builtin_arm_stlex
1249*150812a8SEvalZero
1250*150812a8SEvalZero #endif /* (__ARM_ARCH_8M__ == 1U) */
1251*150812a8SEvalZero
1252*150812a8SEvalZero /*@}*/ /* end of group CMSIS_Core_InstructionInterface */
1253*150812a8SEvalZero
1254*150812a8SEvalZero
1255*150812a8SEvalZero /* ################### Compiler specific Intrinsics ########################### */
1256*150812a8SEvalZero /** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
1257*150812a8SEvalZero Access to dedicated SIMD instructions
1258*150812a8SEvalZero @{
1259*150812a8SEvalZero */
1260*150812a8SEvalZero
1261*150812a8SEvalZero #if (__ARM_FEATURE_DSP == 1U) /* ToDo: ARMCC_V6: This should be ARCH >= ARMv7-M + SIMD */
1262*150812a8SEvalZero
/** \brief Quad 8-bit signed addition (SADD8 instruction).
    \param [in] op1,op2  four packed signed 8-bit lanes each  \return packed lane-wise sums */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1270*150812a8SEvalZero
/** \brief Quad 8-bit signed saturating addition (QADD8 instruction).
    \param [in] op1,op2  four packed signed 8-bit lanes each  \return packed saturated sums */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1278*150812a8SEvalZero
/** \brief Quad 8-bit signed halving addition (SHADD8 instruction: each lane sum halved).
    \param [in] op1,op2  four packed signed 8-bit lanes each  \return packed halved sums */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1286*150812a8SEvalZero
/** \brief Quad 8-bit unsigned addition (UADD8 instruction).
    \param [in] op1,op2  four packed unsigned 8-bit lanes each  \return packed lane-wise sums */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1294*150812a8SEvalZero
/** \brief Quad 8-bit unsigned saturating addition (UQADD8 instruction).
    \param [in] op1,op2  four packed unsigned 8-bit lanes each  \return packed saturated sums */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1302*150812a8SEvalZero
/** \brief Quad 8-bit unsigned halving addition (UHADD8 instruction: each lane sum halved).
    \param [in] op1,op2  four packed unsigned 8-bit lanes each  \return packed halved sums */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1310*150812a8SEvalZero
1311*150812a8SEvalZero
/** \brief Quad 8-bit signed subtraction (SSUB8 instruction).
    \param [in] op1,op2  four packed signed 8-bit lanes each  \return packed lane-wise differences */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1319*150812a8SEvalZero
/** \brief Quad 8-bit signed saturating subtraction (QSUB8 instruction).
    \param [in] op1,op2  four packed signed 8-bit lanes each  \return packed saturated differences */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1327*150812a8SEvalZero
/** \brief Quad 8-bit signed halving subtraction (SHSUB8 instruction: each lane difference halved).
    \param [in] op1,op2  four packed signed 8-bit lanes each  \return packed halved differences */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1335*150812a8SEvalZero
/** \brief Quad 8-bit unsigned subtraction (USUB8 instruction).
    \param [in] op1,op2  four packed unsigned 8-bit lanes each  \return packed lane-wise differences */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1343*150812a8SEvalZero
/** \brief Quad 8-bit unsigned saturating subtraction (UQSUB8 instruction).
    \param [in] op1,op2  four packed unsigned 8-bit lanes each  \return packed saturated differences */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1351*150812a8SEvalZero
/** \brief Quad 8-bit unsigned halving subtraction (UHSUB8 instruction: each lane difference halved).
    \param [in] op1,op2  four packed unsigned 8-bit lanes each  \return packed halved differences */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1359*150812a8SEvalZero
1360*150812a8SEvalZero
/** \brief Dual 16-bit signed addition (SADD16 instruction).
    \param [in] op1,op2  two packed signed 16-bit halfwords each  \return packed halfword sums */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1368*150812a8SEvalZero
/** \brief Dual 16-bit signed saturating addition (QADD16 instruction).
    \param [in] op1,op2  two packed signed 16-bit halfwords each  \return packed saturated sums */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1376*150812a8SEvalZero
/** \brief Dual 16-bit signed halving addition (SHADD16 instruction: each halfword sum halved).
    \param [in] op1,op2  two packed signed 16-bit halfwords each  \return packed halved sums */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1384*150812a8SEvalZero
/** \brief Dual 16-bit unsigned addition (UADD16 instruction).
    \param [in] op1,op2  two packed unsigned 16-bit halfwords each  \return packed halfword sums */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1392*150812a8SEvalZero
/** \brief Dual 16-bit unsigned saturating addition (UQADD16 instruction).
    \param [in] op1,op2  two packed unsigned 16-bit halfwords each  \return packed saturated sums */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1400*150812a8SEvalZero
/** \brief Dual 16-bit unsigned halving addition (UHADD16 instruction: each halfword sum halved).
    \param [in] op1,op2  two packed unsigned 16-bit halfwords each  \return packed halved sums */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1408*150812a8SEvalZero
/** \brief Dual 16-bit signed subtraction (SSUB16 instruction).
    \param [in] op1,op2  two packed signed 16-bit halfwords each  \return packed halfword differences */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1416*150812a8SEvalZero
/** \brief Dual 16-bit signed saturating subtraction (QSUB16 instruction).
    \param [in] op1,op2  two packed signed 16-bit halfwords each  \return packed saturated differences */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1424*150812a8SEvalZero
/** \brief Dual 16-bit signed halving subtraction (SHSUB16 instruction: each halfword difference halved).
    \param [in] op1,op2  two packed signed 16-bit halfwords each  \return packed halved differences */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1432*150812a8SEvalZero
/** \brief Dual 16-bit unsigned subtraction (USUB16 instruction).
    \param [in] op1,op2  two packed unsigned 16-bit halfwords each  \return packed halfword differences */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1440*150812a8SEvalZero
/** \brief Dual 16-bit unsigned saturating subtraction (UQSUB16 instruction).
    \param [in] op1,op2  two packed unsigned 16-bit halfwords each  \return packed saturated differences */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1448*150812a8SEvalZero
/** \brief Dual 16-bit unsigned halving subtraction (UHSUB16 instruction: each halfword difference halved).
    \param [in] op1,op2  two packed unsigned 16-bit halfwords each  \return packed halved differences */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1456*150812a8SEvalZero
/** \brief Dual 16-bit signed add/subtract with halfword exchange (SASX instruction).
    \param [in] op1,op2  two packed signed 16-bit halfwords each  \return packed result */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1464*150812a8SEvalZero
/** \brief Dual 16-bit signed saturating add/subtract with halfword exchange (QASX instruction).
    \param [in] op1,op2  two packed signed 16-bit halfwords each  \return packed saturated result */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1472*150812a8SEvalZero
/** \brief Dual 16-bit signed halving add/subtract with halfword exchange (SHASX instruction).
    \param [in] op1,op2  two packed signed 16-bit halfwords each  \return packed halved result */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1480*150812a8SEvalZero
/** \brief Dual 16-bit unsigned add/subtract with halfword exchange (UASX instruction).
    \param [in] op1,op2  two packed unsigned 16-bit halfwords each  \return packed result */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1488*150812a8SEvalZero
/** \brief Dual 16-bit unsigned saturating add/subtract with halfword exchange (UQASX instruction).
    \param [in] op1,op2  two packed unsigned 16-bit halfwords each  \return packed saturated result */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1496*150812a8SEvalZero
/** \brief Dual 16-bit unsigned halving add/subtract with halfword exchange (UHASX instruction).
    \param [in] op1,op2  two packed unsigned 16-bit halfwords each  \return packed halved result */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1504*150812a8SEvalZero
/** \brief Dual 16-bit signed subtract/add with halfword exchange (SSAX instruction).
    \param [in] op1,op2  two packed signed 16-bit halfwords each  \return packed result */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1512*150812a8SEvalZero
/** \brief Dual 16-bit signed saturating subtract/add with halfword exchange (QSAX instruction).
    \param [in] op1,op2  two packed signed 16-bit halfwords each  \return packed saturated result */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1520*150812a8SEvalZero
/** \brief Dual 16-bit signed halving subtract/add with halfword exchange (SHSAX instruction).
    \param [in] op1,op2  two packed signed 16-bit halfwords each  \return packed halved result */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1528*150812a8SEvalZero
/** \brief Dual 16-bit unsigned subtract/add with halfword exchange (USAX instruction).
    \param [in] op1,op2  two packed unsigned 16-bit halfwords each  \return packed result */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1536*150812a8SEvalZero
/** \brief Dual 16-bit unsigned saturating subtract/add with halfword exchange (UQSAX instruction).
    \param [in] op1,op2  two packed unsigned 16-bit halfwords each  \return packed saturated result */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1544*150812a8SEvalZero
/** \brief Dual 16-bit unsigned halving subtract/add with halfword exchange (UHSAX instruction).
    \param [in] op1,op2  two packed unsigned 16-bit halfwords each  \return packed halved result */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1552*150812a8SEvalZero
/** \brief Unsigned sum of quad 8-bit absolute differences (USAD8 instruction).
    \param [in] op1,op2  four packed unsigned 8-bit lanes each  \return sum of the four |op1.b[i] - op2.b[i]| */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1560*150812a8SEvalZero
/** \brief Unsigned sum of quad 8-bit absolute differences with 32-bit accumulate (USADA8 instruction).
    \param [in] op1,op2  four packed unsigned 8-bit lanes each  \param [in] op3  accumulation value
    \return op3 plus the sum of the four absolute byte differences */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}
1568*150812a8SEvalZero
/** \brief Dual 16-bit signed saturate (SSAT16 instruction): saturates each halfword of ARG1 to ARG2 bits.
    \note ARG2 is passed through the inline-asm "I" constraint, so it must be a compile-time immediate. */
#define __SSAT16(ARG1,ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })
1575*150812a8SEvalZero
/** \brief Dual 16-bit unsigned saturate (USAT16 instruction): saturates each halfword of ARG1 to ARG2 bits.
    \note ARG2 is passed through the inline-asm "I" constraint, so it must be a compile-time immediate. */
#define __USAT16(ARG1,ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("usat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })
1582*150812a8SEvalZero
/** \brief Dual byte zero-extension (UXTB16 instruction): zero-extends two byte lanes of op1 to halfwords.
    \param [in] op1  packed source  \return two packed zero-extended 16-bit halfwords */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __UXTB16(uint32_t op1)
{
  uint32_t result;

  __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}
1590*150812a8SEvalZero
/** \brief Dual byte zero-extension and add (UXTAB16 instruction): zero-extends two byte lanes of op2 and adds them to the halfwords of op1.
    \param [in] op1  packed addend  \param [in] op2  packed source bytes  \return packed halfword sums */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1598*150812a8SEvalZero
/** \brief Dual byte sign-extension (SXTB16 instruction): sign-extends two byte lanes of op1 to halfwords.
    \param [in] op1  packed source  \return two packed sign-extended 16-bit halfwords */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __SXTB16(uint32_t op1)
{
  uint32_t result;

  __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}
1606*150812a8SEvalZero
/** \brief Dual byte sign-extension and add (SXTAB16 instruction): sign-extends two byte lanes of op2 and adds them to the halfwords of op1.
    \param [in] op1  packed addend  \param [in] op2  packed source bytes  \return packed halfword sums */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1614*150812a8SEvalZero
/** \brief Dual signed 16x16 multiply with addition of products (SMUAD instruction).
    \param [in] op1,op2  two packed signed 16-bit halfwords each  \return sum of the two halfword products */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __SMUAD  (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1622*150812a8SEvalZero
/** \brief Dual signed 16x16 multiply with addition of products, op2 halfwords exchanged (SMUADX instruction).
    \param [in] op1,op2  two packed signed 16-bit halfwords each  \return sum of the two cross products */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1630*150812a8SEvalZero
/** \brief Dual signed 16x16 multiply-accumulate (SMLAD instruction).
    \param [in] op1,op2  two packed signed 16-bit halfwords each  \param [in] op3  accumulation value
    \return op3 plus the sum of the two halfword products */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}
1638*150812a8SEvalZero
/** \brief Dual signed 16x16 multiply-accumulate, op2 halfwords exchanged (SMLADX instruction).
    \param [in] op1,op2  two packed signed 16-bit halfwords each  \param [in] op3  accumulation value
    \return op3 plus the sum of the two cross products */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}
1646*150812a8SEvalZero
/** \brief Dual signed 16x16 multiply with 64-bit accumulate (SMLALD instruction).
    \param [in] op1,op2  two packed signed 16-bit halfwords each  \param [in] acc  64-bit accumulator
    \return acc plus the sum of the two halfword products */
__attribute__((always_inline)) __STATIC_INLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc)
{
  /* SMLALD reads and writes the 64-bit accumulator as a register pair; the
     union maps that pair onto a uint64_t, with the word order selected for
     the target endianness.  The "0"/"1" input constraints tie the inputs to
     the same registers as the outputs (read-modify-write accumulator). */
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   /* Little endian */
  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               /* Big endian */
  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}
1663*150812a8SEvalZero
/** \brief Dual signed 16x16 multiply with 64-bit accumulate, op2 halfwords exchanged (SMLALDX instruction).
    \param [in] op1,op2  two packed signed 16-bit halfwords each  \param [in] acc  64-bit accumulator
    \return acc plus the sum of the two cross products */
__attribute__((always_inline)) __STATIC_INLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc)
{
  /* 64-bit accumulator passed/returned as a register pair via the union;
     word order depends on target endianness (see __SMLALD). */
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   /* Little endian */
  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               /* Big endian */
  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}
1680*150812a8SEvalZero
/** \brief Dual signed 16x16 multiply with subtraction of products (SMUSD instruction).
    \param [in] op1,op2  two packed signed 16-bit halfwords each  \return difference of the two halfword products */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __SMUSD  (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1688*150812a8SEvalZero
/** \brief Dual signed 16x16 multiply with subtraction of products, op2 halfwords exchanged (SMUSDX instruction).
    \param [in] op1,op2  two packed signed 16-bit halfwords each  \return difference of the two cross products */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1696*150812a8SEvalZero
/** \brief Dual signed 16x16 multiply-subtract with accumulate (SMLSD instruction).
    \param [in] op1,op2  two packed signed 16-bit halfwords each  \param [in] op3  accumulation value
    \return op3 plus the difference of the two halfword products */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}
1704*150812a8SEvalZero
/** \brief Dual signed 16x16 multiply-subtract with accumulate, op2 halfwords exchanged (SMLSDX instruction).
    \param [in] op1,op2  two packed signed 16-bit halfwords each  \param [in] op3  accumulation value
    \return op3 plus the difference of the two cross products */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}
1712*150812a8SEvalZero
/** \brief Dual signed 16x16 multiply-subtract with 64-bit accumulate (SMLSLD instruction).
    \param [in] op1,op2  two packed signed 16-bit halfwords each  \param [in] acc  64-bit accumulator
    \return acc plus the difference of the two halfword products */
__attribute__((always_inline)) __STATIC_INLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc)
{
  /* 64-bit accumulator passed/returned as a register pair via the union;
     word order depends on target endianness (see __SMLALD). */
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   /* Little endian */
  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               /* Big endian */
  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}
1729*150812a8SEvalZero
/** \brief Dual signed 16x16 multiply-subtract with 64-bit accumulate, op2 halfwords exchanged (SMLSLDX instruction).
    \param [in] op1,op2  two packed signed 16-bit halfwords each  \param [in] acc  64-bit accumulator
    \return acc plus the difference of the two cross products */
__attribute__((always_inline)) __STATIC_INLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc)
{
  /* 64-bit accumulator passed/returned as a register pair via the union;
     word order depends on target endianness (see __SMLALD). */
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   /* Little endian */
  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               /* Big endian */
  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}
1746*150812a8SEvalZero
/** \brief Byte-wise select (SEL instruction): picks each result byte from op1 or op2
    according to the APSR.GE flags set by a preceding SIMD operation.
    \param [in] op1,op2  candidate packed values  \return byte-wise selection */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __SEL  (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1754*150812a8SEvalZero
/** \brief 32-bit signed saturating addition (QADD instruction).
    \param [in] op1,op2  signed operands  \return saturated sum */
__attribute__((always_inline)) __STATIC_INLINE  int32_t __QADD( int32_t op1,  int32_t op2)
{
  int32_t result;

  __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1762*150812a8SEvalZero
/** \brief 32-bit signed saturating subtraction (QSUB instruction).
    \param [in] op1,op2  signed operands  \return saturated difference */
__attribute__((always_inline)) __STATIC_INLINE  int32_t __QSUB( int32_t op1,  int32_t op2)
{
  int32_t result;

  __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
1770*150812a8SEvalZero
/** \brief Pack halfwords, bottom-top (PKHBT instruction): bottom halfword of ARG1
    combined with the top halfword of ARG2 shifted left by ARG3.
    \note ARG3 uses the inline-asm "I" constraint, so it must be a compile-time immediate. */
#define __PKHBT(ARG1,ARG2,ARG3) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3)  ); \
  __RES; \
 })
1777*150812a8SEvalZero
/** \brief Pack halfwords, top-bottom (PKHTB instruction): top halfword of ARG1
    combined with the bottom halfword of ARG2 arithmetically shifted right by ARG3.
    \note ARG3 uses the inline-asm "I" constraint, so it must be a compile-time
          immediate; an ASR amount of 0 is not encodable, so the unshifted form
          of the instruction is emitted in that case. */
#define __PKHTB(ARG1,ARG2,ARG3) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  if ((ARG3) == 0) \
    __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2)  ); \
  else \
    __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3)  ); \
  __RES; \
 })
1787*150812a8SEvalZero
__SMMLA(int32_t op1,int32_t op2,int32_t op3)1788*150812a8SEvalZero __attribute__((always_inline)) __STATIC_INLINE uint32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
1789*150812a8SEvalZero {
1790*150812a8SEvalZero int32_t result;
1791*150812a8SEvalZero
1792*150812a8SEvalZero __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) );
1793*150812a8SEvalZero return(result);
1794*150812a8SEvalZero }
1795*150812a8SEvalZero
1796*150812a8SEvalZero #endif /* (__ARM_FEATURE_DSP == 1U) */
1797*150812a8SEvalZero /*@} end of group CMSIS_SIMD_intrinsics */
1798*150812a8SEvalZero
1799*150812a8SEvalZero
1800*150812a8SEvalZero #endif /* __CMSIS_ARMCC_V6_H */
1801