/**************************************************************************//**
 * @file     cmsis_gcc.h
 * @brief    CMSIS Cortex-M Core Function/Instruction Header File
 * @version  V4.30
 * @date     20. October 2015
 ******************************************************************************/
/* Copyright (c) 2009 - 2015 ARM LIMITED

   All rights reserved.
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:
   - Redistributions of source code must retain the above copyright
     notice, this list of conditions and the following disclaimer.
   - Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.
   - Neither the name of ARM nor the names of its contributors may be used
     to endorse or promote products derived from this software without
     specific prior written permission.
   *
   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   POSSIBILITY OF SUCH DAMAGE.
   ---------------------------------------------------------------------------*/


#ifndef __CMSIS_GCC_H
#define __CMSIS_GCC_H

/* ignore some GCC warnings */
#if defined ( __GNUC__ )
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsign-conversion"
#pragma GCC diagnostic ignored "-Wconversion"
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif


/* ###########################  Core Function Access  ########################### */
/** \ingroup  CMSIS_Core_FunctionInterface
    \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions
  @{
 */

/**
  \brief   Enable IRQ Interrupts
  \details Enables IRQ interrupts by clearing the I-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_irq(void)
{
  __ASM volatile ("cpsie i" : : : "memory");
}


/**
  \brief   Disable IRQ Interrupts
  \details Disables IRQ interrupts by setting the I-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_irq(void)
{
  __ASM volatile ("cpsid i" : : : "memory");
}


/**
  \brief   Get Control Register
  \details Returns the content of the Control Register.
  \return               Control Register value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_CONTROL(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, control" : "=r" (result) );
  return(result);
}


/**
  \brief   Set Control Register
  \details Writes the given value to the Control Register.
  \param [in]    control  Control Register value to set
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_CONTROL(uint32_t control)
{
  __ASM volatile ("MSR control, %0" : : "r" (control) : "memory");
}


/**
  \brief   Get IPSR Register
  \details Returns the content of the IPSR Register.
  \return               IPSR Register value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_IPSR(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, ipsr" : "=r" (result) );
  return(result);
}
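
/*
  Usage sketch (not part of CMSIS): __get_IPSR() can be used to detect whether
  code is currently running in an exception handler, since IPSR reads as zero
  in Thread mode and holds the active exception number otherwise. The helper
  name below is hypothetical.

    static uint32_t in_handler_mode(void)
    {
      return (__get_IPSR() != 0U);   // non-zero exception number => Handler mode
    }
*/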


/**
  \brief   Get APSR Register
  \details Returns the content of the APSR Register.
  \return               APSR Register value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_APSR(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, apsr" : "=r" (result) );
  return(result);
}


/**
  \brief   Get xPSR Register
  \details Returns the content of the xPSR Register.
  \return               xPSR Register value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_xPSR(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, xpsr" : "=r" (result) );
  return(result);
}


/**
  \brief   Get Process Stack Pointer
  \details Returns the current value of the Process Stack Pointer (PSP).
  \return               PSP Register value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_PSP(void)
{
  register uint32_t result;

  __ASM volatile ("MRS %0, psp\n"  : "=r" (result) );
  return(result);
}


/**
  \brief   Set Process Stack Pointer
  \details Assigns the given value to the Process Stack Pointer (PSP).
  \param [in]    topOfProcStack  Process Stack Pointer value to set
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_PSP(uint32_t topOfProcStack)
{
  __ASM volatile ("MSR psp, %0\n" : : "r" (topOfProcStack) : "sp");
}
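
/*
  Usage sketch (not part of CMSIS): a minimal example of switching Thread-mode
  execution onto the process stack. 'process_stack' is a hypothetical
  application-defined array; size and alignment are application choices.
  CONTROL bit 1 (SPSEL) selects PSP in Thread mode, and an ISB is required
  after writing CONTROL.

    static uint64_t process_stack[256];                 // 8-byte aligned stack area

    static void switch_to_process_stack(void)
    {
      __set_PSP((uint32_t)&process_stack[256]);         // top of stack, grows downwards
      __set_CONTROL(__get_CONTROL() | 0x02U);           // CONTROL.SPSEL = 1 -> use PSP
      __ISB();                                          // flush pipeline after CONTROL write
    }
*/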


/**
  \brief   Get Main Stack Pointer
  \details Returns the current value of the Main Stack Pointer (MSP).
  \return               MSP Register value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_MSP(void)
{
  register uint32_t result;

  __ASM volatile ("MRS %0, msp\n" : "=r" (result) );
  return(result);
}


/**
  \brief   Set Main Stack Pointer
  \details Assigns the given value to the Main Stack Pointer (MSP).
  \param [in]    topOfMainStack  Main Stack Pointer value to set
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_MSP(uint32_t topOfMainStack)
{
  __ASM volatile ("MSR msp, %0\n" : : "r" (topOfMainStack) : "sp");
}


/**
  \brief   Get Priority Mask
  \details Returns the current state of the priority mask bit from the Priority Mask Register.
  \return               Priority Mask value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_PRIMASK(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, primask" : "=r" (result) );
  return(result);
}


/**
  \brief   Set Priority Mask
  \details Assigns the given value to the Priority Mask Register.
  \param [in]    priMask  Priority Mask
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_PRIMASK(uint32_t priMask)
{
  __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory");
}
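
/*
  Usage sketch (not part of CMSIS): a nesting-safe critical section built from
  __get_PRIMASK(), __disable_irq() and __set_PRIMASK(). 'shared_counter' and
  the helper name are hypothetical.

    static volatile uint32_t shared_counter;

    static void increment_shared_counter(void)
    {
      uint32_t primask = __get_PRIMASK();   // remember current interrupt mask state
      __disable_irq();                      // mask all configurable-priority interrupts
      shared_counter++;                     // protected read-modify-write
      __set_PRIMASK(primask);               // restore; re-enables only if it was enabled before
    }
*/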


#if       (__CORTEX_M >= 0x03U)

/**
  \brief   Enable FIQ
  \details Enables FIQ interrupts by clearing the F-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_fault_irq(void)
{
  __ASM volatile ("cpsie f" : : : "memory");
}


/**
  \brief   Disable FIQ
  \details Disables FIQ interrupts by setting the F-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_fault_irq(void)
{
  __ASM volatile ("cpsid f" : : : "memory");
}


/**
  \brief   Get Base Priority
  \details Returns the current value of the Base Priority register.
  \return               Base Priority register value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_BASEPRI(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, basepri" : "=r" (result) );
  return(result);
}


/**
  \brief   Set Base Priority
  \details Assigns the given value to the Base Priority register.
  \param [in]    value  Base Priority value to set
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_BASEPRI(uint32_t value)
{
  __ASM volatile ("MSR basepri, %0" : : "r" (value) : "memory");
}


/**
  \brief   Set Base Priority with condition
  \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled,
           or the new value increases the BASEPRI priority level.
  \param [in]    value  Base Priority value to set
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_BASEPRI_MAX(uint32_t value)
{
  __ASM volatile ("MSR basepri_max, %0" : : "r" (value) : "memory");
}
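
/*
  Usage sketch (not part of CMSIS): masking only interrupts at or below a given
  priority with BASEPRI, so higher-priority (lower-numbered) interrupts stay
  enabled. The priority value must be shifted into the implemented priority
  bits; '__NVIC_PRIO_BITS' is assumed to come from the device header.

    static void lock_low_priority_irqs(void)
    {
      // mask priority levels 2 and below; levels 0 and 1 remain active
      __set_BASEPRI_MAX(2U << (8U - __NVIC_PRIO_BITS));
    }

    static void unlock_low_priority_irqs(void)
    {
      __set_BASEPRI(0U);   // 0 disables BASEPRI masking entirely
    }
*/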


/**
  \brief   Get Fault Mask
  \details Returns the current value of the Fault Mask register.
  \return               Fault Mask register value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FAULTMASK(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, faultmask" : "=r" (result) );
  return(result);
}


/**
  \brief   Set Fault Mask
  \details Assigns the given value to the Fault Mask register.
  \param [in]    faultMask  Fault Mask value to set
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_FAULTMASK(uint32_t faultMask)
{
  __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory");
}

#endif /* (__CORTEX_M >= 0x03U) */


#if       (__CORTEX_M == 0x04U) || (__CORTEX_M == 0x07U)

/**
  \brief   Get FPSCR
  \details Returns the current value of the Floating Point Status/Control register.
  \return               Floating Point Status/Control register value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FPSCR(void)
{
#if (__FPU_PRESENT == 1U) && (__FPU_USED == 1U)
  uint32_t result;

  /* Empty asm statement works as a scheduling barrier */
  __ASM volatile ("");
  __ASM volatile ("VMRS %0, fpscr" : "=r" (result) );
  __ASM volatile ("");
  return(result);
#else
   return(0);
#endif
}


/**
  \brief   Set FPSCR
  \details Assigns the given value to the Floating Point Status/Control register.
  \param [in]    fpscr  Floating Point Status/Control value to set
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_FPSCR(uint32_t fpscr)
{
#if (__FPU_PRESENT == 1U) && (__FPU_USED == 1U)
  /* Empty asm statement works as a scheduling barrier */
  __ASM volatile ("");
  __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc");
  __ASM volatile ("");
#endif
}
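
/*
  Usage sketch (not part of CMSIS): polling and clearing the cumulative
  floating-point exception flags. The flag positions assumed here (IOC/DZC/OFC/
  UFC/IXC in bits 4:0, IDC in bit 7) follow the Cortex-M4/M7 FPSCR layout;
  verify the mask against the core's reference manual.

    static uint32_t fetch_and_clear_fp_exceptions(void)
    {
      uint32_t fpscr = __get_FPSCR();
      __set_FPSCR(fpscr & ~0x0000009FU);   // clear cumulative exception flags
      return fpscr & 0x0000009FU;          // report the flags that were set
    }
*/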

#endif /* (__CORTEX_M == 0x04U) || (__CORTEX_M == 0x07U) */



/*@} end of CMSIS_Core_RegAccFunctions */


/* ##########################  Core Instruction Access  ######################### */
/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface
  Access to dedicated instructions
  @{
*/

/* Define macros for porting to both thumb1 and thumb2.
 * For thumb1, use low register (r0-r7), specified by constraint "l"
 * Otherwise, use general registers, specified by constraint "r" */
#if defined (__thumb__) && !defined (__thumb2__)
#define __CMSIS_GCC_OUT_REG(r) "=l" (r)
#define __CMSIS_GCC_USE_REG(r) "l" (r)
#else
#define __CMSIS_GCC_OUT_REG(r) "=r" (r)
#define __CMSIS_GCC_USE_REG(r) "r" (r)
#endif

/**
  \brief   No Operation
  \details No Operation does nothing. This instruction can be used for code alignment purposes.
 */
__attribute__((always_inline)) __STATIC_INLINE void __NOP(void)
{
  __ASM volatile ("nop");
}


/**
  \brief   Wait For Interrupt
  \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs.
 */
__attribute__((always_inline)) __STATIC_INLINE void __WFI(void)
{
  __ASM volatile ("wfi");
}


/**
  \brief   Wait For Event
  \details Wait For Event is a hint instruction that permits the processor to enter
           a low-power state until one of a number of events occurs.
 */
__attribute__((always_inline)) __STATIC_INLINE void __WFE(void)
{
  __ASM volatile ("wfe");
}


/**
  \brief   Send Event
  \details Send Event is a hint instruction. It causes an event to be signaled to the CPU.
 */
__attribute__((always_inline)) __STATIC_INLINE void __SEV(void)
{
  __ASM volatile ("sev");
}


/**
  \brief   Instruction Synchronization Barrier
  \details Instruction Synchronization Barrier flushes the pipeline in the processor,
           so that all instructions following the ISB are fetched from cache or memory,
           after the instruction has been completed.
 */
__attribute__((always_inline)) __STATIC_INLINE void __ISB(void)
{
  __ASM volatile ("isb 0xF":::"memory");
}


/**
  \brief   Data Synchronization Barrier
  \details Acts as a special kind of Data Memory Barrier.
           It completes when all explicit memory accesses before this instruction complete.
 */
__attribute__((always_inline)) __STATIC_INLINE void __DSB(void)
{
  __ASM volatile ("dsb 0xF":::"memory");
}


/**
  \brief   Data Memory Barrier
  \details Ensures the apparent order of the explicit memory operations before
           and after the instruction, without ensuring their completion.
 */
__attribute__((always_inline)) __STATIC_INLINE void __DMB(void)
{
  __ASM volatile ("dmb 0xF":::"memory");
}
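
/*
  Usage sketch (not part of CMSIS): the common barrier pattern when relocating
  the vector table. 'SCB' and 'ram_vector_table' are assumptions here: SCB is
  normally provided by the device header, and the RAM table is application
  defined.

    extern uint32_t ram_vector_table[];            // hypothetical relocated table

    static void relocate_vector_table(void)
    {
      SCB->VTOR = (uint32_t)ram_vector_table;      // point VTOR at the new table
      __DSB();                                     // ensure the write has completed
      __ISB();                                     // refetch with the new vector table in effect
    }
*/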


/**
  \brief   Reverse byte order (32 bit)
  \details Reverses the byte order in an integer value.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __REV(uint32_t value)
{
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
  return __builtin_bswap32(value);
#else
  uint32_t result;

  __ASM volatile ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
  return(result);
#endif
}


/**
  \brief   Reverse byte order (16 bit)
  \details Reverses the byte order in two unsigned short values.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __REV16(uint32_t value)
{
  uint32_t result;

  __ASM volatile ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
  return(result);
}


/**
  \brief   Reverse byte order in signed short value
  \details Reverses the byte order in a signed short value with sign extension to integer.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
__attribute__((always_inline)) __STATIC_INLINE int32_t __REVSH(int32_t value)
{
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
  return (short)__builtin_bswap16(value);
#else
  int32_t result;

  __ASM volatile ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
  return(result);
#endif
}
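
/*
  Usage sketch (not part of CMSIS): converting a 32-bit value between the
  little-endian Cortex-M byte order and big-endian (network) byte order with
  __REV. The helper name is hypothetical.

    static uint32_t to_network_order(uint32_t host_value)
    {
      return __REV(host_value);   // 0x12345678 becomes 0x78563412
    }
*/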


/**
  \brief   Rotate Right in unsigned value (32 bit)
  \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits.
  \param [in]    op1  Value to rotate
  \param [in]    op2  Number of Bits to rotate
  \return               Rotated value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
{
  return (op1 >> op2) | (op1 << (32U - op2));
}


/**
  \brief   Breakpoint
  \details Causes the processor to enter Debug state.
           Debug tools can use this to investigate system state when the instruction at a particular address is reached.
  \param [in]    value  is ignored by the processor.
                 If required, a debugger can use it to store additional information about the breakpoint.
 */
#define __BKPT(value)                       __ASM volatile ("bkpt "#value)


/**
  \brief   Reverse bit order of value
  \details Reverses the bit order of the given value.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __RBIT(uint32_t value)
{
  uint32_t result;

#if       (__CORTEX_M >= 0x03U) || (__CORTEX_SC >= 300U)
   __ASM volatile ("rbit %0, %1" : "=r" (result) : "r" (value) );
#else
  int32_t s = 4 /*sizeof(v)*/ * 8 - 1; /* extra shift needed at end */

  result = value;                      /* r will be reversed bits of v; first get LSB of v */
  for (value >>= 1U; value; value >>= 1U)
  {
    result <<= 1U;
    result |= value & 1U;
    s--;
  }
  result <<= s;                        /* shift when v's highest bits are zero */
#endif
  return(result);
}


/**
  \brief   Count leading zeros
  \details Counts the number of leading zeros of a data value.
  \param [in]  value  Value to count the leading zeros
  \return             number of leading zeros in value
 */
#define __CLZ             __builtin_clz
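
/*
  Usage sketch (not part of CMSIS): combining __RBIT and __CLZ to find the
  index of the lowest set bit, e.g. to pick the next pending flag from a
  bitmask. The helper name is hypothetical; the argument must be non-zero,
  because __builtin_clz(0) is undefined.

    static uint32_t lowest_set_bit(uint32_t mask)
    {
      return (uint32_t)__CLZ(__RBIT(mask));   // bit 0 -> 0, bit 31 -> 31
    }
*/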


#if       (__CORTEX_M >= 0x03U) || (__CORTEX_SC >= 300U)

/**
  \brief   LDR Exclusive (8 bit)
  \details Executes an exclusive LDR instruction for 8 bit value.
  \param [in]    ptr  Pointer to data
  \return             value of type uint8_t at (*ptr)
 */
__attribute__((always_inline)) __STATIC_INLINE uint8_t __LDREXB(volatile uint8_t *addr)
{
    uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
   __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) );
#else
    /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
       accepted by assembler. So has to use following less efficient pattern.
    */
   __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
   return ((uint8_t) result);    /* Add explicit type cast here */
}


/**
  \brief   LDR Exclusive (16 bit)
  \details Executes an exclusive LDR instruction for 16 bit values.
  \param [in]    ptr  Pointer to data
  \return        value of type uint16_t at (*ptr)
 */
__attribute__((always_inline)) __STATIC_INLINE uint16_t __LDREXH(volatile uint16_t *addr)
{
    uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
   __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) );
#else
    /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
       accepted by assembler. So has to use following less efficient pattern.
    */
   __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
   return ((uint16_t) result);    /* Add explicit type cast here */
}


/**
  \brief   LDR Exclusive (32 bit)
  \details Executes an exclusive LDR instruction for 32 bit values.
  \param [in]    ptr  Pointer to data
  \return        value of type uint32_t at (*ptr)
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __LDREXW(volatile uint32_t *addr)
{
    uint32_t result;

   __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) );
   return(result);
}


/**
  \brief   STR Exclusive (8 bit)
  \details Executes an exclusive STR instruction for 8 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr)
{
   uint32_t result;

   __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
   return(result);
}


/**
  \brief   STR Exclusive (16 bit)
  \details Executes an exclusive STR instruction for 16 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr)
{
   uint32_t result;

   __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
   return(result);
}


/**
  \brief   STR Exclusive (32 bit)
  \details Executes an exclusive STR instruction for 32 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr)
{
   uint32_t result;

   __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) );
   return(result);
}


/**
  \brief   Remove the exclusive lock
  \details Removes the exclusive lock which is created by LDREX.
 */
__attribute__((always_inline)) __STATIC_INLINE void __CLREX(void)
{
  __ASM volatile ("clrex" ::: "memory");
}
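
/*
  Usage sketch (not part of CMSIS): a lock-free atomic increment built from the
  exclusive-access pair. The loop retries until the store succeeds (returns 0),
  i.e. until no other access broke the exclusive reservation. 'counter' is a
  hypothetical shared variable.

    static volatile uint32_t counter;

    static void atomic_increment(void)
    {
      uint32_t value;
      do {
        value = __LDREXW(&counter);              // load and set exclusive monitor
      } while (__STREXW(value + 1U, &counter));  // store; non-zero result means retry
    }
*/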


/**
  \brief   Signed Saturate
  \details Saturates a signed value.
  \param [in]  value  Value to be saturated
  \param [in]    sat  Bit position to saturate to (1..32)
  \return             Saturated value
 */
#define __SSAT(ARG1,ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("ssat %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })


/**
  \brief   Unsigned Saturate
  \details Saturates an unsigned value.
  \param [in]  value  Value to be saturated
  \param [in]    sat  Bit position to saturate to (0..31)
  \return             Saturated value
 */
#define __USAT(ARG1,ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("usat %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })
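
/*
  Usage sketch (not part of CMSIS): clamping a 32-bit intermediate result to
  the signed 16-bit range with __SSAT, e.g. before writing an audio sample back
  to a 16-bit buffer. The helper name is hypothetical.

    static int16_t clamp_to_q15(int32_t sample)
    {
      return (int16_t)__SSAT(sample, 16);   // saturates to -32768..32767
    }
*/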


/**
  \brief   Rotate Right with Extend (32 bit)
  \details Moves each bit of a bitstring right by one bit.
           The carry input is shifted in at the left end of the bitstring.
  \param [in]    value  Value to rotate
  \return               Rotated value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __RRX(uint32_t value)
{
  uint32_t result;

  __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
  return(result);
}


/**
  \brief   LDRT Unprivileged (8 bit)
  \details Executes an Unprivileged LDRT instruction for 8 bit value.
  \param [in]    ptr  Pointer to data
  \return             value of type uint8_t at (*ptr)
 */
__attribute__((always_inline)) __STATIC_INLINE uint8_t __LDRBT(volatile uint8_t *addr)
{
    uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
   __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*addr) );
#else
    /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
       accepted by assembler. So has to use following less efficient pattern.
    */
   __ASM volatile ("ldrbt %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
   return ((uint8_t) result);    /* Add explicit type cast here */
}


/**
  \brief   LDRT Unprivileged (16 bit)
  \details Executes an Unprivileged LDRT instruction for 16 bit values.
  \param [in]    ptr  Pointer to data
  \return        value of type uint16_t at (*ptr)
 */
__attribute__((always_inline)) __STATIC_INLINE uint16_t __LDRHT(volatile uint16_t *addr)
{
    uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
   __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*addr) );
#else
    /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
       accepted by assembler. So has to use following less efficient pattern.
    */
   __ASM volatile ("ldrht %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
   return ((uint16_t) result);    /* Add explicit type cast here */
}


/**
  \brief   LDRT Unprivileged (32 bit)
  \details Executes an Unprivileged LDRT instruction for 32 bit values.
  \param [in]    ptr  Pointer to data
  \return        value of type uint32_t at (*ptr)
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __LDRT(volatile uint32_t *addr)
{
    uint32_t result;

   __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*addr) );
   return(result);
}


/**
  \brief   STRT Unprivileged (8 bit)
  \details Executes an Unprivileged STRT instruction for 8 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
 */
__attribute__((always_inline)) __STATIC_INLINE void __STRBT(uint8_t value, volatile uint8_t *addr)
{
   __ASM volatile ("strbt %1, %0" : "=Q" (*addr) : "r" ((uint32_t)value) );
}


/**
  \brief   STRT Unprivileged (16 bit)
  \details Executes an Unprivileged STRT instruction for 16 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
 */
__attribute__((always_inline)) __STATIC_INLINE void __STRHT(uint16_t value, volatile uint16_t *addr)
{
   __ASM volatile ("strht %1, %0" : "=Q" (*addr) : "r" ((uint32_t)value) );
}


/**
  \brief   STRT Unprivileged (32 bit)
  \details Executes an Unprivileged STRT instruction for 32 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
 */
__attribute__((always_inline)) __STATIC_INLINE void __STRT(uint32_t value, volatile uint32_t *addr)
{
   __ASM volatile ("strt %1, %0" : "=Q" (*addr) : "r" (value) );
}

#endif /* (__CORTEX_M >= 0x03U) || (__CORTEX_SC >= 300U) */

/*@}*/ /* end of group CMSIS_Core_InstructionInterface */


/* ###################  Compiler specific Intrinsics  ########################### */
/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
  Access to dedicated SIMD instructions
  @{
*/

#if (__CORTEX_M >= 0x04U)  /* only for Cortex-M4 and above */

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
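
/*
  Usage sketch (not part of CMSIS): the byte-wise SIMD intrinsics operate on
  four packed 8-bit lanes at once. __UHADD8, for example, computes the halving
  unsigned add of each lane pair, which is handy for blending pixel data. The
  helper name is hypothetical.

    static uint32_t blend_four_pixels(uint32_t rgba_a, uint32_t rgba_b)
    {
      return __UHADD8(rgba_a, rgba_b);   // per-byte (a + b) / 2, no overflow
    }
*/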
879*e8380792SMatthias Ringwald 
880*e8380792SMatthias Ringwald 
__SSUB8(uint32_t op1,uint32_t op2)881*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)
882*e8380792SMatthias Ringwald {
883*e8380792SMatthias Ringwald   uint32_t result;
884*e8380792SMatthias Ringwald 
885*e8380792SMatthias Ringwald   __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
886*e8380792SMatthias Ringwald   return(result);
887*e8380792SMatthias Ringwald }
888*e8380792SMatthias Ringwald 
__QSUB8(uint32_t op1,uint32_t op2)889*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
890*e8380792SMatthias Ringwald {
891*e8380792SMatthias Ringwald   uint32_t result;
892*e8380792SMatthias Ringwald 
893*e8380792SMatthias Ringwald   __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
894*e8380792SMatthias Ringwald   return(result);
895*e8380792SMatthias Ringwald }
896*e8380792SMatthias Ringwald 
__SHSUB8(uint32_t op1,uint32_t op2)897*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
898*e8380792SMatthias Ringwald {
899*e8380792SMatthias Ringwald   uint32_t result;
900*e8380792SMatthias Ringwald 
901*e8380792SMatthias Ringwald   __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
902*e8380792SMatthias Ringwald   return(result);
903*e8380792SMatthias Ringwald }
904*e8380792SMatthias Ringwald 
__USUB8(uint32_t op1,uint32_t op2)905*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
906*e8380792SMatthias Ringwald {
907*e8380792SMatthias Ringwald   uint32_t result;
908*e8380792SMatthias Ringwald 
909*e8380792SMatthias Ringwald   __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
910*e8380792SMatthias Ringwald   return(result);
911*e8380792SMatthias Ringwald }
912*e8380792SMatthias Ringwald 
__UQSUB8(uint32_t op1,uint32_t op2)913*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
914*e8380792SMatthias Ringwald {
915*e8380792SMatthias Ringwald   uint32_t result;
916*e8380792SMatthias Ringwald 
917*e8380792SMatthias Ringwald   __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
918*e8380792SMatthias Ringwald   return(result);
919*e8380792SMatthias Ringwald }
920*e8380792SMatthias Ringwald 
921*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
922*e8380792SMatthias Ringwald {
923*e8380792SMatthias Ringwald   uint32_t result;
924*e8380792SMatthias Ringwald 
925*e8380792SMatthias Ringwald   __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
926*e8380792SMatthias Ringwald   return(result);
927*e8380792SMatthias Ringwald }
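/* Usage sketch (illustrative only, not part of the original CMSIS header):
   the byte-wise intrinsics above operate on four packed 8-bit lanes at once.
   __UADD8 wraps per lane, __UQADD8 saturates each lane at 0xFF, and __UHADD8
   halves the per-lane sum.  The helper name below is hypothetical. */
#if 0
static uint32_t example_saturating_blend(uint32_t rgba_a, uint32_t rgba_b)
{
  /* Per-byte saturating add: 0xF0 + 0x20 -> 0xFF in that lane,
     with no carry into the neighbouring byte. */
  return __UQADD8(rgba_a, rgba_b);
}
#endif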
928*e8380792SMatthias Ringwald 
929*e8380792SMatthias Ringwald 
930*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
931*e8380792SMatthias Ringwald {
932*e8380792SMatthias Ringwald   uint32_t result;
933*e8380792SMatthias Ringwald 
934*e8380792SMatthias Ringwald   __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
935*e8380792SMatthias Ringwald   return(result);
936*e8380792SMatthias Ringwald }
937*e8380792SMatthias Ringwald 
938*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
939*e8380792SMatthias Ringwald {
940*e8380792SMatthias Ringwald   uint32_t result;
941*e8380792SMatthias Ringwald 
942*e8380792SMatthias Ringwald   __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
943*e8380792SMatthias Ringwald   return(result);
944*e8380792SMatthias Ringwald }
945*e8380792SMatthias Ringwald 
946*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
947*e8380792SMatthias Ringwald {
948*e8380792SMatthias Ringwald   uint32_t result;
949*e8380792SMatthias Ringwald 
950*e8380792SMatthias Ringwald   __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
951*e8380792SMatthias Ringwald   return(result);
952*e8380792SMatthias Ringwald }
953*e8380792SMatthias Ringwald 
954*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
955*e8380792SMatthias Ringwald {
956*e8380792SMatthias Ringwald   uint32_t result;
957*e8380792SMatthias Ringwald 
958*e8380792SMatthias Ringwald   __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
959*e8380792SMatthias Ringwald   return(result);
960*e8380792SMatthias Ringwald }
961*e8380792SMatthias Ringwald 
962*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
963*e8380792SMatthias Ringwald {
964*e8380792SMatthias Ringwald   uint32_t result;
965*e8380792SMatthias Ringwald 
966*e8380792SMatthias Ringwald   __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
967*e8380792SMatthias Ringwald   return(result);
968*e8380792SMatthias Ringwald }
969*e8380792SMatthias Ringwald 
970*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
971*e8380792SMatthias Ringwald {
972*e8380792SMatthias Ringwald   uint32_t result;
973*e8380792SMatthias Ringwald 
974*e8380792SMatthias Ringwald   __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
975*e8380792SMatthias Ringwald   return(result);
976*e8380792SMatthias Ringwald }
977*e8380792SMatthias Ringwald 
978*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
979*e8380792SMatthias Ringwald {
980*e8380792SMatthias Ringwald   uint32_t result;
981*e8380792SMatthias Ringwald 
982*e8380792SMatthias Ringwald   __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
983*e8380792SMatthias Ringwald   return(result);
984*e8380792SMatthias Ringwald }
985*e8380792SMatthias Ringwald 
986*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
987*e8380792SMatthias Ringwald {
988*e8380792SMatthias Ringwald   uint32_t result;
989*e8380792SMatthias Ringwald 
990*e8380792SMatthias Ringwald   __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
991*e8380792SMatthias Ringwald   return(result);
992*e8380792SMatthias Ringwald }
993*e8380792SMatthias Ringwald 
994*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
995*e8380792SMatthias Ringwald {
996*e8380792SMatthias Ringwald   uint32_t result;
997*e8380792SMatthias Ringwald 
998*e8380792SMatthias Ringwald   __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
999*e8380792SMatthias Ringwald   return(result);
1000*e8380792SMatthias Ringwald }
1001*e8380792SMatthias Ringwald 
1002*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
1003*e8380792SMatthias Ringwald {
1004*e8380792SMatthias Ringwald   uint32_t result;
1005*e8380792SMatthias Ringwald 
1006*e8380792SMatthias Ringwald   __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1007*e8380792SMatthias Ringwald   return(result);
1008*e8380792SMatthias Ringwald }
1009*e8380792SMatthias Ringwald 
1010*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
1011*e8380792SMatthias Ringwald {
1012*e8380792SMatthias Ringwald   uint32_t result;
1013*e8380792SMatthias Ringwald 
1014*e8380792SMatthias Ringwald   __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1015*e8380792SMatthias Ringwald   return(result);
1016*e8380792SMatthias Ringwald }
1017*e8380792SMatthias Ringwald 
1018*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
1019*e8380792SMatthias Ringwald {
1020*e8380792SMatthias Ringwald   uint32_t result;
1021*e8380792SMatthias Ringwald 
1022*e8380792SMatthias Ringwald   __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1023*e8380792SMatthias Ringwald   return(result);
1024*e8380792SMatthias Ringwald }
1025*e8380792SMatthias Ringwald 
1026*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
1027*e8380792SMatthias Ringwald {
1028*e8380792SMatthias Ringwald   uint32_t result;
1029*e8380792SMatthias Ringwald 
1030*e8380792SMatthias Ringwald   __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1031*e8380792SMatthias Ringwald   return(result);
1032*e8380792SMatthias Ringwald }
1033*e8380792SMatthias Ringwald 
1034*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
1035*e8380792SMatthias Ringwald {
1036*e8380792SMatthias Ringwald   uint32_t result;
1037*e8380792SMatthias Ringwald 
1038*e8380792SMatthias Ringwald   __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1039*e8380792SMatthias Ringwald   return(result);
1040*e8380792SMatthias Ringwald }
1041*e8380792SMatthias Ringwald 
1042*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
1043*e8380792SMatthias Ringwald {
1044*e8380792SMatthias Ringwald   uint32_t result;
1045*e8380792SMatthias Ringwald 
1046*e8380792SMatthias Ringwald   __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1047*e8380792SMatthias Ringwald   return(result);
1048*e8380792SMatthias Ringwald }
1049*e8380792SMatthias Ringwald 
1050*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
1051*e8380792SMatthias Ringwald {
1052*e8380792SMatthias Ringwald   uint32_t result;
1053*e8380792SMatthias Ringwald 
1054*e8380792SMatthias Ringwald   __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1055*e8380792SMatthias Ringwald   return(result);
1056*e8380792SMatthias Ringwald }
1057*e8380792SMatthias Ringwald 
1058*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
1059*e8380792SMatthias Ringwald {
1060*e8380792SMatthias Ringwald   uint32_t result;
1061*e8380792SMatthias Ringwald 
1062*e8380792SMatthias Ringwald   __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1063*e8380792SMatthias Ringwald   return(result);
1064*e8380792SMatthias Ringwald }
1065*e8380792SMatthias Ringwald 
1066*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
1067*e8380792SMatthias Ringwald {
1068*e8380792SMatthias Ringwald   uint32_t result;
1069*e8380792SMatthias Ringwald 
1070*e8380792SMatthias Ringwald   __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1071*e8380792SMatthias Ringwald   return(result);
1072*e8380792SMatthias Ringwald }
1073*e8380792SMatthias Ringwald 
1074*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
1075*e8380792SMatthias Ringwald {
1076*e8380792SMatthias Ringwald   uint32_t result;
1077*e8380792SMatthias Ringwald 
1078*e8380792SMatthias Ringwald   __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1079*e8380792SMatthias Ringwald   return(result);
1080*e8380792SMatthias Ringwald }
1081*e8380792SMatthias Ringwald 
1082*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
1083*e8380792SMatthias Ringwald {
1084*e8380792SMatthias Ringwald   uint32_t result;
1085*e8380792SMatthias Ringwald 
1086*e8380792SMatthias Ringwald   __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1087*e8380792SMatthias Ringwald   return(result);
1088*e8380792SMatthias Ringwald }
1089*e8380792SMatthias Ringwald 
1090*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
1091*e8380792SMatthias Ringwald {
1092*e8380792SMatthias Ringwald   uint32_t result;
1093*e8380792SMatthias Ringwald 
1094*e8380792SMatthias Ringwald   __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1095*e8380792SMatthias Ringwald   return(result);
1096*e8380792SMatthias Ringwald }
1097*e8380792SMatthias Ringwald 
1098*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
1099*e8380792SMatthias Ringwald {
1100*e8380792SMatthias Ringwald   uint32_t result;
1101*e8380792SMatthias Ringwald 
1102*e8380792SMatthias Ringwald   __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1103*e8380792SMatthias Ringwald   return(result);
1104*e8380792SMatthias Ringwald }
1105*e8380792SMatthias Ringwald 
1106*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
1107*e8380792SMatthias Ringwald {
1108*e8380792SMatthias Ringwald   uint32_t result;
1109*e8380792SMatthias Ringwald 
1110*e8380792SMatthias Ringwald   __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1111*e8380792SMatthias Ringwald   return(result);
1112*e8380792SMatthias Ringwald }
1113*e8380792SMatthias Ringwald 
1114*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
1115*e8380792SMatthias Ringwald {
1116*e8380792SMatthias Ringwald   uint32_t result;
1117*e8380792SMatthias Ringwald 
1118*e8380792SMatthias Ringwald   __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1119*e8380792SMatthias Ringwald   return(result);
1120*e8380792SMatthias Ringwald }
1121*e8380792SMatthias Ringwald 
1122*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
1123*e8380792SMatthias Ringwald {
1124*e8380792SMatthias Ringwald   uint32_t result;
1125*e8380792SMatthias Ringwald 
1126*e8380792SMatthias Ringwald   __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1127*e8380792SMatthias Ringwald   return(result);
1128*e8380792SMatthias Ringwald }
1129*e8380792SMatthias Ringwald 
1130*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
1131*e8380792SMatthias Ringwald {
1132*e8380792SMatthias Ringwald   uint32_t result;
1133*e8380792SMatthias Ringwald 
1134*e8380792SMatthias Ringwald   __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
1135*e8380792SMatthias Ringwald   return(result);
1136*e8380792SMatthias Ringwald }
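/* Usage sketch (illustrative only, not part of the original CMSIS header):
   __USAD8 returns the sum of absolute differences of the four byte lanes and
   __USADA8 adds that sum to a running accumulator - the usual building block
   of block-matching kernels.  The helper name below is hypothetical. */
#if 0
static uint32_t example_sad16(const uint32_t *a, const uint32_t *b)
{
  uint32_t acc = 0U;
  uint32_t i;
  for (i = 0U; i < 4U; i++)            /* 4 words = 16 bytes */
  {
    acc = __USADA8(a[i], b[i], acc);   /* acc += sum of |a_byte - b_byte| */
  }
  return acc;
}
#endif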
1137*e8380792SMatthias Ringwald 
1138*e8380792SMatthias Ringwald #define __SSAT16(ARG1,ARG2) \
1139*e8380792SMatthias Ringwald ({                          \
1140*e8380792SMatthias Ringwald   int32_t __RES, __ARG1 = (ARG1); \
1141*e8380792SMatthias Ringwald   __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
1142*e8380792SMatthias Ringwald   __RES; \
1143*e8380792SMatthias Ringwald  })
1144*e8380792SMatthias Ringwald 
1145*e8380792SMatthias Ringwald #define __USAT16(ARG1,ARG2) \
1146*e8380792SMatthias Ringwald ({                          \
1147*e8380792SMatthias Ringwald   uint32_t __RES, __ARG1 = (ARG1); \
1148*e8380792SMatthias Ringwald   __ASM ("usat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
1149*e8380792SMatthias Ringwald   __RES; \
1150*e8380792SMatthias Ringwald  })
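/* Usage sketch (illustrative only, not part of the original CMSIS header):
   __SSAT16/__USAT16 saturate both halfwords of the operand to the given bit
   width; the width must be a compile-time constant because it is encoded in
   the instruction ("I" constraint).  The helper name below is hypothetical. */
#if 0
static int32_t example_clamp_halfwords(int32_t packed)
{
  return __SSAT16(packed, 9);   /* each signed halfword clamped to [-256, 255] */
}
#endif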
1151*e8380792SMatthias Ringwald 
1152*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTB16(uint32_t op1)
1153*e8380792SMatthias Ringwald {
1154*e8380792SMatthias Ringwald   uint32_t result;
1155*e8380792SMatthias Ringwald 
1156*e8380792SMatthias Ringwald   __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
1157*e8380792SMatthias Ringwald   return(result);
1158*e8380792SMatthias Ringwald }
1159*e8380792SMatthias Ringwald 
1160*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
1161*e8380792SMatthias Ringwald {
1162*e8380792SMatthias Ringwald   uint32_t result;
1163*e8380792SMatthias Ringwald 
1164*e8380792SMatthias Ringwald   __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1165*e8380792SMatthias Ringwald   return(result);
1166*e8380792SMatthias Ringwald }
1167*e8380792SMatthias Ringwald 
1168*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTB16(uint32_t op1)
1169*e8380792SMatthias Ringwald {
1170*e8380792SMatthias Ringwald   uint32_t result;
1171*e8380792SMatthias Ringwald 
1172*e8380792SMatthias Ringwald   __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
1173*e8380792SMatthias Ringwald   return(result);
1174*e8380792SMatthias Ringwald }
1175*e8380792SMatthias Ringwald 
1176*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
1177*e8380792SMatthias Ringwald {
1178*e8380792SMatthias Ringwald   uint32_t result;
1179*e8380792SMatthias Ringwald 
1180*e8380792SMatthias Ringwald   __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1181*e8380792SMatthias Ringwald   return(result);
1182*e8380792SMatthias Ringwald }
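/* Usage sketch (illustrative only, not part of the original CMSIS header):
   __UXTB16 zero-extends bytes 0 and 2 of its operand into the two halfwords
   of the result, so together with a byte shift it expands four packed 8-bit
   samples into two halfword pairs for the 16-bit SIMD intrinsics above.
   The helper name below is hypothetical. */
#if 0
static void example_unpack_bytes(uint32_t packed, uint32_t *even, uint32_t *odd)
{
  *even = __UXTB16(packed);        /* bytes 0 and 2 -> halfwords 0 and 1 */
  *odd  = __UXTB16(packed >> 8);   /* bytes 1 and 3 -> halfwords 0 and 1 */
}
#endif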
1183*e8380792SMatthias Ringwald 
1184*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUAD  (uint32_t op1, uint32_t op2)
1185*e8380792SMatthias Ringwald {
1186*e8380792SMatthias Ringwald   uint32_t result;
1187*e8380792SMatthias Ringwald 
1188*e8380792SMatthias Ringwald   __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1189*e8380792SMatthias Ringwald   return(result);
1190*e8380792SMatthias Ringwald }
1191*e8380792SMatthias Ringwald 
1192*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
1193*e8380792SMatthias Ringwald {
1194*e8380792SMatthias Ringwald   uint32_t result;
1195*e8380792SMatthias Ringwald 
1196*e8380792SMatthias Ringwald   __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1197*e8380792SMatthias Ringwald   return(result);
1198*e8380792SMatthias Ringwald }
1199*e8380792SMatthias Ringwald 
1200*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
1201*e8380792SMatthias Ringwald {
1202*e8380792SMatthias Ringwald   uint32_t result;
1203*e8380792SMatthias Ringwald 
1204*e8380792SMatthias Ringwald   __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
1205*e8380792SMatthias Ringwald   return(result);
1206*e8380792SMatthias Ringwald }
1207*e8380792SMatthias Ringwald 
1208*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
1209*e8380792SMatthias Ringwald {
1210*e8380792SMatthias Ringwald   uint32_t result;
1211*e8380792SMatthias Ringwald 
1212*e8380792SMatthias Ringwald   __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
1213*e8380792SMatthias Ringwald   return(result);
1214*e8380792SMatthias Ringwald }
1215*e8380792SMatthias Ringwald 
1216*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc)
1217*e8380792SMatthias Ringwald {
1218*e8380792SMatthias Ringwald   union llreg_u{
1219*e8380792SMatthias Ringwald     uint32_t w32[2];
1220*e8380792SMatthias Ringwald     uint64_t w64;
1221*e8380792SMatthias Ringwald   } llr;
1222*e8380792SMatthias Ringwald   llr.w64 = acc;
1223*e8380792SMatthias Ringwald 
1224*e8380792SMatthias Ringwald #ifndef __ARMEB__   /* Little endian */
1225*e8380792SMatthias Ringwald   __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
1226*e8380792SMatthias Ringwald #else               /* Big endian */
1227*e8380792SMatthias Ringwald   __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
1228*e8380792SMatthias Ringwald #endif
1229*e8380792SMatthias Ringwald 
1230*e8380792SMatthias Ringwald   return(llr.w64);
1231*e8380792SMatthias Ringwald }
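/* Usage sketch (illustrative only, not part of the original CMSIS header):
   __SMLAD multiplies the two signed halfword pairs and adds both products to
   a 32-bit accumulator; __SMLALD does the same with a 64-bit accumulator,
   which is the typical inner step of a Q15 dot product.  The helper name
   below is hypothetical. */
#if 0
static int64_t example_dot_q15(const uint32_t *x, const uint32_t *y, uint32_t pairs)
{
  uint64_t acc = 0U;
  uint32_t i;
  for (i = 0U; i < pairs; i++)
  {
    acc = __SMLALD(x[i], y[i], acc);   /* acc += x_lo*y_lo + x_hi*y_hi */
  }
  return (int64_t)acc;
}
#endif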
1232*e8380792SMatthias Ringwald 
1233*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc)
1234*e8380792SMatthias Ringwald {
1235*e8380792SMatthias Ringwald   union llreg_u{
1236*e8380792SMatthias Ringwald     uint32_t w32[2];
1237*e8380792SMatthias Ringwald     uint64_t w64;
1238*e8380792SMatthias Ringwald   } llr;
1239*e8380792SMatthias Ringwald   llr.w64 = acc;
1240*e8380792SMatthias Ringwald 
1241*e8380792SMatthias Ringwald #ifndef __ARMEB__   /* Little endian */
1242*e8380792SMatthias Ringwald   __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
1243*e8380792SMatthias Ringwald #else               /* Big endian */
1244*e8380792SMatthias Ringwald   __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
1245*e8380792SMatthias Ringwald #endif
1246*e8380792SMatthias Ringwald 
1247*e8380792SMatthias Ringwald   return(llr.w64);
1248*e8380792SMatthias Ringwald }
1249*e8380792SMatthias Ringwald 
1250*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSD  (uint32_t op1, uint32_t op2)
1251*e8380792SMatthias Ringwald {
1252*e8380792SMatthias Ringwald   uint32_t result;
1253*e8380792SMatthias Ringwald 
1254*e8380792SMatthias Ringwald   __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1255*e8380792SMatthias Ringwald   return(result);
1256*e8380792SMatthias Ringwald }
1257*e8380792SMatthias Ringwald 
1258*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
1259*e8380792SMatthias Ringwald {
1260*e8380792SMatthias Ringwald   uint32_t result;
1261*e8380792SMatthias Ringwald 
1262*e8380792SMatthias Ringwald   __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1263*e8380792SMatthias Ringwald   return(result);
1264*e8380792SMatthias Ringwald }
1265*e8380792SMatthias Ringwald 
1266*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
1267*e8380792SMatthias Ringwald {
1268*e8380792SMatthias Ringwald   uint32_t result;
1269*e8380792SMatthias Ringwald 
1270*e8380792SMatthias Ringwald   __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
1271*e8380792SMatthias Ringwald   return(result);
1272*e8380792SMatthias Ringwald }
1273*e8380792SMatthias Ringwald 
1274*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
1275*e8380792SMatthias Ringwald {
1276*e8380792SMatthias Ringwald   uint32_t result;
1277*e8380792SMatthias Ringwald 
1278*e8380792SMatthias Ringwald   __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
1279*e8380792SMatthias Ringwald   return(result);
1280*e8380792SMatthias Ringwald }
1281*e8380792SMatthias Ringwald 
1282*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc)
1283*e8380792SMatthias Ringwald {
1284*e8380792SMatthias Ringwald   union llreg_u{
1285*e8380792SMatthias Ringwald     uint32_t w32[2];
1286*e8380792SMatthias Ringwald     uint64_t w64;
1287*e8380792SMatthias Ringwald   } llr;
1288*e8380792SMatthias Ringwald   llr.w64 = acc;
1289*e8380792SMatthias Ringwald 
1290*e8380792SMatthias Ringwald #ifndef __ARMEB__   /* Little endian */
1291*e8380792SMatthias Ringwald   __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
1292*e8380792SMatthias Ringwald #else               /* Big endian */
1293*e8380792SMatthias Ringwald   __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
1294*e8380792SMatthias Ringwald #endif
1295*e8380792SMatthias Ringwald 
1296*e8380792SMatthias Ringwald   return(llr.w64);
1297*e8380792SMatthias Ringwald }
1298*e8380792SMatthias Ringwald 
1299*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc)
1300*e8380792SMatthias Ringwald {
1301*e8380792SMatthias Ringwald   union llreg_u{
1302*e8380792SMatthias Ringwald     uint32_t w32[2];
1303*e8380792SMatthias Ringwald     uint64_t w64;
1304*e8380792SMatthias Ringwald   } llr;
1305*e8380792SMatthias Ringwald   llr.w64 = acc;
1306*e8380792SMatthias Ringwald 
1307*e8380792SMatthias Ringwald #ifndef __ARMEB__   /* Little endian */
1308*e8380792SMatthias Ringwald   __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
1309*e8380792SMatthias Ringwald #else               /* Big endian */
1310*e8380792SMatthias Ringwald   __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
1311*e8380792SMatthias Ringwald #endif
1312*e8380792SMatthias Ringwald 
1313*e8380792SMatthias Ringwald   return(llr.w64);
1314*e8380792SMatthias Ringwald }
1315*e8380792SMatthias Ringwald 
1316*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SEL  (uint32_t op1, uint32_t op2)
1317*e8380792SMatthias Ringwald {
1318*e8380792SMatthias Ringwald   uint32_t result;
1319*e8380792SMatthias Ringwald 
1320*e8380792SMatthias Ringwald   __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1321*e8380792SMatthias Ringwald   return(result);
1322*e8380792SMatthias Ringwald }
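/* Usage sketch (illustrative only, not part of the original CMSIS header):
   __SEL picks each byte from its first or second operand according to the GE
   flags left by the preceding SIMD add/subtract, giving branch-free per-byte
   selection.  The helper name below is hypothetical. */
#if 0
static uint32_t example_max_bytes(uint32_t a, uint32_t b)
{
  (void)__USUB8(a, b);   /* sets GE[n] where byte n of a is >= byte n of b */
  return __SEL(a, b);    /* per byte: GE set -> take a, else take b */
}
#endif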
1323*e8380792SMatthias Ringwald 
1324*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE  int32_t __QADD( int32_t op1,  int32_t op2)
1325*e8380792SMatthias Ringwald {
1326*e8380792SMatthias Ringwald   int32_t result;
1327*e8380792SMatthias Ringwald 
1328*e8380792SMatthias Ringwald   __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1329*e8380792SMatthias Ringwald   return(result);
1330*e8380792SMatthias Ringwald }
1331*e8380792SMatthias Ringwald 
1332*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE  int32_t __QSUB( int32_t op1,  int32_t op2)
1333*e8380792SMatthias Ringwald {
1334*e8380792SMatthias Ringwald   int32_t result;
1335*e8380792SMatthias Ringwald 
1336*e8380792SMatthias Ringwald   __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1337*e8380792SMatthias Ringwald   return(result);
1338*e8380792SMatthias Ringwald }
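/* Usage sketch (illustrative only, not part of the original CMSIS header):
   __QADD and __QSUB perform 32-bit saturating arithmetic and set the Q flag
   on saturation, which suits fixed-point accumulators that must not wrap.
   The helper name below is hypothetical. */
#if 0
static int32_t example_saturating_acc(int32_t acc, int32_t sample)
{
  return __QADD(acc, sample);   /* clamps at INT32_MIN / INT32_MAX */
}
#endif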
1339*e8380792SMatthias Ringwald 
1340*e8380792SMatthias Ringwald #define __PKHBT(ARG1,ARG2,ARG3) \
1341*e8380792SMatthias Ringwald ({                          \
1342*e8380792SMatthias Ringwald   uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
1343*e8380792SMatthias Ringwald   __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2), "I" (ARG3)  ); \
1344*e8380792SMatthias Ringwald   __RES; \
1345*e8380792SMatthias Ringwald  })
1346*e8380792SMatthias Ringwald 
1347*e8380792SMatthias Ringwald #define __PKHTB(ARG1,ARG2,ARG3) \
1348*e8380792SMatthias Ringwald ({                          \
1349*e8380792SMatthias Ringwald   uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
1350*e8380792SMatthias Ringwald   if (ARG3 == 0) \
1351*e8380792SMatthias Ringwald     __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2)  ); \
1352*e8380792SMatthias Ringwald   else \
1353*e8380792SMatthias Ringwald     __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2), "I" (ARG3)  ); \
1354*e8380792SMatthias Ringwald   __RES; \
1355*e8380792SMatthias Ringwald  })
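/* Usage sketch (illustrative only, not part of the original CMSIS header):
   __PKHBT packs the bottom halfword of its first argument with the top
   halfword of the (left-shifted) second argument; the shift amount must be a
   constant.  Here two 16-bit samples are packed into one word for the SIMD
   intrinsics above.  The helper name below is hypothetical. */
#if 0
static uint32_t example_pack_halfwords(uint16_t lo, uint16_t hi)
{
  return __PKHBT((uint32_t)lo, (uint32_t)hi, 16);   /* result = (hi << 16) | lo */
}
#endif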
1356*e8380792SMatthias Ringwald 
1357*e8380792SMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
1358*e8380792SMatthias Ringwald {
1359*e8380792SMatthias Ringwald  int32_t result;
1360*e8380792SMatthias Ringwald 
1361*e8380792SMatthias Ringwald  __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r"  (op1), "r" (op2), "r" (op3) );
1362*e8380792SMatthias Ringwald  return(result);
1363*e8380792SMatthias Ringwald }
1364*e8380792SMatthias Ringwald 
1365*e8380792SMatthias Ringwald #endif /* (__CORTEX_M >= 0x04) */
1366*e8380792SMatthias Ringwald /*@} end of group CMSIS_SIMD_intrinsics */
1367*e8380792SMatthias Ringwald 
1368*e8380792SMatthias Ringwald 
1369*e8380792SMatthias Ringwald #if defined ( __GNUC__ )
1370*e8380792SMatthias Ringwald #pragma GCC diagnostic pop
1371*e8380792SMatthias Ringwald #endif
1372*e8380792SMatthias Ringwald 
1373*e8380792SMatthias Ringwald #endif /* __CMSIS_GCC_H */
1374