/**************************************************************************//**
 * @file     cmsis_gcc.h
 * @brief    CMSIS Cortex-M Core Function/Instruction Header File
 * @version  V4.20
 * @date     18. August 2015
 ******************************************************************************/
/* Copyright (c) 2009 - 2015 ARM LIMITED

   All rights reserved.
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:
   - Redistributions of source code must retain the above copyright
     notice, this list of conditions and the following disclaimer.
   - Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.
   - Neither the name of ARM nor the names of its contributors may be used
     to endorse or promote products derived from this software without
     specific prior written permission.
   *
   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   POSSIBILITY OF SUCH DAMAGE.
   ---------------------------------------------------------------------------*/


#ifndef __CMSIS_GCC_H
#define __CMSIS_GCC_H


/* ###########################  Core Function Access  ########################### */
/** \ingroup  CMSIS_Core_FunctionInterface
    \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions
  @{
 */

/** \brief  Enable IRQ Interrupts

    This function enables IRQ interrupts by clearing the I-bit in the CPSR.
    Can only be executed in Privileged modes.
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_irq(void)
{
  __ASM volatile ("cpsie i" : : : "memory");
}


/** \brief  Disable IRQ Interrupts

    This function disables IRQ interrupts by setting the I-bit in the CPSR.
    Can only be executed in Privileged modes.
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_irq(void)
{
  __ASM volatile ("cpsid i" : : : "memory");
}
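

/* Usage sketch (illustrative, not part of the CMSIS API): bracket a short
   critical section with the global interrupt enable/disable intrinsics.
   The variable 'shared_counter' is a hypothetical application object.

     volatile uint32_t shared_counter;

     __disable_irq();        // set PRIMASK, block all configurable interrupts
     shared_counter++;       // short, non-blocking work only
     __enable_irq();         // clear PRIMASK again

   Note: this unconditionally re-enables interrupts; if the code may be called
   with interrupts already disabled, save and restore PRIMASK instead (see
   __get_PRIMASK() and __set_PRIMASK() below). */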


/** \brief  Get Control Register

    This function returns the content of the Control Register.

    \return               Control Register value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_CONTROL(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, control" : "=r" (result) );
  return(result);
}


/** \brief  Set Control Register

    This function writes the given value to the Control Register.

    \param [in]    control  Control Register value to set
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_CONTROL(uint32_t control)
{
  __ASM volatile ("MSR control, %0" : : "r" (control) : "memory");
}
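

/* Usage sketch (illustrative, assumption: Cortex-M CONTROL bit layout with
   SPSEL = bit 1): switch Thread mode from MSP to PSP. An ISB is required
   after writing CONTROL so that following instructions use the new stack.

     __set_PSP(top_of_process_stack);               // 'top_of_process_stack' is a hypothetical value
     __set_CONTROL(__get_CONTROL() | (1UL << 1));   // select PSP in Thread mode
     __ISB();                                       // __ISB() is defined further below
*/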


/** \brief  Get IPSR Register

    This function returns the content of the IPSR Register.

    \return               IPSR Register value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_IPSR(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, ipsr" : "=r" (result) );
  return(result);
}


/** \brief  Get APSR Register

    This function returns the content of the APSR Register.

    \return               APSR Register value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_APSR(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, apsr" : "=r" (result) );
  return(result);
}


/** \brief  Get xPSR Register

    This function returns the content of the xPSR Register.

    \return               xPSR Register value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_xPSR(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, xpsr" : "=r" (result) );
  return(result);
}


/** \brief  Get Process Stack Pointer

    This function returns the current value of the Process Stack Pointer (PSP).

    \return               PSP Register value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_PSP(void)
{
  register uint32_t result;

  __ASM volatile ("MRS %0, psp\n"  : "=r" (result) );
  return(result);
}


/** \brief  Set Process Stack Pointer

    This function assigns the given value to the Process Stack Pointer (PSP).

    \param [in]    topOfProcStack  Process Stack Pointer value to set
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_PSP(uint32_t topOfProcStack)
{
  __ASM volatile ("MSR psp, %0\n" : : "r" (topOfProcStack) : "sp");
}


/** \brief  Get Main Stack Pointer

    This function returns the current value of the Main Stack Pointer (MSP).

    \return               MSP Register value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_MSP(void)
{
  register uint32_t result;

  __ASM volatile ("MRS %0, msp\n" : "=r" (result) );
  return(result);
}


/** \brief  Set Main Stack Pointer

    This function assigns the given value to the Main Stack Pointer (MSP).

    \param [in]    topOfMainStack  Main Stack Pointer value to set
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_MSP(uint32_t topOfMainStack)
{
  __ASM volatile ("MSR msp, %0\n" : : "r" (topOfMainStack) : "sp");
}
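

/* Usage sketch (illustrative): an RTOS typically programs both stack pointers
   during start-up before switching to its first thread. The symbols below are
   hypothetical placeholders for values supplied by the application or linker.

     __set_MSP(main_stack_top);        // main stack used by exception handlers
     __set_PSP(first_thread_stack);    // process stack used by threads
*/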


/** \brief  Get Priority Mask

    This function returns the current state of the priority mask bit from the Priority Mask Register.

    \return               Priority Mask value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_PRIMASK(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, primask" : "=r" (result) );
  return(result);
}


/** \brief  Set Priority Mask

    This function assigns the given value to the Priority Mask Register.

    \param [in]    priMask  Priority Mask
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_PRIMASK(uint32_t priMask)
{
  __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory");
}
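

/* Usage sketch (illustrative): a nesting-safe critical section that saves and
   restores PRIMASK instead of unconditionally re-enabling interrupts.

     uint32_t primask = __get_PRIMASK();   // remember the current mask state
     __disable_irq();
     // ... short critical work ...
     __set_PRIMASK(primask);               // restore the previous state
*/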


#if (__CORTEX_M >= 0x03U)

/** \brief  Enable FIQ

    This function enables FIQ interrupts by clearing the F-bit in the CPSR.
    Can only be executed in Privileged modes.
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_fault_irq(void)
{
  __ASM volatile ("cpsie f" : : : "memory");
}


/** \brief  Disable FIQ

    This function disables FIQ interrupts by setting the F-bit in the CPSR.
    Can only be executed in Privileged modes.
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_fault_irq(void)
{
  __ASM volatile ("cpsid f" : : : "memory");
}


/** \brief  Get Base Priority

    This function returns the current value of the Base Priority register.

    \return               Base Priority register value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_BASEPRI(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, basepri" : "=r" (result) );
  return(result);
}


/** \brief  Set Base Priority

    This function assigns the given value to the Base Priority register.

    \param [in]    basePri  Base Priority value to set
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_BASEPRI(uint32_t value)
{
  __ASM volatile ("MSR basepri, %0" : : "r" (value) : "memory");
}


/** \brief  Set Base Priority with condition

    This function assigns the given value to the Base Priority register only if BASEPRI masking is disabled,
    or the new value increases the BASEPRI priority level.

    \param [in]    basePri  Base Priority value to set
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_BASEPRI_MAX(uint32_t value)
{
  __ASM volatile ("MSR basepri_max, %0" : : "r" (value) : "memory");
}
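

/* Usage sketch (illustrative, assumption: a device with 4 NVIC priority bits,
   so priorities occupy the upper nibble of BASEPRI): mask all interrupts with
   priority value 5 or numerically higher, then remove the mask again.

     __set_BASEPRI_MAX(5U << (8U - 4U));   // raises the mask, never lowers it
     // ... work that may still be preempted by priorities 0..4 ...
     __set_BASEPRI(0U);                    // 0 disables BASEPRI masking
*/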


/** \brief  Get Fault Mask

    This function returns the current value of the Fault Mask register.

    \return               Fault Mask register value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FAULTMASK(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, faultmask" : "=r" (result) );
  return(result);
}


/** \brief  Set Fault Mask

    This function assigns the given value to the Fault Mask register.

    \param [in]    faultMask  Fault Mask value to set
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_FAULTMASK(uint32_t faultMask)
{
  __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory");
}

#endif /* (__CORTEX_M >= 0x03U) */


#if (__CORTEX_M == 0x04U) || (__CORTEX_M == 0x07U)

/** \brief  Get FPSCR

    This function returns the current value of the Floating Point Status/Control register.

    \return               Floating Point Status/Control register value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FPSCR(void)
{
#if (__FPU_PRESENT == 1U) && (__FPU_USED == 1U)
  uint32_t result;

  /* Empty asm statement works as a scheduling barrier */
  __ASM volatile ("");
  __ASM volatile ("VMRS %0, fpscr" : "=r" (result) );
  __ASM volatile ("");
  return(result);
#else
  return(0);
#endif
}


/** \brief  Set FPSCR

    This function assigns the given value to the Floating Point Status/Control register.

    \param [in]    fpscr  Floating Point Status/Control value to set
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_FPSCR(uint32_t fpscr)
{
#if (__FPU_PRESENT == 1U) && (__FPU_USED == 1U)
  /* Empty asm statement works as a scheduling barrier */
  __ASM volatile ("");
  __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc");
  __ASM volatile ("");
#endif
}

#endif /* (__CORTEX_M == 0x04U) || (__CORTEX_M == 0x07U) */
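

/* Usage sketch (illustrative, assumption: standard FPSCR bit layout where the
   cumulative exception flags IOC/DZC/OFC/UFC/IXC/IDC occupy bits 0-4 and 7):
   read the sticky floating-point exception flags and clear them.

     uint32_t fpscr = __get_FPSCR();
     if (fpscr & 0x9FU) {               // any cumulative FP exception pending?
       __set_FPSCR(fpscr & ~0x9FU);     // clear the sticky flags
     }
*/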



/*@} end of CMSIS_Core_RegAccFunctions */


/* ##########################  Core Instruction Access  ######################### */
/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface
  Access to dedicated instructions
  @{
*/

/* Define macros for porting to both thumb1 and thumb2.
 * For thumb1, use low registers (r0-r7), specified by constraint "l".
 * Otherwise, use general registers, specified by constraint "r". */
#if defined (__thumb__) && !defined (__thumb2__)
#define __CMSIS_GCC_OUT_REG(r) "=l" (r)
#define __CMSIS_GCC_USE_REG(r) "l" (r)
#else
#define __CMSIS_GCC_OUT_REG(r) "=r" (r)
#define __CMSIS_GCC_USE_REG(r) "r" (r)
#endif

/** \brief  No Operation

    No Operation does nothing. This instruction can be used for code alignment purposes.
 */
__attribute__((always_inline)) __STATIC_INLINE void __NOP(void)
{
  __ASM volatile ("nop");
}


/** \brief  Wait For Interrupt

    Wait For Interrupt is a hint instruction that suspends execution
    until one of a number of events occurs.
 */
__attribute__((always_inline)) __STATIC_INLINE void __WFI(void)
{
  __ASM volatile ("wfi");
}


/** \brief  Wait For Event

    Wait For Event is a hint instruction that permits the processor to enter
    a low-power state until one of a number of events occurs.
 */
__attribute__((always_inline)) __STATIC_INLINE void __WFE(void)
{
  __ASM volatile ("wfe");
}


/** \brief  Send Event

    Send Event is a hint instruction. It causes an event to be signaled to the CPU.
 */
__attribute__((always_inline)) __STATIC_INLINE void __SEV(void)
{
  __ASM volatile ("sev");
}


/** \brief  Instruction Synchronization Barrier

    Instruction Synchronization Barrier flushes the pipeline in the processor,
    so that all instructions following the ISB are fetched from cache or
    memory, after the instruction has been completed.
 */
__attribute__((always_inline)) __STATIC_INLINE void __ISB(void)
{
  __ASM volatile ("isb 0xF":::"memory");
}


/** \brief  Data Synchronization Barrier

    This function acts as a special kind of Data Memory Barrier.
    It completes when all explicit memory accesses before this instruction complete.
 */
__attribute__((always_inline)) __STATIC_INLINE void __DSB(void)
{
  __ASM volatile ("dsb 0xF":::"memory");
}


/** \brief  Data Memory Barrier

    This function ensures the apparent order of the explicit memory operations before
    and after the instruction, without ensuring their completion.
 */
__attribute__((always_inline)) __STATIC_INLINE void __DMB(void)
{
  __ASM volatile ("dmb 0xF":::"memory");
}
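

/* Usage sketch (illustrative): a typical barrier pairing after a write that
   changes how subsequent instructions execute, e.g. after updating a
   memory-mapped control register ('SOME_CTRL_REG' is a hypothetical register).

     SOME_CTRL_REG = new_value;   // explicit memory write
     __DSB();                     // wait until the write has completed
     __ISB();                     // refetch the following instructions
*/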


/** \brief  Reverse byte order (32 bit)

    This function reverses the byte order in integer value.

    \param [in]    value  Value to reverse
    \return               Reversed value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __REV(uint32_t value)
{
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
  return __builtin_bswap32(value);
#else
  uint32_t result;

  __ASM volatile ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
  return(result);
#endif
}


/** \brief  Reverse byte order (16 bit)

    This function reverses the byte order in two unsigned short values.

    \param [in]    value  Value to reverse
    \return               Reversed value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __REV16(uint32_t value)
{
  uint32_t result;

  __ASM volatile ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
  return(result);
}


/** \brief  Reverse byte order in signed short value

    This function reverses the byte order in a signed short value with sign extension to integer.

    \param [in]    value  Value to reverse
    \return               Reversed value
 */
__attribute__((always_inline)) __STATIC_INLINE int32_t __REVSH(int32_t value)
{
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
  return (short)__builtin_bswap16(value);
#else
  int32_t result;

  __ASM volatile ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
  return(result);
#endif
}
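

/* Worked examples (illustrative) for the byte-reverse intrinsics:

     __REV  (0x12345678U)          // -> 0x78563412U, all four bytes reversed
     __REV16(0x12345678U)          // -> 0x34127856U, bytes reversed within each halfword
     __REVSH((int32_t)0x00001280)  // -> (int32_t)0xFFFF8012, low halfword reversed and sign-extended
*/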


/** \brief  Rotate Right in unsigned value (32 bit)

    This function rotates the given value to the right by the specified number of bits.

    \param [in]    op1  Value to rotate
    \param [in]    op2  Number of Bits to rotate
    \return             Rotated value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
{
  return (op1 >> op2) | (op1 << (32U - op2));
}
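

/* Worked example (illustrative): __ROR(0x000000F1U, 4U) yields 0x1000000FU.
   Note that the C expression above assumes a rotate count in the range 1..31;
   a count of 0 or 32 would shift by 32 bits, which is undefined behaviour in C. */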


/** \brief  Breakpoint

    This function causes the processor to enter Debug state.
    Debug tools can use this to investigate system state when the instruction at a particular address is reached.

    \param [in]    value  is ignored by the processor.
                   If required, a debugger can use it to store additional information about the breakpoint.
 */
#define __BKPT(value)                       __ASM volatile ("bkpt "#value)


/** \brief  Reverse bit order of value

    This function reverses the bit order of the given value.

    \param [in]    value  Value to reverse
    \return               Reversed value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __RBIT(uint32_t value)
{
  uint32_t result;

#if (__CORTEX_M >= 0x03U) || (__CORTEX_SC >= 300U)
  __ASM volatile ("rbit %0, %1" : "=r" (result) : "r" (value) );
#else
  int32_t s = 4 /*sizeof(v)*/ * 8 - 1;   /* extra shift needed at end */

  result = value;                        /* r will be reversed bits of v; first get LSB of v */
  for (value >>= 1U; value; value >>= 1U)
  {
    result <<= 1U;
    result |= value & 1U;
    s--;
  }
  result <<= s;                          /* shift when v's highest bits are zero */
#endif
  return(result);
}
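

/* Worked example (illustrative): __RBIT(0x00000001U) yields 0x80000000U,
   i.e. bit 0 moves to bit 31 and vice versa. */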


/** \brief  Count leading zeros

    This function counts the number of leading zeros of a data value.

    \param [in]  value  Value to count the leading zeros
    \return             number of leading zeros in value
 */
#define __CLZ             __builtin_clz
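

/* Worked example (illustrative): __CLZ(0x00010000U) yields 15, since bits
   31..17 are zero. Note that __builtin_clz(0) is undefined for GCC, whereas
   the CLZ instruction itself returns 32 for a zero operand. */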


#if (__CORTEX_M >= 0x03U) || (__CORTEX_SC >= 300U)

/** \brief  LDR Exclusive (8 bit)

    This function executes an exclusive LDR instruction for 8 bit value.

    \param [in]    ptr  Pointer to data
    \return             value of type uint8_t at (*ptr)
 */
__attribute__((always_inline)) __STATIC_INLINE uint8_t __LDREXB(volatile uint8_t *addr)
{
  uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
  __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) );
#else
  /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
     accepted by the assembler, so the following less efficient pattern has to be used.
  */
  __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
  return ((uint8_t) result);    /* Add explicit type cast here */
}


/** \brief  LDR Exclusive (16 bit)

    This function executes an exclusive LDR instruction for 16 bit values.

    \param [in]    ptr  Pointer to data
    \return             value of type uint16_t at (*ptr)
 */
__attribute__((always_inline)) __STATIC_INLINE uint16_t __LDREXH(volatile uint16_t *addr)
{
  uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
  __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) );
#else
  /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
     accepted by the assembler, so the following less efficient pattern has to be used.
  */
  __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
  return ((uint16_t) result);    /* Add explicit type cast here */
}


/** \brief  LDR Exclusive (32 bit)

    This function executes an exclusive LDR instruction for 32 bit values.

    \param [in]    ptr  Pointer to data
    \return             value of type uint32_t at (*ptr)
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __LDREXW(volatile uint32_t *addr)
{
  uint32_t result;

  __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) );
  return(result);
}


/** \brief  STR Exclusive (8 bit)

    This function executes an exclusive STR instruction for 8 bit values.

    \param [in]  value  Value to store
    \param [in]    ptr  Pointer to location
    \return          0  Function succeeded
    \return          1  Function failed
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr)
{
  uint32_t result;

  __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
  return(result);
}


/** \brief  STR Exclusive (16 bit)

    This function executes an exclusive STR instruction for 16 bit values.

    \param [in]  value  Value to store
    \param [in]    ptr  Pointer to location
    \return          0  Function succeeded
    \return          1  Function failed
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr)
{
  uint32_t result;

  __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
  return(result);
}


/** \brief  STR Exclusive (32 bit)

    This function executes an exclusive STR instruction for 32 bit values.

    \param [in]  value  Value to store
    \param [in]    ptr  Pointer to location
    \return          0  Function succeeded
    \return          1  Function failed
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr)
{
  uint32_t result;

  __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) );
  return(result);
}


/** \brief  Remove the exclusive lock

    This function removes the exclusive lock which is created by LDREX.

 */
__attribute__((always_inline)) __STATIC_INLINE void __CLREX(void)
{
  __ASM volatile ("clrex" ::: "memory");
}
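

/* Usage sketch (illustrative, not part of the CMSIS API): lock-free increment
   of a shared 32-bit counter using the exclusive-access intrinsics. The
   variable 'counter' is a hypothetical application object.

     volatile uint32_t counter;

     uint32_t val;
     do {
       val = __LDREXW(&counter);               // start exclusive access
     } while (__STREXW(val + 1U, &counter));   // 1 = store failed, so retry
*/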


/** \brief  Signed Saturate

    This function saturates a signed value.

    \param [in]  value  Value to be saturated
    \param [in]    sat  Bit position to saturate to (1..32)
    \return             Saturated value
 */
#define __SSAT(ARG1,ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("ssat %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })


/** \brief  Unsigned Saturate

    This function saturates an unsigned value.

    \param [in]  value  Value to be saturated
    \param [in]    sat  Bit position to saturate to (0..31)
    \return             Saturated value
 */
#define __USAT(ARG1,ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("usat %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })
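

/* Worked examples (illustrative): saturating to an 8-bit range.

     __SSAT(200, 8)    // -> 127, the largest value representable in 8 signed bits
     __SSAT(-200, 8)   // -> -128, the smallest value representable in 8 signed bits
     __USAT(300, 8)    // -> 255, the largest value representable in 8 unsigned bits
     __USAT(-5, 8)     // -> 0, unsigned saturation clamps negative inputs to zero
*/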


/** \brief  Rotate Right with Extend (32 bit)

    This function moves each bit of a bitstring right by one bit.
    The carry input is shifted in at the left end of the bitstring.

    \param [in]    value  Value to rotate
    \return               Rotated value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __RRX(uint32_t value)
{
  uint32_t result;

  __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
  return(result);
}


/** \brief  LDRT Unprivileged (8 bit)

    This function executes an Unprivileged LDRT instruction for 8 bit value.

    \param [in]    ptr  Pointer to data
    \return             value of type uint8_t at (*ptr)
 */
__attribute__((always_inline)) __STATIC_INLINE uint8_t __LDRBT(volatile uint8_t *addr)
{
  uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
  __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*addr) );
#else
  /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
     accepted by the assembler, so the following less efficient pattern has to be used.
  */
  __ASM volatile ("ldrbt %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
  return ((uint8_t) result);    /* Add explicit type cast here */
}


/** \brief  LDRT Unprivileged (16 bit)

    This function executes an Unprivileged LDRT instruction for 16 bit values.

    \param [in]    ptr  Pointer to data
    \return             value of type uint16_t at (*ptr)
 */
__attribute__((always_inline)) __STATIC_INLINE uint16_t __LDRHT(volatile uint16_t *addr)
{
  uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
  __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*addr) );
#else
  /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
     accepted by the assembler, so the following less efficient pattern has to be used.
  */
  __ASM volatile ("ldrht %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
  return ((uint16_t) result);    /* Add explicit type cast here */
}


/** \brief  LDRT Unprivileged (32 bit)

    This function executes an Unprivileged LDRT instruction for 32 bit values.

    \param [in]    ptr  Pointer to data
    \return             value of type uint32_t at (*ptr)
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __LDRT(volatile uint32_t *addr)
{
  uint32_t result;

  __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*addr) );
  return(result);
}


/** \brief  STRT Unprivileged (8 bit)

    This function executes an Unprivileged STRT instruction for 8 bit values.

    \param [in]  value  Value to store
    \param [in]    ptr  Pointer to location
 */
__attribute__((always_inline)) __STATIC_INLINE void __STRBT(uint8_t value, volatile uint8_t *addr)
{
  __ASM volatile ("strbt %1, %0" : "=Q" (*addr) : "r" ((uint32_t)value) );
}


/** \brief  STRT Unprivileged (16 bit)

    This function executes an Unprivileged STRT instruction for 16 bit values.

    \param [in]  value  Value to store
    \param [in]    ptr  Pointer to location
 */
__attribute__((always_inline)) __STATIC_INLINE void __STRHT(uint16_t value, volatile uint16_t *addr)
{
  __ASM volatile ("strht %1, %0" : "=Q" (*addr) : "r" ((uint32_t)value) );
}


/** \brief  STRT Unprivileged (32 bit)

    This function executes an Unprivileged STRT instruction for 32 bit values.

    \param [in]  value  Value to store
    \param [in]    ptr  Pointer to location
 */
__attribute__((always_inline)) __STATIC_INLINE void __STRT(uint32_t value, volatile uint32_t *addr)
{
  __ASM volatile ("strt %1, %0" : "=Q" (*addr) : "r" (value) );
}

#endif /* (__CORTEX_M >= 0x03U) || (__CORTEX_SC >= 300U) */

/*@}*/ /* end of group CMSIS_Core_InstructionInterface */


/* ###################  Compiler specific Intrinsics  ########################### */
/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
  Access to dedicated SIMD instructions
  @{
*/

#if (__CORTEX_M >= 0x04)  /* only for Cortex-M4 and above */

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
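
/* Worked examples (illustrative): the byte-wise SIMD adds operate on four
   packed 8-bit lanes in parallel, e.g.

     __UADD8 (0x11223344U, 0x01010101U)   // -> 0x12233445U, each byte incremented by 1
     __UQADD8(0xFF101010U, 0x20202020U)   // -> 0xFF303030U, the top byte saturates at 0xFF
*/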
911*5fd0122aSMatthias Ringwald
912*5fd0122aSMatthias Ringwald
__SSUB8(uint32_t op1,uint32_t op2)913*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)
914*5fd0122aSMatthias Ringwald {
915*5fd0122aSMatthias Ringwald uint32_t result;
916*5fd0122aSMatthias Ringwald
917*5fd0122aSMatthias Ringwald __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
918*5fd0122aSMatthias Ringwald return(result);
919*5fd0122aSMatthias Ringwald }
920*5fd0122aSMatthias Ringwald
__QSUB8(uint32_t op1,uint32_t op2)921*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
922*5fd0122aSMatthias Ringwald {
923*5fd0122aSMatthias Ringwald uint32_t result;
924*5fd0122aSMatthias Ringwald
925*5fd0122aSMatthias Ringwald __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
926*5fd0122aSMatthias Ringwald return(result);
927*5fd0122aSMatthias Ringwald }
928*5fd0122aSMatthias Ringwald
__SHSUB8(uint32_t op1,uint32_t op2)929*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
930*5fd0122aSMatthias Ringwald {
931*5fd0122aSMatthias Ringwald uint32_t result;
932*5fd0122aSMatthias Ringwald
933*5fd0122aSMatthias Ringwald __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
934*5fd0122aSMatthias Ringwald return(result);
935*5fd0122aSMatthias Ringwald }
936*5fd0122aSMatthias Ringwald
__USUB8(uint32_t op1,uint32_t op2)937*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
938*5fd0122aSMatthias Ringwald {
939*5fd0122aSMatthias Ringwald uint32_t result;
940*5fd0122aSMatthias Ringwald
941*5fd0122aSMatthias Ringwald __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
942*5fd0122aSMatthias Ringwald return(result);
943*5fd0122aSMatthias Ringwald }
944*5fd0122aSMatthias Ringwald
__UQSUB8(uint32_t op1,uint32_t op2)945*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
946*5fd0122aSMatthias Ringwald {
947*5fd0122aSMatthias Ringwald uint32_t result;
948*5fd0122aSMatthias Ringwald
949*5fd0122aSMatthias Ringwald __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
950*5fd0122aSMatthias Ringwald return(result);
951*5fd0122aSMatthias Ringwald }
952*5fd0122aSMatthias Ringwald
__UHSUB8(uint32_t op1,uint32_t op2)953*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
954*5fd0122aSMatthias Ringwald {
955*5fd0122aSMatthias Ringwald uint32_t result;
956*5fd0122aSMatthias Ringwald
957*5fd0122aSMatthias Ringwald __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
958*5fd0122aSMatthias Ringwald return(result);
959*5fd0122aSMatthias Ringwald }
960*5fd0122aSMatthias Ringwald
961*5fd0122aSMatthias Ringwald
__SADD16(uint32_t op1,uint32_t op2)962*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
963*5fd0122aSMatthias Ringwald {
964*5fd0122aSMatthias Ringwald uint32_t result;
965*5fd0122aSMatthias Ringwald
966*5fd0122aSMatthias Ringwald __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
967*5fd0122aSMatthias Ringwald return(result);
968*5fd0122aSMatthias Ringwald }
969*5fd0122aSMatthias Ringwald
970*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
971*5fd0122aSMatthias Ringwald {
972*5fd0122aSMatthias Ringwald uint32_t result;
973*5fd0122aSMatthias Ringwald
974*5fd0122aSMatthias Ringwald __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
975*5fd0122aSMatthias Ringwald return(result);
976*5fd0122aSMatthias Ringwald }
977*5fd0122aSMatthias Ringwald
978*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
979*5fd0122aSMatthias Ringwald {
980*5fd0122aSMatthias Ringwald uint32_t result;
981*5fd0122aSMatthias Ringwald
982*5fd0122aSMatthias Ringwald __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
983*5fd0122aSMatthias Ringwald return(result);
984*5fd0122aSMatthias Ringwald }
985*5fd0122aSMatthias Ringwald
986*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
987*5fd0122aSMatthias Ringwald {
988*5fd0122aSMatthias Ringwald uint32_t result;
989*5fd0122aSMatthias Ringwald
990*5fd0122aSMatthias Ringwald __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
991*5fd0122aSMatthias Ringwald return(result);
992*5fd0122aSMatthias Ringwald }
993*5fd0122aSMatthias Ringwald
994*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
995*5fd0122aSMatthias Ringwald {
996*5fd0122aSMatthias Ringwald uint32_t result;
997*5fd0122aSMatthias Ringwald
998*5fd0122aSMatthias Ringwald __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
999*5fd0122aSMatthias Ringwald return(result);
1000*5fd0122aSMatthias Ringwald }
1001*5fd0122aSMatthias Ringwald
1002*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
1003*5fd0122aSMatthias Ringwald {
1004*5fd0122aSMatthias Ringwald uint32_t result;
1005*5fd0122aSMatthias Ringwald
1006*5fd0122aSMatthias Ringwald __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1007*5fd0122aSMatthias Ringwald return(result);
1008*5fd0122aSMatthias Ringwald }
1009*5fd0122aSMatthias Ringwald
1010*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
1011*5fd0122aSMatthias Ringwald {
1012*5fd0122aSMatthias Ringwald uint32_t result;
1013*5fd0122aSMatthias Ringwald
1014*5fd0122aSMatthias Ringwald __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1015*5fd0122aSMatthias Ringwald return(result);
1016*5fd0122aSMatthias Ringwald }
1017*5fd0122aSMatthias Ringwald
1018*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
1019*5fd0122aSMatthias Ringwald {
1020*5fd0122aSMatthias Ringwald uint32_t result;
1021*5fd0122aSMatthias Ringwald
1022*5fd0122aSMatthias Ringwald __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1023*5fd0122aSMatthias Ringwald return(result);
1024*5fd0122aSMatthias Ringwald }
1025*5fd0122aSMatthias Ringwald
1026*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
1027*5fd0122aSMatthias Ringwald {
1028*5fd0122aSMatthias Ringwald uint32_t result;
1029*5fd0122aSMatthias Ringwald
1030*5fd0122aSMatthias Ringwald __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1031*5fd0122aSMatthias Ringwald return(result);
1032*5fd0122aSMatthias Ringwald }
1033*5fd0122aSMatthias Ringwald
1034*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
1035*5fd0122aSMatthias Ringwald {
1036*5fd0122aSMatthias Ringwald uint32_t result;
1037*5fd0122aSMatthias Ringwald
1038*5fd0122aSMatthias Ringwald __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1039*5fd0122aSMatthias Ringwald return(result);
1040*5fd0122aSMatthias Ringwald }
1041*5fd0122aSMatthias Ringwald
1042*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
1043*5fd0122aSMatthias Ringwald {
1044*5fd0122aSMatthias Ringwald uint32_t result;
1045*5fd0122aSMatthias Ringwald
1046*5fd0122aSMatthias Ringwald __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1047*5fd0122aSMatthias Ringwald return(result);
1048*5fd0122aSMatthias Ringwald }
1049*5fd0122aSMatthias Ringwald
1050*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
1051*5fd0122aSMatthias Ringwald {
1052*5fd0122aSMatthias Ringwald uint32_t result;
1053*5fd0122aSMatthias Ringwald
1054*5fd0122aSMatthias Ringwald __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1055*5fd0122aSMatthias Ringwald return(result);
1056*5fd0122aSMatthias Ringwald }
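
/* Usage sketch (illustrative, not part of CMSIS): __QADD16 adds the two signed
   halfwords of each operand independently and saturates each result to the
   int16_t range, which suits mixing a packed left/right pair of 16-bit audio
   samples.  The helper name is hypothetical. */
__STATIC_INLINE uint32_t example_qadd16_mix(uint32_t frame_a, uint32_t frame_b)
{
  /* Low halfword = saturated sum of the low halfwords; high halfword likewise. */
  return __QADD16(frame_a, frame_b);
}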
1057*5fd0122aSMatthias Ringwald
1058*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
1059*5fd0122aSMatthias Ringwald {
1060*5fd0122aSMatthias Ringwald uint32_t result;
1061*5fd0122aSMatthias Ringwald
1062*5fd0122aSMatthias Ringwald __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1063*5fd0122aSMatthias Ringwald return(result);
1064*5fd0122aSMatthias Ringwald }
1065*5fd0122aSMatthias Ringwald
1066*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
1067*5fd0122aSMatthias Ringwald {
1068*5fd0122aSMatthias Ringwald uint32_t result;
1069*5fd0122aSMatthias Ringwald
1070*5fd0122aSMatthias Ringwald __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1071*5fd0122aSMatthias Ringwald return(result);
1072*5fd0122aSMatthias Ringwald }
1073*5fd0122aSMatthias Ringwald
1074*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
1075*5fd0122aSMatthias Ringwald {
1076*5fd0122aSMatthias Ringwald uint32_t result;
1077*5fd0122aSMatthias Ringwald
1078*5fd0122aSMatthias Ringwald __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1079*5fd0122aSMatthias Ringwald return(result);
1080*5fd0122aSMatthias Ringwald }
1081*5fd0122aSMatthias Ringwald
1082*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
1083*5fd0122aSMatthias Ringwald {
1084*5fd0122aSMatthias Ringwald uint32_t result;
1085*5fd0122aSMatthias Ringwald
1086*5fd0122aSMatthias Ringwald __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1087*5fd0122aSMatthias Ringwald return(result);
1088*5fd0122aSMatthias Ringwald }
1089*5fd0122aSMatthias Ringwald
1090*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
1091*5fd0122aSMatthias Ringwald {
1092*5fd0122aSMatthias Ringwald uint32_t result;
1093*5fd0122aSMatthias Ringwald
1094*5fd0122aSMatthias Ringwald __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1095*5fd0122aSMatthias Ringwald return(result);
1096*5fd0122aSMatthias Ringwald }
1097*5fd0122aSMatthias Ringwald
1098*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
1099*5fd0122aSMatthias Ringwald {
1100*5fd0122aSMatthias Ringwald uint32_t result;
1101*5fd0122aSMatthias Ringwald
1102*5fd0122aSMatthias Ringwald __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1103*5fd0122aSMatthias Ringwald return(result);
1104*5fd0122aSMatthias Ringwald }
1105*5fd0122aSMatthias Ringwald
1106*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
1107*5fd0122aSMatthias Ringwald {
1108*5fd0122aSMatthias Ringwald uint32_t result;
1109*5fd0122aSMatthias Ringwald
1110*5fd0122aSMatthias Ringwald __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1111*5fd0122aSMatthias Ringwald return(result);
1112*5fd0122aSMatthias Ringwald }
1113*5fd0122aSMatthias Ringwald
1114*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
1115*5fd0122aSMatthias Ringwald {
1116*5fd0122aSMatthias Ringwald uint32_t result;
1117*5fd0122aSMatthias Ringwald
1118*5fd0122aSMatthias Ringwald __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1119*5fd0122aSMatthias Ringwald return(result);
1120*5fd0122aSMatthias Ringwald }
1121*5fd0122aSMatthias Ringwald
1122*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
1123*5fd0122aSMatthias Ringwald {
1124*5fd0122aSMatthias Ringwald uint32_t result;
1125*5fd0122aSMatthias Ringwald
1126*5fd0122aSMatthias Ringwald __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1127*5fd0122aSMatthias Ringwald return(result);
1128*5fd0122aSMatthias Ringwald }
1129*5fd0122aSMatthias Ringwald
1130*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
1131*5fd0122aSMatthias Ringwald {
1132*5fd0122aSMatthias Ringwald uint32_t result;
1133*5fd0122aSMatthias Ringwald
1134*5fd0122aSMatthias Ringwald __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1135*5fd0122aSMatthias Ringwald return(result);
1136*5fd0122aSMatthias Ringwald }
1137*5fd0122aSMatthias Ringwald
1138*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
1139*5fd0122aSMatthias Ringwald {
1140*5fd0122aSMatthias Ringwald uint32_t result;
1141*5fd0122aSMatthias Ringwald
1142*5fd0122aSMatthias Ringwald __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1143*5fd0122aSMatthias Ringwald return(result);
1144*5fd0122aSMatthias Ringwald }
1145*5fd0122aSMatthias Ringwald
1146*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
1147*5fd0122aSMatthias Ringwald {
1148*5fd0122aSMatthias Ringwald uint32_t result;
1149*5fd0122aSMatthias Ringwald
1150*5fd0122aSMatthias Ringwald __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1151*5fd0122aSMatthias Ringwald return(result);
1152*5fd0122aSMatthias Ringwald }
1153*5fd0122aSMatthias Ringwald
1154*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
1155*5fd0122aSMatthias Ringwald {
1156*5fd0122aSMatthias Ringwald uint32_t result;
1157*5fd0122aSMatthias Ringwald
1158*5fd0122aSMatthias Ringwald __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1159*5fd0122aSMatthias Ringwald return(result);
1160*5fd0122aSMatthias Ringwald }
1161*5fd0122aSMatthias Ringwald
1162*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
1163*5fd0122aSMatthias Ringwald {
1164*5fd0122aSMatthias Ringwald uint32_t result;
1165*5fd0122aSMatthias Ringwald
1166*5fd0122aSMatthias Ringwald __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
1167*5fd0122aSMatthias Ringwald return(result);
1168*5fd0122aSMatthias Ringwald }
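
/* Usage sketch (illustrative, not part of CMSIS): __USADA8 returns op3 plus the sum
   of absolute differences of the four byte pairs in op1 and op2, so a buffer can be
   compared four bytes per call.  The helper and parameter names are hypothetical and
   the buffers are assumed to be word aligned. */
__STATIC_INLINE uint32_t example_sad_words(const uint32_t *buf_a, const uint32_t *buf_b, uint32_t words)
{
  uint32_t sad = 0U;
  uint32_t i;
  for (i = 0U; i < words; i++)
  {
    /* Adds |a0-b0| + |a1-b1| + |a2-b2| + |a3-b3| of the packed bytes to sad. */
    sad = __USADA8(buf_a[i], buf_b[i], sad);
  }
  return sad;
}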
1169*5fd0122aSMatthias Ringwald
1170*5fd0122aSMatthias Ringwald #define __SSAT16(ARG1,ARG2) \
1171*5fd0122aSMatthias Ringwald ({ \
1172*5fd0122aSMatthias Ringwald uint32_t __RES, __ARG1 = (ARG1); \
1173*5fd0122aSMatthias Ringwald __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
1174*5fd0122aSMatthias Ringwald __RES; \
1175*5fd0122aSMatthias Ringwald })
1176*5fd0122aSMatthias Ringwald
1177*5fd0122aSMatthias Ringwald #define __USAT16(ARG1,ARG2) \
1178*5fd0122aSMatthias Ringwald ({ \
1179*5fd0122aSMatthias Ringwald uint32_t __RES, __ARG1 = (ARG1); \
1180*5fd0122aSMatthias Ringwald __ASM ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
1181*5fd0122aSMatthias Ringwald __RES; \
1182*5fd0122aSMatthias Ringwald })
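
/* Usage sketch (illustrative, not part of CMSIS): __SSAT16 saturates each signed
   halfword of its argument to a signed bit width that must be a compile-time
   constant because of the "I" constraint above; 9 bits clamps both lanes to the
   range -256..255.  The helper name is hypothetical. */
__STATIC_INLINE uint32_t example_ssat16_9bit(uint32_t packed_pair)
{
  return __SSAT16(packed_pair, 9);
}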
1183*5fd0122aSMatthias Ringwald
1184*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTB16(uint32_t op1)
1185*5fd0122aSMatthias Ringwald {
1186*5fd0122aSMatthias Ringwald uint32_t result;
1187*5fd0122aSMatthias Ringwald
1188*5fd0122aSMatthias Ringwald __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
1189*5fd0122aSMatthias Ringwald return(result);
1190*5fd0122aSMatthias Ringwald }
1191*5fd0122aSMatthias Ringwald
1192*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
1193*5fd0122aSMatthias Ringwald {
1194*5fd0122aSMatthias Ringwald uint32_t result;
1195*5fd0122aSMatthias Ringwald
1196*5fd0122aSMatthias Ringwald __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1197*5fd0122aSMatthias Ringwald return(result);
1198*5fd0122aSMatthias Ringwald }
1199*5fd0122aSMatthias Ringwald
1200*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTB16(uint32_t op1)
1201*5fd0122aSMatthias Ringwald {
1202*5fd0122aSMatthias Ringwald uint32_t result;
1203*5fd0122aSMatthias Ringwald
1204*5fd0122aSMatthias Ringwald __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
1205*5fd0122aSMatthias Ringwald return(result);
1206*5fd0122aSMatthias Ringwald }
1207*5fd0122aSMatthias Ringwald
1208*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
1209*5fd0122aSMatthias Ringwald {
1210*5fd0122aSMatthias Ringwald uint32_t result;
1211*5fd0122aSMatthias Ringwald
1212*5fd0122aSMatthias Ringwald __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1213*5fd0122aSMatthias Ringwald return(result);
1214*5fd0122aSMatthias Ringwald }
1215*5fd0122aSMatthias Ringwald
1216*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2)
1217*5fd0122aSMatthias Ringwald {
1218*5fd0122aSMatthias Ringwald uint32_t result;
1219*5fd0122aSMatthias Ringwald
1220*5fd0122aSMatthias Ringwald __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1221*5fd0122aSMatthias Ringwald return(result);
1222*5fd0122aSMatthias Ringwald }
1223*5fd0122aSMatthias Ringwald
1224*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
1225*5fd0122aSMatthias Ringwald {
1226*5fd0122aSMatthias Ringwald uint32_t result;
1227*5fd0122aSMatthias Ringwald
1228*5fd0122aSMatthias Ringwald __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1229*5fd0122aSMatthias Ringwald return(result);
1230*5fd0122aSMatthias Ringwald }
1231*5fd0122aSMatthias Ringwald
1232*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
1233*5fd0122aSMatthias Ringwald {
1234*5fd0122aSMatthias Ringwald uint32_t result;
1235*5fd0122aSMatthias Ringwald
1236*5fd0122aSMatthias Ringwald __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
1237*5fd0122aSMatthias Ringwald return(result);
1238*5fd0122aSMatthias Ringwald }
1239*5fd0122aSMatthias Ringwald
1240*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
1241*5fd0122aSMatthias Ringwald {
1242*5fd0122aSMatthias Ringwald uint32_t result;
1243*5fd0122aSMatthias Ringwald
1244*5fd0122aSMatthias Ringwald __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
1245*5fd0122aSMatthias Ringwald return(result);
1246*5fd0122aSMatthias Ringwald }
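
/* Usage sketch (illustrative, not part of CMSIS): __SMLAD multiplies the two signed
   16-bit lanes of op1 and op2 and adds both products to op3, so a dot product over
   packed 16-bit samples consumes two samples per iteration.  Names are hypothetical
   and the 32-bit accumulator is assumed not to overflow for the given block length. */
__STATIC_INLINE int32_t example_dot_product_q15(const uint32_t *x, const uint32_t *y, uint32_t pairs)
{
  uint32_t acc = 0U;
  uint32_t i;
  for (i = 0U; i < pairs; i++)
  {
    /* acc += x_low * y_low + x_high * y_high (signed 16 x 16 -> 32-bit products). */
    acc = __SMLAD(x[i], y[i], acc);
  }
  return (int32_t)acc;
}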
1247*5fd0122aSMatthias Ringwald
1248*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc)
1249*5fd0122aSMatthias Ringwald {
1250*5fd0122aSMatthias Ringwald union llreg_u{
1251*5fd0122aSMatthias Ringwald uint32_t w32[2];
1252*5fd0122aSMatthias Ringwald uint64_t w64;
1253*5fd0122aSMatthias Ringwald } llr;
1254*5fd0122aSMatthias Ringwald llr.w64 = acc;
1255*5fd0122aSMatthias Ringwald
1256*5fd0122aSMatthias Ringwald #ifndef __ARMEB__ // Little endian
1257*5fd0122aSMatthias Ringwald __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
1258*5fd0122aSMatthias Ringwald #else // Big endian
1259*5fd0122aSMatthias Ringwald __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
1260*5fd0122aSMatthias Ringwald #endif
1261*5fd0122aSMatthias Ringwald
1262*5fd0122aSMatthias Ringwald return(llr.w64);
1263*5fd0122aSMatthias Ringwald }
1264*5fd0122aSMatthias Ringwald
1265*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc)
1266*5fd0122aSMatthias Ringwald {
1267*5fd0122aSMatthias Ringwald union llreg_u{
1268*5fd0122aSMatthias Ringwald uint32_t w32[2];
1269*5fd0122aSMatthias Ringwald uint64_t w64;
1270*5fd0122aSMatthias Ringwald } llr;
1271*5fd0122aSMatthias Ringwald llr.w64 = acc;
1272*5fd0122aSMatthias Ringwald
1273*5fd0122aSMatthias Ringwald #ifndef __ARMEB__ // Little endian
1274*5fd0122aSMatthias Ringwald __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
1275*5fd0122aSMatthias Ringwald #else // Big endian
1276*5fd0122aSMatthias Ringwald __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
1277*5fd0122aSMatthias Ringwald #endif
1278*5fd0122aSMatthias Ringwald
1279*5fd0122aSMatthias Ringwald return(llr.w64);
1280*5fd0122aSMatthias Ringwald }
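
/* Usage sketch (illustrative, not part of CMSIS): __SMLALD performs the same dual
   16-bit multiply as __SMLAD but accumulates into a 64-bit value, avoiding the
   overflow limit of the 32-bit variant for long blocks.  Names are hypothetical. */
__STATIC_INLINE int64_t example_dot_product_q15_long(const uint32_t *x, const uint32_t *y, uint32_t pairs)
{
  uint64_t acc = 0ULL;
  uint32_t i;
  for (i = 0U; i < pairs; i++)
  {
    acc = __SMLALD(x[i], y[i], acc);
  }
  return (int64_t)acc;
}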
1281*5fd0122aSMatthias Ringwald
1282*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2)
1283*5fd0122aSMatthias Ringwald {
1284*5fd0122aSMatthias Ringwald uint32_t result;
1285*5fd0122aSMatthias Ringwald
1286*5fd0122aSMatthias Ringwald __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1287*5fd0122aSMatthias Ringwald return(result);
1288*5fd0122aSMatthias Ringwald }
1289*5fd0122aSMatthias Ringwald
1290*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
1291*5fd0122aSMatthias Ringwald {
1292*5fd0122aSMatthias Ringwald uint32_t result;
1293*5fd0122aSMatthias Ringwald
1294*5fd0122aSMatthias Ringwald __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1295*5fd0122aSMatthias Ringwald return(result);
1296*5fd0122aSMatthias Ringwald }
1297*5fd0122aSMatthias Ringwald
1298*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
1299*5fd0122aSMatthias Ringwald {
1300*5fd0122aSMatthias Ringwald uint32_t result;
1301*5fd0122aSMatthias Ringwald
1302*5fd0122aSMatthias Ringwald __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
1303*5fd0122aSMatthias Ringwald return(result);
1304*5fd0122aSMatthias Ringwald }
1305*5fd0122aSMatthias Ringwald
1306*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
1307*5fd0122aSMatthias Ringwald {
1308*5fd0122aSMatthias Ringwald uint32_t result;
1309*5fd0122aSMatthias Ringwald
1310*5fd0122aSMatthias Ringwald __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
1311*5fd0122aSMatthias Ringwald return(result);
1312*5fd0122aSMatthias Ringwald }
1313*5fd0122aSMatthias Ringwald
1314*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc)
1315*5fd0122aSMatthias Ringwald {
1316*5fd0122aSMatthias Ringwald union llreg_u{
1317*5fd0122aSMatthias Ringwald uint32_t w32[2];
1318*5fd0122aSMatthias Ringwald uint64_t w64;
1319*5fd0122aSMatthias Ringwald } llr;
1320*5fd0122aSMatthias Ringwald llr.w64 = acc;
1321*5fd0122aSMatthias Ringwald
1322*5fd0122aSMatthias Ringwald #ifndef __ARMEB__ // Little endian
1323*5fd0122aSMatthias Ringwald __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
1324*5fd0122aSMatthias Ringwald #else // Big endian
1325*5fd0122aSMatthias Ringwald __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
1326*5fd0122aSMatthias Ringwald #endif
1327*5fd0122aSMatthias Ringwald
1328*5fd0122aSMatthias Ringwald return(llr.w64);
1329*5fd0122aSMatthias Ringwald }
1330*5fd0122aSMatthias Ringwald
1331*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc)
1332*5fd0122aSMatthias Ringwald {
1333*5fd0122aSMatthias Ringwald union llreg_u{
1334*5fd0122aSMatthias Ringwald uint32_t w32[2];
1335*5fd0122aSMatthias Ringwald uint64_t w64;
1336*5fd0122aSMatthias Ringwald } llr;
1337*5fd0122aSMatthias Ringwald llr.w64 = acc;
1338*5fd0122aSMatthias Ringwald
1339*5fd0122aSMatthias Ringwald #ifndef __ARMEB__ // Little endian
1340*5fd0122aSMatthias Ringwald __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
1341*5fd0122aSMatthias Ringwald #else // Big endian
1342*5fd0122aSMatthias Ringwald __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
1343*5fd0122aSMatthias Ringwald #endif
1344*5fd0122aSMatthias Ringwald
1345*5fd0122aSMatthias Ringwald return(llr.w64);
1346*5fd0122aSMatthias Ringwald }
1347*5fd0122aSMatthias Ringwald
1348*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SEL (uint32_t op1, uint32_t op2)
1349*5fd0122aSMatthias Ringwald {
1350*5fd0122aSMatthias Ringwald uint32_t result;
1351*5fd0122aSMatthias Ringwald
1352*5fd0122aSMatthias Ringwald __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1353*5fd0122aSMatthias Ringwald return(result);
1354*5fd0122aSMatthias Ringwald }
1355*5fd0122aSMatthias Ringwald
1356*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD(uint32_t op1, uint32_t op2)
1357*5fd0122aSMatthias Ringwald {
1358*5fd0122aSMatthias Ringwald uint32_t result;
1359*5fd0122aSMatthias Ringwald
1360*5fd0122aSMatthias Ringwald __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1361*5fd0122aSMatthias Ringwald return(result);
1362*5fd0122aSMatthias Ringwald }
1363*5fd0122aSMatthias Ringwald
1364*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB(uint32_t op1, uint32_t op2)
1365*5fd0122aSMatthias Ringwald {
1366*5fd0122aSMatthias Ringwald uint32_t result;
1367*5fd0122aSMatthias Ringwald
1368*5fd0122aSMatthias Ringwald __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1369*5fd0122aSMatthias Ringwald return(result);
1370*5fd0122aSMatthias Ringwald }
1371*5fd0122aSMatthias Ringwald
1372*5fd0122aSMatthias Ringwald #define __PKHBT(ARG1,ARG2,ARG3) \
1373*5fd0122aSMatthias Ringwald ({ \
1374*5fd0122aSMatthias Ringwald uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
1375*5fd0122aSMatthias Ringwald __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
1376*5fd0122aSMatthias Ringwald __RES; \
1377*5fd0122aSMatthias Ringwald })
1378*5fd0122aSMatthias Ringwald
1379*5fd0122aSMatthias Ringwald #define __PKHTB(ARG1,ARG2,ARG3) \
1380*5fd0122aSMatthias Ringwald ({ \
1381*5fd0122aSMatthias Ringwald uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
1382*5fd0122aSMatthias Ringwald if (ARG3 == 0) \
1383*5fd0122aSMatthias Ringwald __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \
1384*5fd0122aSMatthias Ringwald else \
1385*5fd0122aSMatthias Ringwald __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
1386*5fd0122aSMatthias Ringwald __RES; \
1387*5fd0122aSMatthias Ringwald })
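
/* Usage sketch (illustrative, not part of CMSIS): __PKHBT keeps the bottom halfword
   of its first argument and combines it with the top halfword of the (left-shifted)
   second argument; shifting by 16 therefore packs two separate 16-bit values into
   one word.  The shift amount must be a compile-time constant; the helper name is
   hypothetical. */
__STATIC_INLINE uint32_t example_pack_halfwords(uint32_t low_half, uint32_t high_half)
{
  /* result[15:0] = low_half[15:0], result[31:16] = high_half[15:0]. */
  return __PKHBT(low_half, high_half, 16);
}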
1388*5fd0122aSMatthias Ringwald
1389*5fd0122aSMatthias Ringwald __attribute__( ( always_inline ) ) __STATIC_INLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
1390*5fd0122aSMatthias Ringwald {
1391*5fd0122aSMatthias Ringwald int32_t result;
1392*5fd0122aSMatthias Ringwald
1393*5fd0122aSMatthias Ringwald __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) );
1394*5fd0122aSMatthias Ringwald return(result);
1395*5fd0122aSMatthias Ringwald }
1396*5fd0122aSMatthias Ringwald
1397*5fd0122aSMatthias Ringwald #endif /* (__CORTEX_M >= 0x04) */
1398*5fd0122aSMatthias Ringwald /*@} end of group CMSIS_SIMD_intrinsics */
1399*5fd0122aSMatthias Ringwald
1400*5fd0122aSMatthias Ringwald
1401*5fd0122aSMatthias Ringwald #endif /* __CMSIS_GCC_H */