xref: /aosp_15_r20/prebuilts/sdk/renderscript/clang-include/arm_neon.h (revision 344a7f5ef16c479e7a7f54ee6567a9d112f9e72b)
1 /*===---- arm_neon.h - ARM Neon intrinsics ---------------------------------===
2  *
3  * Permission is hereby granted, free of charge, to any person obtaining a copy
4  * of this software and associated documentation files (the "Software"), to deal
5  * in the Software without restriction, including without limitation the rights
6  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7  * copies of the Software, and to permit persons to whom the Software is
8  * furnished to do so, subject to the following conditions:
9  *
10  * The above copyright notice and this permission notice shall be included in
11  * all copies or substantial portions of the Software.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19  * THE SOFTWARE.
20  *
21  *===-----------------------------------------------------------------------===
22  */
23 
24 #ifndef __ARM_NEON_H
25 #define __ARM_NEON_H
26 
27 #if !defined(__ARM_NEON)
28 #error "NEON support not enabled"
29 #endif
30 
31 #include <stdint.h>
32 
33 typedef float float32_t;
34 typedef __fp16 float16_t;
35 #ifdef __aarch64__
36 typedef double float64_t;
37 #endif
38 
39 #ifdef __aarch64__
40 typedef uint8_t poly8_t;
41 typedef uint16_t poly16_t;
42 typedef uint64_t poly64_t;
43 typedef __uint128_t poly128_t;
44 #else
45 typedef int8_t poly8_t;
46 typedef int16_t poly16_t;
47 #endif
48 typedef __attribute__((neon_vector_type(8))) int8_t int8x8_t;
49 typedef __attribute__((neon_vector_type(16))) int8_t int8x16_t;
50 typedef __attribute__((neon_vector_type(4))) int16_t int16x4_t;
51 typedef __attribute__((neon_vector_type(8))) int16_t int16x8_t;
52 typedef __attribute__((neon_vector_type(2))) int32_t int32x2_t;
53 typedef __attribute__((neon_vector_type(4))) int32_t int32x4_t;
54 typedef __attribute__((neon_vector_type(1))) int64_t int64x1_t;
55 typedef __attribute__((neon_vector_type(2))) int64_t int64x2_t;
56 typedef __attribute__((neon_vector_type(8))) uint8_t uint8x8_t;
57 typedef __attribute__((neon_vector_type(16))) uint8_t uint8x16_t;
58 typedef __attribute__((neon_vector_type(4))) uint16_t uint16x4_t;
59 typedef __attribute__((neon_vector_type(8))) uint16_t uint16x8_t;
60 typedef __attribute__((neon_vector_type(2))) uint32_t uint32x2_t;
61 typedef __attribute__((neon_vector_type(4))) uint32_t uint32x4_t;
62 typedef __attribute__((neon_vector_type(1))) uint64_t uint64x1_t;
63 typedef __attribute__((neon_vector_type(2))) uint64_t uint64x2_t;
64 typedef __attribute__((neon_vector_type(4))) float16_t float16x4_t;
65 typedef __attribute__((neon_vector_type(8))) float16_t float16x8_t;
66 typedef __attribute__((neon_vector_type(2))) float32_t float32x2_t;
67 typedef __attribute__((neon_vector_type(4))) float32_t float32x4_t;
68 #ifdef __aarch64__
69 typedef __attribute__((neon_vector_type(1))) float64_t float64x1_t;
70 typedef __attribute__((neon_vector_type(2))) float64_t float64x2_t;
71 #endif
72 typedef __attribute__((neon_polyvector_type(8))) poly8_t poly8x8_t;
73 typedef __attribute__((neon_polyvector_type(16))) poly8_t poly8x16_t;
74 typedef __attribute__((neon_polyvector_type(4))) poly16_t poly16x4_t;
75 typedef __attribute__((neon_polyvector_type(8))) poly16_t poly16x8_t;
76 #ifdef __aarch64__
77 typedef __attribute__((neon_polyvector_type(1))) poly64_t poly64x1_t;
78 typedef __attribute__((neon_polyvector_type(2))) poly64_t poly64x2_t;
79 #endif
80 
81 typedef struct int8x8x2_t {
82   int8x8_t val[2];
83 } int8x8x2_t;
84 
85 typedef struct int8x16x2_t {
86   int8x16_t val[2];
87 } int8x16x2_t;
88 
89 typedef struct int16x4x2_t {
90   int16x4_t val[2];
91 } int16x4x2_t;
92 
93 typedef struct int16x8x2_t {
94   int16x8_t val[2];
95 } int16x8x2_t;
96 
97 typedef struct int32x2x2_t {
98   int32x2_t val[2];
99 } int32x2x2_t;
100 
101 typedef struct int32x4x2_t {
102   int32x4_t val[2];
103 } int32x4x2_t;
104 
105 typedef struct int64x1x2_t {
106   int64x1_t val[2];
107 } int64x1x2_t;
108 
109 typedef struct int64x2x2_t {
110   int64x2_t val[2];
111 } int64x2x2_t;
112 
113 typedef struct uint8x8x2_t {
114   uint8x8_t val[2];
115 } uint8x8x2_t;
116 
117 typedef struct uint8x16x2_t {
118   uint8x16_t val[2];
119 } uint8x16x2_t;
120 
121 typedef struct uint16x4x2_t {
122   uint16x4_t val[2];
123 } uint16x4x2_t;
124 
125 typedef struct uint16x8x2_t {
126   uint16x8_t val[2];
127 } uint16x8x2_t;
128 
129 typedef struct uint32x2x2_t {
130   uint32x2_t val[2];
131 } uint32x2x2_t;
132 
133 typedef struct uint32x4x2_t {
134   uint32x4_t val[2];
135 } uint32x4x2_t;
136 
137 typedef struct uint64x1x2_t {
138   uint64x1_t val[2];
139 } uint64x1x2_t;
140 
141 typedef struct uint64x2x2_t {
142   uint64x2_t val[2];
143 } uint64x2x2_t;
144 
145 typedef struct float16x4x2_t {
146   float16x4_t val[2];
147 } float16x4x2_t;
148 
149 typedef struct float16x8x2_t {
150   float16x8_t val[2];
151 } float16x8x2_t;
152 
153 typedef struct float32x2x2_t {
154   float32x2_t val[2];
155 } float32x2x2_t;
156 
157 typedef struct float32x4x2_t {
158   float32x4_t val[2];
159 } float32x4x2_t;
160 
161 #ifdef __aarch64__
162 typedef struct float64x1x2_t {
163   float64x1_t val[2];
164 } float64x1x2_t;
165 
166 typedef struct float64x2x2_t {
167   float64x2_t val[2];
168 } float64x2x2_t;
169 
170 #endif
171 typedef struct poly8x8x2_t {
172   poly8x8_t val[2];
173 } poly8x8x2_t;
174 
175 typedef struct poly8x16x2_t {
176   poly8x16_t val[2];
177 } poly8x16x2_t;
178 
179 typedef struct poly16x4x2_t {
180   poly16x4_t val[2];
181 } poly16x4x2_t;
182 
183 typedef struct poly16x8x2_t {
184   poly16x8_t val[2];
185 } poly16x8x2_t;
186 
187 #ifdef __aarch64__
188 typedef struct poly64x1x2_t {
189   poly64x1_t val[2];
190 } poly64x1x2_t;
191 
192 typedef struct poly64x2x2_t {
193   poly64x2_t val[2];
194 } poly64x2x2_t;
195 
196 #endif
197 typedef struct int8x8x3_t {
198   int8x8_t val[3];
199 } int8x8x3_t;
200 
201 typedef struct int8x16x3_t {
202   int8x16_t val[3];
203 } int8x16x3_t;
204 
205 typedef struct int16x4x3_t {
206   int16x4_t val[3];
207 } int16x4x3_t;
208 
209 typedef struct int16x8x3_t {
210   int16x8_t val[3];
211 } int16x8x3_t;
212 
213 typedef struct int32x2x3_t {
214   int32x2_t val[3];
215 } int32x2x3_t;
216 
217 typedef struct int32x4x3_t {
218   int32x4_t val[3];
219 } int32x4x3_t;
220 
221 typedef struct int64x1x3_t {
222   int64x1_t val[3];
223 } int64x1x3_t;
224 
225 typedef struct int64x2x3_t {
226   int64x2_t val[3];
227 } int64x2x3_t;
228 
229 typedef struct uint8x8x3_t {
230   uint8x8_t val[3];
231 } uint8x8x3_t;
232 
233 typedef struct uint8x16x3_t {
234   uint8x16_t val[3];
235 } uint8x16x3_t;
236 
237 typedef struct uint16x4x3_t {
238   uint16x4_t val[3];
239 } uint16x4x3_t;
240 
241 typedef struct uint16x8x3_t {
242   uint16x8_t val[3];
243 } uint16x8x3_t;
244 
245 typedef struct uint32x2x3_t {
246   uint32x2_t val[3];
247 } uint32x2x3_t;
248 
249 typedef struct uint32x4x3_t {
250   uint32x4_t val[3];
251 } uint32x4x3_t;
252 
253 typedef struct uint64x1x3_t {
254   uint64x1_t val[3];
255 } uint64x1x3_t;
256 
257 typedef struct uint64x2x3_t {
258   uint64x2_t val[3];
259 } uint64x2x3_t;
260 
261 typedef struct float16x4x3_t {
262   float16x4_t val[3];
263 } float16x4x3_t;
264 
265 typedef struct float16x8x3_t {
266   float16x8_t val[3];
267 } float16x8x3_t;
268 
269 typedef struct float32x2x3_t {
270   float32x2_t val[3];
271 } float32x2x3_t;
272 
273 typedef struct float32x4x3_t {
274   float32x4_t val[3];
275 } float32x4x3_t;
276 
277 #ifdef __aarch64__
278 typedef struct float64x1x3_t {
279   float64x1_t val[3];
280 } float64x1x3_t;
281 
282 typedef struct float64x2x3_t {
283   float64x2_t val[3];
284 } float64x2x3_t;
285 
286 #endif
287 typedef struct poly8x8x3_t {
288   poly8x8_t val[3];
289 } poly8x8x3_t;
290 
291 typedef struct poly8x16x3_t {
292   poly8x16_t val[3];
293 } poly8x16x3_t;
294 
295 typedef struct poly16x4x3_t {
296   poly16x4_t val[3];
297 } poly16x4x3_t;
298 
299 typedef struct poly16x8x3_t {
300   poly16x8_t val[3];
301 } poly16x8x3_t;
302 
303 #ifdef __aarch64__
304 typedef struct poly64x1x3_t {
305   poly64x1_t val[3];
306 } poly64x1x3_t;
307 
308 typedef struct poly64x2x3_t {
309   poly64x2_t val[3];
310 } poly64x2x3_t;
311 
312 #endif
313 typedef struct int8x8x4_t {
314   int8x8_t val[4];
315 } int8x8x4_t;
316 
317 typedef struct int8x16x4_t {
318   int8x16_t val[4];
319 } int8x16x4_t;
320 
321 typedef struct int16x4x4_t {
322   int16x4_t val[4];
323 } int16x4x4_t;
324 
325 typedef struct int16x8x4_t {
326   int16x8_t val[4];
327 } int16x8x4_t;
328 
329 typedef struct int32x2x4_t {
330   int32x2_t val[4];
331 } int32x2x4_t;
332 
333 typedef struct int32x4x4_t {
334   int32x4_t val[4];
335 } int32x4x4_t;
336 
337 typedef struct int64x1x4_t {
338   int64x1_t val[4];
339 } int64x1x4_t;
340 
341 typedef struct int64x2x4_t {
342   int64x2_t val[4];
343 } int64x2x4_t;
344 
345 typedef struct uint8x8x4_t {
346   uint8x8_t val[4];
347 } uint8x8x4_t;
348 
349 typedef struct uint8x16x4_t {
350   uint8x16_t val[4];
351 } uint8x16x4_t;
352 
353 typedef struct uint16x4x4_t {
354   uint16x4_t val[4];
355 } uint16x4x4_t;
356 
357 typedef struct uint16x8x4_t {
358   uint16x8_t val[4];
359 } uint16x8x4_t;
360 
361 typedef struct uint32x2x4_t {
362   uint32x2_t val[4];
363 } uint32x2x4_t;
364 
365 typedef struct uint32x4x4_t {
366   uint32x4_t val[4];
367 } uint32x4x4_t;
368 
369 typedef struct uint64x1x4_t {
370   uint64x1_t val[4];
371 } uint64x1x4_t;
372 
373 typedef struct uint64x2x4_t {
374   uint64x2_t val[4];
375 } uint64x2x4_t;
376 
377 typedef struct float16x4x4_t {
378   float16x4_t val[4];
379 } float16x4x4_t;
380 
381 typedef struct float16x8x4_t {
382   float16x8_t val[4];
383 } float16x8x4_t;
384 
385 typedef struct float32x2x4_t {
386   float32x2_t val[4];
387 } float32x2x4_t;
388 
389 typedef struct float32x4x4_t {
390   float32x4_t val[4];
391 } float32x4x4_t;
392 
393 #ifdef __aarch64__
394 typedef struct float64x1x4_t {
395   float64x1_t val[4];
396 } float64x1x4_t;
397 
398 typedef struct float64x2x4_t {
399   float64x2_t val[4];
400 } float64x2x4_t;
401 
402 #endif
403 typedef struct poly8x8x4_t {
404   poly8x8_t val[4];
405 } poly8x8x4_t;
406 
407 typedef struct poly8x16x4_t {
408   poly8x16_t val[4];
409 } poly8x16x4_t;
410 
411 typedef struct poly16x4x4_t {
412   poly16x4_t val[4];
413 } poly16x4x4_t;
414 
415 typedef struct poly16x8x4_t {
416   poly16x8_t val[4];
417 } poly16x8x4_t;
418 
419 #ifdef __aarch64__
420 typedef struct poly64x1x4_t {
421   poly64x1_t val[4];
422 } poly64x1x4_t;
423 
424 typedef struct poly64x2x4_t {
425   poly64x2_t val[4];
426 } poly64x2x4_t;
427 
428 #endif
429 
430 #define __ai static inline __attribute__((__always_inline__, __nodebug__))
431 
432 #ifdef __LITTLE_ENDIAN__
vabdq_u8(uint8x16_t __p0,uint8x16_t __p1)433 __ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
434   uint8x16_t __ret;
435   __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
436   return __ret;
437 }
438 #else
vabdq_u8(uint8x16_t __p0,uint8x16_t __p1)439 __ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
440   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
441   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
442   uint8x16_t __ret;
443   __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
444   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
445   return __ret;
446 }
__noswap_vabdq_u8(uint8x16_t __p0,uint8x16_t __p1)447 __ai uint8x16_t __noswap_vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
448   uint8x16_t __ret;
449   __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
450   return __ret;
451 }
452 #endif
453 
454 #ifdef __LITTLE_ENDIAN__
vabdq_u32(uint32x4_t __p0,uint32x4_t __p1)455 __ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
456   uint32x4_t __ret;
457   __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
458   return __ret;
459 }
460 #else
vabdq_u32(uint32x4_t __p0,uint32x4_t __p1)461 __ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
462   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
463   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
464   uint32x4_t __ret;
465   __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
466   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
467   return __ret;
468 }
__noswap_vabdq_u32(uint32x4_t __p0,uint32x4_t __p1)469 __ai uint32x4_t __noswap_vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
470   uint32x4_t __ret;
471   __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
472   return __ret;
473 }
474 #endif
475 
476 #ifdef __LITTLE_ENDIAN__
vabdq_u16(uint16x8_t __p0,uint16x8_t __p1)477 __ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
478   uint16x8_t __ret;
479   __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
480   return __ret;
481 }
482 #else
vabdq_u16(uint16x8_t __p0,uint16x8_t __p1)483 __ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
484   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
485   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
486   uint16x8_t __ret;
487   __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
488   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
489   return __ret;
490 }
__noswap_vabdq_u16(uint16x8_t __p0,uint16x8_t __p1)491 __ai uint16x8_t __noswap_vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
492   uint16x8_t __ret;
493   __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
494   return __ret;
495 }
496 #endif
497 
498 #ifdef __LITTLE_ENDIAN__
vabdq_s8(int8x16_t __p0,int8x16_t __p1)499 __ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
500   int8x16_t __ret;
501   __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
502   return __ret;
503 }
504 #else
vabdq_s8(int8x16_t __p0,int8x16_t __p1)505 __ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
506   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
507   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
508   int8x16_t __ret;
509   __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
510   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
511   return __ret;
512 }
__noswap_vabdq_s8(int8x16_t __p0,int8x16_t __p1)513 __ai int8x16_t __noswap_vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
514   int8x16_t __ret;
515   __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
516   return __ret;
517 }
518 #endif
519 
520 #ifdef __LITTLE_ENDIAN__
vabdq_f32(float32x4_t __p0,float32x4_t __p1)521 __ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) {
522   float32x4_t __ret;
523   __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
524   return __ret;
525 }
526 #else
vabdq_f32(float32x4_t __p0,float32x4_t __p1)527 __ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) {
528   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
529   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
530   float32x4_t __ret;
531   __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
532   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
533   return __ret;
534 }
535 #endif
536 
537 #ifdef __LITTLE_ENDIAN__
vabdq_s32(int32x4_t __p0,int32x4_t __p1)538 __ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
539   int32x4_t __ret;
540   __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
541   return __ret;
542 }
543 #else
vabdq_s32(int32x4_t __p0,int32x4_t __p1)544 __ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
545   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
546   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
547   int32x4_t __ret;
548   __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
549   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
550   return __ret;
551 }
__noswap_vabdq_s32(int32x4_t __p0,int32x4_t __p1)552 __ai int32x4_t __noswap_vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
553   int32x4_t __ret;
554   __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
555   return __ret;
556 }
557 #endif
558 
559 #ifdef __LITTLE_ENDIAN__
vabdq_s16(int16x8_t __p0,int16x8_t __p1)560 __ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
561   int16x8_t __ret;
562   __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
563   return __ret;
564 }
565 #else
vabdq_s16(int16x8_t __p0,int16x8_t __p1)566 __ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
567   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
568   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
569   int16x8_t __ret;
570   __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
571   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
572   return __ret;
573 }
__noswap_vabdq_s16(int16x8_t __p0,int16x8_t __p1)574 __ai int16x8_t __noswap_vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
575   int16x8_t __ret;
576   __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
577   return __ret;
578 }
579 #endif
580 
581 #ifdef __LITTLE_ENDIAN__
vabd_u8(uint8x8_t __p0,uint8x8_t __p1)582 __ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
583   uint8x8_t __ret;
584   __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
585   return __ret;
586 }
587 #else
vabd_u8(uint8x8_t __p0,uint8x8_t __p1)588 __ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
589   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
590   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
591   uint8x8_t __ret;
592   __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
593   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
594   return __ret;
595 }
__noswap_vabd_u8(uint8x8_t __p0,uint8x8_t __p1)596 __ai uint8x8_t __noswap_vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
597   uint8x8_t __ret;
598   __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
599   return __ret;
600 }
601 #endif
602 
603 #ifdef __LITTLE_ENDIAN__
vabd_u32(uint32x2_t __p0,uint32x2_t __p1)604 __ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
605   uint32x2_t __ret;
606   __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
607   return __ret;
608 }
609 #else
vabd_u32(uint32x2_t __p0,uint32x2_t __p1)610 __ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
611   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
612   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
613   uint32x2_t __ret;
614   __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
615   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
616   return __ret;
617 }
__noswap_vabd_u32(uint32x2_t __p0,uint32x2_t __p1)618 __ai uint32x2_t __noswap_vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
619   uint32x2_t __ret;
620   __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
621   return __ret;
622 }
623 #endif
624 
625 #ifdef __LITTLE_ENDIAN__
vabd_u16(uint16x4_t __p0,uint16x4_t __p1)626 __ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
627   uint16x4_t __ret;
628   __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
629   return __ret;
630 }
631 #else
vabd_u16(uint16x4_t __p0,uint16x4_t __p1)632 __ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
633   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
634   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
635   uint16x4_t __ret;
636   __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
637   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
638   return __ret;
639 }
__noswap_vabd_u16(uint16x4_t __p0,uint16x4_t __p1)640 __ai uint16x4_t __noswap_vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
641   uint16x4_t __ret;
642   __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
643   return __ret;
644 }
645 #endif
646 
647 #ifdef __LITTLE_ENDIAN__
vabd_s8(int8x8_t __p0,int8x8_t __p1)648 __ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) {
649   int8x8_t __ret;
650   __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
651   return __ret;
652 }
653 #else
vabd_s8(int8x8_t __p0,int8x8_t __p1)654 __ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) {
655   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
656   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
657   int8x8_t __ret;
658   __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
659   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
660   return __ret;
661 }
__noswap_vabd_s8(int8x8_t __p0,int8x8_t __p1)662 __ai int8x8_t __noswap_vabd_s8(int8x8_t __p0, int8x8_t __p1) {
663   int8x8_t __ret;
664   __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
665   return __ret;
666 }
667 #endif
668 
669 #ifdef __LITTLE_ENDIAN__
vabd_f32(float32x2_t __p0,float32x2_t __p1)670 __ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) {
671   float32x2_t __ret;
672   __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
673   return __ret;
674 }
675 #else
vabd_f32(float32x2_t __p0,float32x2_t __p1)676 __ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) {
677   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
678   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
679   float32x2_t __ret;
680   __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
681   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
682   return __ret;
683 }
684 #endif
685 
686 #ifdef __LITTLE_ENDIAN__
vabd_s32(int32x2_t __p0,int32x2_t __p1)687 __ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) {
688   int32x2_t __ret;
689   __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
690   return __ret;
691 }
692 #else
vabd_s32(int32x2_t __p0,int32x2_t __p1)693 __ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) {
694   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
695   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
696   int32x2_t __ret;
697   __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
698   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
699   return __ret;
700 }
__noswap_vabd_s32(int32x2_t __p0,int32x2_t __p1)701 __ai int32x2_t __noswap_vabd_s32(int32x2_t __p0, int32x2_t __p1) {
702   int32x2_t __ret;
703   __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
704   return __ret;
705 }
706 #endif
707 
708 #ifdef __LITTLE_ENDIAN__
vabd_s16(int16x4_t __p0,int16x4_t __p1)709 __ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) {
710   int16x4_t __ret;
711   __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
712   return __ret;
713 }
714 #else
vabd_s16(int16x4_t __p0,int16x4_t __p1)715 __ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) {
716   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
717   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
718   int16x4_t __ret;
719   __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
720   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
721   return __ret;
722 }
__noswap_vabd_s16(int16x4_t __p0,int16x4_t __p1)723 __ai int16x4_t __noswap_vabd_s16(int16x4_t __p0, int16x4_t __p1) {
724   int16x4_t __ret;
725   __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
726   return __ret;
727 }
728 #endif
729 
730 #ifdef __LITTLE_ENDIAN__
vabsq_s8(int8x16_t __p0)731 __ai int8x16_t vabsq_s8(int8x16_t __p0) {
732   int8x16_t __ret;
733   __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 32);
734   return __ret;
735 }
736 #else
vabsq_s8(int8x16_t __p0)737 __ai int8x16_t vabsq_s8(int8x16_t __p0) {
738   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
739   int8x16_t __ret;
740   __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 32);
741   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
742   return __ret;
743 }
744 #endif
745 
746 #ifdef __LITTLE_ENDIAN__
vabsq_f32(float32x4_t __p0)747 __ai float32x4_t vabsq_f32(float32x4_t __p0) {
748   float32x4_t __ret;
749   __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 41);
750   return __ret;
751 }
752 #else
vabsq_f32(float32x4_t __p0)753 __ai float32x4_t vabsq_f32(float32x4_t __p0) {
754   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
755   float32x4_t __ret;
756   __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 41);
757   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
758   return __ret;
759 }
760 #endif
761 
762 #ifdef __LITTLE_ENDIAN__
vabsq_s32(int32x4_t __p0)763 __ai int32x4_t vabsq_s32(int32x4_t __p0) {
764   int32x4_t __ret;
765   __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 34);
766   return __ret;
767 }
768 #else
vabsq_s32(int32x4_t __p0)769 __ai int32x4_t vabsq_s32(int32x4_t __p0) {
770   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
771   int32x4_t __ret;
772   __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 34);
773   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
774   return __ret;
775 }
776 #endif
777 
778 #ifdef __LITTLE_ENDIAN__
vabsq_s16(int16x8_t __p0)779 __ai int16x8_t vabsq_s16(int16x8_t __p0) {
780   int16x8_t __ret;
781   __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 33);
782   return __ret;
783 }
784 #else
vabsq_s16(int16x8_t __p0)785 __ai int16x8_t vabsq_s16(int16x8_t __p0) {
786   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
787   int16x8_t __ret;
788   __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 33);
789   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
790   return __ret;
791 }
792 #endif
793 
794 #ifdef __LITTLE_ENDIAN__
vabs_s8(int8x8_t __p0)795 __ai int8x8_t vabs_s8(int8x8_t __p0) {
796   int8x8_t __ret;
797   __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__p0, 0);
798   return __ret;
799 }
800 #else
vabs_s8(int8x8_t __p0)801 __ai int8x8_t vabs_s8(int8x8_t __p0) {
802   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
803   int8x8_t __ret;
804   __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 0);
805   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
806   return __ret;
807 }
808 #endif
809 
810 #ifdef __LITTLE_ENDIAN__
vabs_f32(float32x2_t __p0)811 __ai float32x2_t vabs_f32(float32x2_t __p0) {
812   float32x2_t __ret;
813   __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 9);
814   return __ret;
815 }
816 #else
vabs_f32(float32x2_t __p0)817 __ai float32x2_t vabs_f32(float32x2_t __p0) {
818   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
819   float32x2_t __ret;
820   __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 9);
821   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
822   return __ret;
823 }
824 #endif
825 
826 #ifdef __LITTLE_ENDIAN__
vabs_s32(int32x2_t __p0)827 __ai int32x2_t vabs_s32(int32x2_t __p0) {
828   int32x2_t __ret;
829   __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 2);
830   return __ret;
831 }
832 #else
vabs_s32(int32x2_t __p0)833 __ai int32x2_t vabs_s32(int32x2_t __p0) {
834   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
835   int32x2_t __ret;
836   __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 2);
837   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
838   return __ret;
839 }
840 #endif
841 
842 #ifdef __LITTLE_ENDIAN__
vabs_s16(int16x4_t __p0)843 __ai int16x4_t vabs_s16(int16x4_t __p0) {
844   int16x4_t __ret;
845   __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 1);
846   return __ret;
847 }
848 #else
vabs_s16(int16x4_t __p0)849 __ai int16x4_t vabs_s16(int16x4_t __p0) {
850   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
851   int16x4_t __ret;
852   __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 1);
853   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
854   return __ret;
855 }
856 #endif
857 
858 #ifdef __LITTLE_ENDIAN__
vaddq_u8(uint8x16_t __p0,uint8x16_t __p1)859 __ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
860   uint8x16_t __ret;
861   __ret = __p0 + __p1;
862   return __ret;
863 }
864 #else
vaddq_u8(uint8x16_t __p0,uint8x16_t __p1)865 __ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
866   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
867   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
868   uint8x16_t __ret;
869   __ret = __rev0 + __rev1;
870   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
871   return __ret;
872 }
873 #endif
874 
875 #ifdef __LITTLE_ENDIAN__
vaddq_u32(uint32x4_t __p0,uint32x4_t __p1)876 __ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
877   uint32x4_t __ret;
878   __ret = __p0 + __p1;
879   return __ret;
880 }
881 #else
vaddq_u32(uint32x4_t __p0,uint32x4_t __p1)882 __ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
883   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
884   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
885   uint32x4_t __ret;
886   __ret = __rev0 + __rev1;
887   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
888   return __ret;
889 }
890 #endif
891 
892 #ifdef __LITTLE_ENDIAN__
vaddq_u64(uint64x2_t __p0,uint64x2_t __p1)893 __ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
894   uint64x2_t __ret;
895   __ret = __p0 + __p1;
896   return __ret;
897 }
898 #else
vaddq_u64(uint64x2_t __p0,uint64x2_t __p1)899 __ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
900   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
901   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
902   uint64x2_t __ret;
903   __ret = __rev0 + __rev1;
904   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
905   return __ret;
906 }
907 #endif
908 
909 #ifdef __LITTLE_ENDIAN__
vaddq_u16(uint16x8_t __p0,uint16x8_t __p1)910 __ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
911   uint16x8_t __ret;
912   __ret = __p0 + __p1;
913   return __ret;
914 }
915 #else
vaddq_u16(uint16x8_t __p0,uint16x8_t __p1)916 __ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
917   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
918   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
919   uint16x8_t __ret;
920   __ret = __rev0 + __rev1;
921   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
922   return __ret;
923 }
924 #endif
925 
926 #ifdef __LITTLE_ENDIAN__
vaddq_s8(int8x16_t __p0,int8x16_t __p1)927 __ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) {
928   int8x16_t __ret;
929   __ret = __p0 + __p1;
930   return __ret;
931 }
932 #else
vaddq_s8(int8x16_t __p0,int8x16_t __p1)933 __ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) {
934   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
935   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
936   int8x16_t __ret;
937   __ret = __rev0 + __rev1;
938   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
939   return __ret;
940 }
941 #endif
942 
943 #ifdef __LITTLE_ENDIAN__
vaddq_f32(float32x4_t __p0,float32x4_t __p1)944 __ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) {
945   float32x4_t __ret;
946   __ret = __p0 + __p1;
947   return __ret;
948 }
949 #else
vaddq_f32(float32x4_t __p0,float32x4_t __p1)950 __ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) {
951   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
952   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
953   float32x4_t __ret;
954   __ret = __rev0 + __rev1;
955   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
956   return __ret;
957 }
958 #endif
959 
960 #ifdef __LITTLE_ENDIAN__
vaddq_s32(int32x4_t __p0,int32x4_t __p1)961 __ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) {
962   int32x4_t __ret;
963   __ret = __p0 + __p1;
964   return __ret;
965 }
966 #else
vaddq_s32(int32x4_t __p0,int32x4_t __p1)967 __ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) {
968   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
969   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
970   int32x4_t __ret;
971   __ret = __rev0 + __rev1;
972   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
973   return __ret;
974 }
975 #endif
976 
977 #ifdef __LITTLE_ENDIAN__
vaddq_s64(int64x2_t __p0,int64x2_t __p1)978 __ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) {
979   int64x2_t __ret;
980   __ret = __p0 + __p1;
981   return __ret;
982 }
983 #else
vaddq_s64(int64x2_t __p0,int64x2_t __p1)984 __ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) {
985   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
986   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
987   int64x2_t __ret;
988   __ret = __rev0 + __rev1;
989   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
990   return __ret;
991 }
992 #endif
993 
994 #ifdef __LITTLE_ENDIAN__
vaddq_s16(int16x8_t __p0,int16x8_t __p1)995 __ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) {
996   int16x8_t __ret;
997   __ret = __p0 + __p1;
998   return __ret;
999 }
1000 #else
vaddq_s16(int16x8_t __p0,int16x8_t __p1)1001 __ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) {
1002   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
1003   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
1004   int16x8_t __ret;
1005   __ret = __rev0 + __rev1;
1006   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
1007   return __ret;
1008 }
1009 #endif
1010 
1011 #ifdef __LITTLE_ENDIAN__
vadd_u8(uint8x8_t __p0,uint8x8_t __p1)1012 __ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
1013   uint8x8_t __ret;
1014   __ret = __p0 + __p1;
1015   return __ret;
1016 }
1017 #else
vadd_u8(uint8x8_t __p0,uint8x8_t __p1)1018 __ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
1019   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
1020   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
1021   uint8x8_t __ret;
1022   __ret = __rev0 + __rev1;
1023   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
1024   return __ret;
1025 }
1026 #endif
1027 
1028 #ifdef __LITTLE_ENDIAN__
vadd_u32(uint32x2_t __p0,uint32x2_t __p1)1029 __ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
1030   uint32x2_t __ret;
1031   __ret = __p0 + __p1;
1032   return __ret;
1033 }
1034 #else
vadd_u32(uint32x2_t __p0,uint32x2_t __p1)1035 __ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
1036   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
1037   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
1038   uint32x2_t __ret;
1039   __ret = __rev0 + __rev1;
1040   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
1041   return __ret;
1042 }
1043 #endif
1044 
1045 #ifdef __LITTLE_ENDIAN__
vadd_u64(uint64x1_t __p0,uint64x1_t __p1)1046 __ai uint64x1_t vadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
1047   uint64x1_t __ret;
1048   __ret = __p0 + __p1;
1049   return __ret;
1050 }
1051 #else
vadd_u64(uint64x1_t __p0,uint64x1_t __p1)1052 __ai uint64x1_t vadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
1053   uint64x1_t __ret;
1054   __ret = __p0 + __p1;
1055   return __ret;
1056 }
1057 #endif
1058 
1059 #ifdef __LITTLE_ENDIAN__
vadd_u16(uint16x4_t __p0,uint16x4_t __p1)1060 __ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
1061   uint16x4_t __ret;
1062   __ret = __p0 + __p1;
1063   return __ret;
1064 }
1065 #else
vadd_u16(uint16x4_t __p0,uint16x4_t __p1)1066 __ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
1067   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
1068   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
1069   uint16x4_t __ret;
1070   __ret = __rev0 + __rev1;
1071   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
1072   return __ret;
1073 }
1074 #endif
1075 
1076 #ifdef __LITTLE_ENDIAN__
vadd_s8(int8x8_t __p0,int8x8_t __p1)1077 __ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) {
1078   int8x8_t __ret;
1079   __ret = __p0 + __p1;
1080   return __ret;
1081 }
1082 #else
vadd_s8(int8x8_t __p0,int8x8_t __p1)1083 __ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) {
1084   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
1085   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
1086   int8x8_t __ret;
1087   __ret = __rev0 + __rev1;
1088   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
1089   return __ret;
1090 }
1091 #endif
1092 
1093 #ifdef __LITTLE_ENDIAN__
vadd_f32(float32x2_t __p0,float32x2_t __p1)1094 __ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) {
1095   float32x2_t __ret;
1096   __ret = __p0 + __p1;
1097   return __ret;
1098 }
1099 #else
vadd_f32(float32x2_t __p0,float32x2_t __p1)1100 __ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) {
1101   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
1102   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
1103   float32x2_t __ret;
1104   __ret = __rev0 + __rev1;
1105   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
1106   return __ret;
1107 }
1108 #endif
1109 
1110 #ifdef __LITTLE_ENDIAN__
vadd_s32(int32x2_t __p0,int32x2_t __p1)1111 __ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) {
1112   int32x2_t __ret;
1113   __ret = __p0 + __p1;
1114   return __ret;
1115 }
1116 #else
vadd_s32(int32x2_t __p0,int32x2_t __p1)1117 __ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) {
1118   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
1119   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
1120   int32x2_t __ret;
1121   __ret = __rev0 + __rev1;
1122   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
1123   return __ret;
1124 }
1125 #endif
1126 
1127 #ifdef __LITTLE_ENDIAN__
vadd_s64(int64x1_t __p0,int64x1_t __p1)1128 __ai int64x1_t vadd_s64(int64x1_t __p0, int64x1_t __p1) {
1129   int64x1_t __ret;
1130   __ret = __p0 + __p1;
1131   return __ret;
1132 }
1133 #else
vadd_s64(int64x1_t __p0,int64x1_t __p1)1134 __ai int64x1_t vadd_s64(int64x1_t __p0, int64x1_t __p1) {
1135   int64x1_t __ret;
1136   __ret = __p0 + __p1;
1137   return __ret;
1138 }
1139 #endif
1140 
1141 #ifdef __LITTLE_ENDIAN__
vadd_s16(int16x4_t __p0,int16x4_t __p1)1142 __ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) {
1143   int16x4_t __ret;
1144   __ret = __p0 + __p1;
1145   return __ret;
1146 }
1147 #else
vadd_s16(int16x4_t __p0,int16x4_t __p1)1148 __ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) {
1149   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
1150   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
1151   int16x4_t __ret;
1152   __ret = __rev0 + __rev1;
1153   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
1154   return __ret;
1155 }
1156 #endif
1157 
1158 #ifdef __LITTLE_ENDIAN__
vaddhn_u32(uint32x4_t __p0,uint32x4_t __p1)1159 __ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
1160   uint16x4_t __ret;
1161   __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
1162   return __ret;
1163 }
1164 #else
vaddhn_u32(uint32x4_t __p0,uint32x4_t __p1)1165 __ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
1166   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
1167   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
1168   uint16x4_t __ret;
1169   __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
1170   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
1171   return __ret;
1172 }
__noswap_vaddhn_u32(uint32x4_t __p0,uint32x4_t __p1)1173 __ai uint16x4_t __noswap_vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
1174   uint16x4_t __ret;
1175   __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
1176   return __ret;
1177 }
1178 #endif
1179 
1180 #ifdef __LITTLE_ENDIAN__
vaddhn_u64(uint64x2_t __p0,uint64x2_t __p1)1181 __ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
1182   uint32x2_t __ret;
1183   __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
1184   return __ret;
1185 }
1186 #else
vaddhn_u64(uint64x2_t __p0,uint64x2_t __p1)1187 __ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
1188   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
1189   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
1190   uint32x2_t __ret;
1191   __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
1192   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
1193   return __ret;
1194 }
__noswap_vaddhn_u64(uint64x2_t __p0,uint64x2_t __p1)1195 __ai uint32x2_t __noswap_vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
1196   uint32x2_t __ret;
1197   __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
1198   return __ret;
1199 }
1200 #endif
1201 
1202 #ifdef __LITTLE_ENDIAN__
vaddhn_u16(uint16x8_t __p0,uint16x8_t __p1)1203 __ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
1204   uint8x8_t __ret;
1205   __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
1206   return __ret;
1207 }
1208 #else
vaddhn_u16(uint16x8_t __p0,uint16x8_t __p1)1209 __ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
1210   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
1211   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
1212   uint8x8_t __ret;
1213   __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
1214   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
1215   return __ret;
1216 }
__noswap_vaddhn_u16(uint16x8_t __p0,uint16x8_t __p1)1217 __ai uint8x8_t __noswap_vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
1218   uint8x8_t __ret;
1219   __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
1220   return __ret;
1221 }
1222 #endif
1223 
1224 #ifdef __LITTLE_ENDIAN__
vaddhn_s32(int32x4_t __p0,int32x4_t __p1)1225 __ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
1226   int16x4_t __ret;
1227   __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
1228   return __ret;
1229 }
1230 #else
vaddhn_s32(int32x4_t __p0,int32x4_t __p1)1231 __ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
1232   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
1233   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
1234   int16x4_t __ret;
1235   __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
1236   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
1237   return __ret;
1238 }
__noswap_vaddhn_s32(int32x4_t __p0,int32x4_t __p1)1239 __ai int16x4_t __noswap_vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
1240   int16x4_t __ret;
1241   __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
1242   return __ret;
1243 }
1244 #endif
1245 
1246 #ifdef __LITTLE_ENDIAN__
vaddhn_s64(int64x2_t __p0,int64x2_t __p1)1247 __ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
1248   int32x2_t __ret;
1249   __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
1250   return __ret;
1251 }
1252 #else
vaddhn_s64(int64x2_t __p0,int64x2_t __p1)1253 __ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
1254   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
1255   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
1256   int32x2_t __ret;
1257   __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
1258   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
1259   return __ret;
1260 }
__noswap_vaddhn_s64(int64x2_t __p0,int64x2_t __p1)1261 __ai int32x2_t __noswap_vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
1262   int32x2_t __ret;
1263   __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
1264   return __ret;
1265 }
1266 #endif
1267 
1268 #ifdef __LITTLE_ENDIAN__
vaddhn_s16(int16x8_t __p0,int16x8_t __p1)1269 __ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
1270   int8x8_t __ret;
1271   __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
1272   return __ret;
1273 }
1274 #else
vaddhn_s16(int16x8_t __p0,int16x8_t __p1)1275 __ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
1276   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
1277   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
1278   int8x8_t __ret;
1279   __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
1280   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
1281   return __ret;
1282 }
__noswap_vaddhn_s16(int16x8_t __p0,int16x8_t __p1)1283 __ai int8x8_t __noswap_vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
1284   int8x8_t __ret;
1285   __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
1286   return __ret;
1287 }
1288 #endif
1289 
1290 #ifdef __LITTLE_ENDIAN__
vandq_u8(uint8x16_t __p0,uint8x16_t __p1)1291 __ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) {
1292   uint8x16_t __ret;
1293   __ret = __p0 & __p1;
1294   return __ret;
1295 }
1296 #else
vandq_u8(uint8x16_t __p0,uint8x16_t __p1)1297 __ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) {
1298   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1299   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1300   uint8x16_t __ret;
1301   __ret = __rev0 & __rev1;
1302   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1303   return __ret;
1304 }
1305 #endif
1306 
1307 #ifdef __LITTLE_ENDIAN__
vandq_u32(uint32x4_t __p0,uint32x4_t __p1)1308 __ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) {
1309   uint32x4_t __ret;
1310   __ret = __p0 & __p1;
1311   return __ret;
1312 }
1313 #else
vandq_u32(uint32x4_t __p0,uint32x4_t __p1)1314 __ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) {
1315   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
1316   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
1317   uint32x4_t __ret;
1318   __ret = __rev0 & __rev1;
1319   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
1320   return __ret;
1321 }
1322 #endif
1323 
1324 #ifdef __LITTLE_ENDIAN__
vandq_u64(uint64x2_t __p0,uint64x2_t __p1)1325 __ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) {
1326   uint64x2_t __ret;
1327   __ret = __p0 & __p1;
1328   return __ret;
1329 }
1330 #else
vandq_u64(uint64x2_t __p0,uint64x2_t __p1)1331 __ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) {
1332   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
1333   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
1334   uint64x2_t __ret;
1335   __ret = __rev0 & __rev1;
1336   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
1337   return __ret;
1338 }
1339 #endif
1340 
1341 #ifdef __LITTLE_ENDIAN__
vandq_u16(uint16x8_t __p0,uint16x8_t __p1)1342 __ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) {
1343   uint16x8_t __ret;
1344   __ret = __p0 & __p1;
1345   return __ret;
1346 }
1347 #else
vandq_u16(uint16x8_t __p0,uint16x8_t __p1)1348 __ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) {
1349   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
1350   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
1351   uint16x8_t __ret;
1352   __ret = __rev0 & __rev1;
1353   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
1354   return __ret;
1355 }
1356 #endif
1357 
1358 #ifdef __LITTLE_ENDIAN__
vandq_s8(int8x16_t __p0,int8x16_t __p1)1359 __ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) {
1360   int8x16_t __ret;
1361   __ret = __p0 & __p1;
1362   return __ret;
1363 }
1364 #else
vandq_s8(int8x16_t __p0,int8x16_t __p1)1365 __ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) {
1366   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1367   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1368   int8x16_t __ret;
1369   __ret = __rev0 & __rev1;
1370   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1371   return __ret;
1372 }
1373 #endif
1374 
1375 #ifdef __LITTLE_ENDIAN__
vandq_s32(int32x4_t __p0,int32x4_t __p1)1376 __ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) {
1377   int32x4_t __ret;
1378   __ret = __p0 & __p1;
1379   return __ret;
1380 }
1381 #else
vandq_s32(int32x4_t __p0,int32x4_t __p1)1382 __ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) {
1383   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
1384   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
1385   int32x4_t __ret;
1386   __ret = __rev0 & __rev1;
1387   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
1388   return __ret;
1389 }
1390 #endif
1391 
1392 #ifdef __LITTLE_ENDIAN__
vandq_s64(int64x2_t __p0,int64x2_t __p1)1393 __ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) {
1394   int64x2_t __ret;
1395   __ret = __p0 & __p1;
1396   return __ret;
1397 }
1398 #else
vandq_s64(int64x2_t __p0,int64x2_t __p1)1399 __ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) {
1400   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
1401   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
1402   int64x2_t __ret;
1403   __ret = __rev0 & __rev1;
1404   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
1405   return __ret;
1406 }
1407 #endif
1408 
1409 #ifdef __LITTLE_ENDIAN__
vandq_s16(int16x8_t __p0,int16x8_t __p1)1410 __ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) {
1411   int16x8_t __ret;
1412   __ret = __p0 & __p1;
1413   return __ret;
1414 }
1415 #else
vandq_s16(int16x8_t __p0,int16x8_t __p1)1416 __ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) {
1417   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
1418   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
1419   int16x8_t __ret;
1420   __ret = __rev0 & __rev1;
1421   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
1422   return __ret;
1423 }
1424 #endif
1425 
1426 #ifdef __LITTLE_ENDIAN__
vand_u8(uint8x8_t __p0,uint8x8_t __p1)1427 __ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) {
1428   uint8x8_t __ret;
1429   __ret = __p0 & __p1;
1430   return __ret;
1431 }
1432 #else
vand_u8(uint8x8_t __p0,uint8x8_t __p1)1433 __ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) {
1434   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
1435   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
1436   uint8x8_t __ret;
1437   __ret = __rev0 & __rev1;
1438   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
1439   return __ret;
1440 }
1441 #endif
1442 
1443 #ifdef __LITTLE_ENDIAN__
vand_u32(uint32x2_t __p0,uint32x2_t __p1)1444 __ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) {
1445   uint32x2_t __ret;
1446   __ret = __p0 & __p1;
1447   return __ret;
1448 }
1449 #else
vand_u32(uint32x2_t __p0,uint32x2_t __p1)1450 __ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) {
1451   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
1452   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
1453   uint32x2_t __ret;
1454   __ret = __rev0 & __rev1;
1455   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
1456   return __ret;
1457 }
1458 #endif
1459 
1460 #ifdef __LITTLE_ENDIAN__
vand_u64(uint64x1_t __p0,uint64x1_t __p1)1461 __ai uint64x1_t vand_u64(uint64x1_t __p0, uint64x1_t __p1) {
1462   uint64x1_t __ret;
1463   __ret = __p0 & __p1;
1464   return __ret;
1465 }
1466 #else
vand_u64(uint64x1_t __p0,uint64x1_t __p1)1467 __ai uint64x1_t vand_u64(uint64x1_t __p0, uint64x1_t __p1) {
1468   uint64x1_t __ret;
1469   __ret = __p0 & __p1;
1470   return __ret;
1471 }
1472 #endif
1473 
1474 #ifdef __LITTLE_ENDIAN__
vand_u16(uint16x4_t __p0,uint16x4_t __p1)1475 __ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) {
1476   uint16x4_t __ret;
1477   __ret = __p0 & __p1;
1478   return __ret;
1479 }
1480 #else
vand_u16(uint16x4_t __p0,uint16x4_t __p1)1481 __ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) {
1482   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
1483   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
1484   uint16x4_t __ret;
1485   __ret = __rev0 & __rev1;
1486   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
1487   return __ret;
1488 }
1489 #endif
1490 
1491 #ifdef __LITTLE_ENDIAN__
vand_s8(int8x8_t __p0,int8x8_t __p1)1492 __ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) {
1493   int8x8_t __ret;
1494   __ret = __p0 & __p1;
1495   return __ret;
1496 }
1497 #else
vand_s8(int8x8_t __p0,int8x8_t __p1)1498 __ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) {
1499   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
1500   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
1501   int8x8_t __ret;
1502   __ret = __rev0 & __rev1;
1503   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
1504   return __ret;
1505 }
1506 #endif
1507 
1508 #ifdef __LITTLE_ENDIAN__
1509 __ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) {
1510   int32x2_t __ret;
1511   __ret = __p0 & __p1;
1512   return __ret;
1513 }
1514 #else
1515 __ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) {
1516   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
1517   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
1518   int32x2_t __ret;
1519   __ret = __rev0 & __rev1;
1520   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
1521   return __ret;
1522 }
1523 #endif
1524 
1525 #ifdef __LITTLE_ENDIAN__
1526 __ai int64x1_t vand_s64(int64x1_t __p0, int64x1_t __p1) {
1527   int64x1_t __ret;
1528   __ret = __p0 & __p1;
1529   return __ret;
1530 }
1531 #else
1532 __ai int64x1_t vand_s64(int64x1_t __p0, int64x1_t __p1) {
1533   int64x1_t __ret;
1534   __ret = __p0 & __p1;
1535   return __ret;
1536 }
1537 #endif
1538 
1539 #ifdef __LITTLE_ENDIAN__
1540 __ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) {
1541   int16x4_t __ret;
1542   __ret = __p0 & __p1;
1543   return __ret;
1544 }
1545 #else
1546 __ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) {
1547   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
1548   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
1549   int16x4_t __ret;
1550   __ret = __rev0 & __rev1;
1551   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
1552   return __ret;
1553 }
1554 #endif
1555 
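/* vbic/vbicq (Bitwise Clear): per-lane __p0 & ~__p1, clearing in the first
 * operand every bit that is set in the second operand. */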
1556 #ifdef __LITTLE_ENDIAN__
1557 __ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) {
1558   uint8x16_t __ret;
1559   __ret = __p0 & ~__p1;
1560   return __ret;
1561 }
1562 #else
1563 __ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) {
1564   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1565   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1566   uint8x16_t __ret;
1567   __ret = __rev0 & ~__rev1;
1568   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1569   return __ret;
1570 }
1571 #endif
1572 
1573 #ifdef __LITTLE_ENDIAN__
1574 __ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) {
1575   uint32x4_t __ret;
1576   __ret = __p0 & ~__p1;
1577   return __ret;
1578 }
1579 #else
1580 __ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) {
1581   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
1582   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
1583   uint32x4_t __ret;
1584   __ret = __rev0 & ~__rev1;
1585   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
1586   return __ret;
1587 }
1588 #endif
1589 
1590 #ifdef __LITTLE_ENDIAN__
1591 __ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) {
1592   uint64x2_t __ret;
1593   __ret = __p0 & ~__p1;
1594   return __ret;
1595 }
1596 #else
1597 __ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) {
1598   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
1599   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
1600   uint64x2_t __ret;
1601   __ret = __rev0 & ~__rev1;
1602   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
1603   return __ret;
1604 }
1605 #endif
1606 
1607 #ifdef __LITTLE_ENDIAN__
1608 __ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) {
1609   uint16x8_t __ret;
1610   __ret = __p0 & ~__p1;
1611   return __ret;
1612 }
1613 #else
1614 __ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) {
1615   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
1616   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
1617   uint16x8_t __ret;
1618   __ret = __rev0 & ~__rev1;
1619   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
1620   return __ret;
1621 }
1622 #endif
1623 
1624 #ifdef __LITTLE_ENDIAN__
1625 __ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) {
1626   int8x16_t __ret;
1627   __ret = __p0 & ~__p1;
1628   return __ret;
1629 }
1630 #else
1631 __ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) {
1632   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1633   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1634   int8x16_t __ret;
1635   __ret = __rev0 & ~__rev1;
1636   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1637   return __ret;
1638 }
1639 #endif
1640 
1641 #ifdef __LITTLE_ENDIAN__
1642 __ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) {
1643   int32x4_t __ret;
1644   __ret = __p0 & ~__p1;
1645   return __ret;
1646 }
1647 #else
1648 __ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) {
1649   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
1650   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
1651   int32x4_t __ret;
1652   __ret = __rev0 & ~__rev1;
1653   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
1654   return __ret;
1655 }
1656 #endif
1657 
1658 #ifdef __LITTLE_ENDIAN__
1659 __ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) {
1660   int64x2_t __ret;
1661   __ret = __p0 & ~__p1;
1662   return __ret;
1663 }
1664 #else
1665 __ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) {
1666   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
1667   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
1668   int64x2_t __ret;
1669   __ret = __rev0 & ~__rev1;
1670   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
1671   return __ret;
1672 }
1673 #endif
1674 
1675 #ifdef __LITTLE_ENDIAN__
1676 __ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) {
1677   int16x8_t __ret;
1678   __ret = __p0 & ~__p1;
1679   return __ret;
1680 }
1681 #else
1682 __ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) {
1683   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
1684   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
1685   int16x8_t __ret;
1686   __ret = __rev0 & ~__rev1;
1687   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
1688   return __ret;
1689 }
1690 #endif
1691 
1692 #ifdef __LITTLE_ENDIAN__
1693 __ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) {
1694   uint8x8_t __ret;
1695   __ret = __p0 & ~__p1;
1696   return __ret;
1697 }
1698 #else
1699 __ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) {
1700   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
1701   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
1702   uint8x8_t __ret;
1703   __ret = __rev0 & ~__rev1;
1704   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
1705   return __ret;
1706 }
1707 #endif
1708 
1709 #ifdef __LITTLE_ENDIAN__
1710 __ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) {
1711   uint32x2_t __ret;
1712   __ret = __p0 & ~__p1;
1713   return __ret;
1714 }
1715 #else
1716 __ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) {
1717   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
1718   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
1719   uint32x2_t __ret;
1720   __ret = __rev0 & ~__rev1;
1721   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
1722   return __ret;
1723 }
1724 #endif
1725 
1726 #ifdef __LITTLE_ENDIAN__
1727 __ai uint64x1_t vbic_u64(uint64x1_t __p0, uint64x1_t __p1) {
1728   uint64x1_t __ret;
1729   __ret = __p0 & ~__p1;
1730   return __ret;
1731 }
1732 #else
1733 __ai uint64x1_t vbic_u64(uint64x1_t __p0, uint64x1_t __p1) {
1734   uint64x1_t __ret;
1735   __ret = __p0 & ~__p1;
1736   return __ret;
1737 }
1738 #endif
1739 
1740 #ifdef __LITTLE_ENDIAN__
1741 __ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) {
1742   uint16x4_t __ret;
1743   __ret = __p0 & ~__p1;
1744   return __ret;
1745 }
1746 #else
1747 __ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) {
1748   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
1749   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
1750   uint16x4_t __ret;
1751   __ret = __rev0 & ~__rev1;
1752   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
1753   return __ret;
1754 }
1755 #endif
1756 
1757 #ifdef __LITTLE_ENDIAN__
1758 __ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) {
1759   int8x8_t __ret;
1760   __ret = __p0 & ~__p1;
1761   return __ret;
1762 }
1763 #else
1764 __ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) {
1765   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
1766   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
1767   int8x8_t __ret;
1768   __ret = __rev0 & ~__rev1;
1769   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
1770   return __ret;
1771 }
1772 #endif
1773 
1774 #ifdef __LITTLE_ENDIAN__
1775 __ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) {
1776   int32x2_t __ret;
1777   __ret = __p0 & ~__p1;
1778   return __ret;
1779 }
1780 #else
1781 __ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) {
1782   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
1783   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
1784   int32x2_t __ret;
1785   __ret = __rev0 & ~__rev1;
1786   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
1787   return __ret;
1788 }
1789 #endif
1790 
1791 #ifdef __LITTLE_ENDIAN__
1792 __ai int64x1_t vbic_s64(int64x1_t __p0, int64x1_t __p1) {
1793   int64x1_t __ret;
1794   __ret = __p0 & ~__p1;
1795   return __ret;
1796 }
1797 #else
1798 __ai int64x1_t vbic_s64(int64x1_t __p0, int64x1_t __p1) {
1799   int64x1_t __ret;
1800   __ret = __p0 & ~__p1;
1801   return __ret;
1802 }
1803 #endif
1804 
1805 #ifdef __LITTLE_ENDIAN__
1806 __ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) {
1807   int16x4_t __ret;
1808   __ret = __p0 & ~__p1;
1809   return __ret;
1810 }
1811 #else
1812 __ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) {
1813   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
1814   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
1815   int16x4_t __ret;
1816   __ret = __rev0 & ~__rev1;
1817   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
1818   return __ret;
1819 }
1820 #endif
1821 
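/* vbsl/vbslq (Bitwise Select): each result bit is taken from __p1 where the
 * mask __p0 has a 1 and from __p2 where it has a 0, i.e.
 * (__p0 & __p1) | (~__p0 & __p2). */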
1822 #ifdef __LITTLE_ENDIAN__
1823 __ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) {
1824   poly8x8_t __ret;
1825   __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4);
1826   return __ret;
1827 }
1828 #else
1829 __ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) {
1830   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
1831   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
1832   poly8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
1833   poly8x8_t __ret;
1834   __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4);
1835   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
1836   return __ret;
1837 }
1838 #endif
1839 
1840 #ifdef __LITTLE_ENDIAN__
1841 __ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) {
1842   poly16x4_t __ret;
1843   __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 5);
1844   return __ret;
1845 }
1846 #else
1847 __ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) {
1848   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
1849   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
1850   poly16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
1851   poly16x4_t __ret;
1852   __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 5);
1853   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
1854   return __ret;
1855 }
1856 #endif
1857 
1858 #ifdef __LITTLE_ENDIAN__
1859 __ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) {
1860   poly8x16_t __ret;
1861   __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36);
1862   return __ret;
1863 }
1864 #else
1865 __ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) {
1866   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1867   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1868   poly8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1869   poly8x16_t __ret;
1870   __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36);
1871   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1872   return __ret;
1873 }
1874 #endif
1875 
1876 #ifdef __LITTLE_ENDIAN__
1877 __ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) {
1878   poly16x8_t __ret;
1879   __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 37);
1880   return __ret;
1881 }
1882 #else
1883 __ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) {
1884   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
1885   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
1886   poly16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
1887   poly16x8_t __ret;
1888   __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 37);
1889   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
1890   return __ret;
1891 }
1892 #endif
1893 
1894 #ifdef __LITTLE_ENDIAN__
1895 __ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
1896   uint8x16_t __ret;
1897   __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48);
1898   return __ret;
1899 }
1900 #else
1901 __ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
1902   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1903   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1904   uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1905   uint8x16_t __ret;
1906   __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
1907   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1908   return __ret;
1909 }
1910 #endif
1911 
1912 #ifdef __LITTLE_ENDIAN__
1913 __ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
1914   uint32x4_t __ret;
1915   __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
1916   return __ret;
1917 }
1918 #else
1919 __ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
1920   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
1921   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
1922   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
1923   uint32x4_t __ret;
1924   __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
1925   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
1926   return __ret;
1927 }
1928 #endif
1929 
1930 #ifdef __LITTLE_ENDIAN__
1931 __ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
1932   uint64x2_t __ret;
1933   __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51);
1934   return __ret;
1935 }
1936 #else
1937 __ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
1938   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
1939   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
1940   uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
1941   uint64x2_t __ret;
1942   __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51);
1943   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
1944   return __ret;
1945 }
1946 #endif
1947 
1948 #ifdef __LITTLE_ENDIAN__
1949 __ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
1950   uint16x8_t __ret;
1951   __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49);
1952   return __ret;
1953 }
1954 #else
1955 __ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
1956   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
1957   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
1958   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
1959   uint16x8_t __ret;
1960   __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49);
1961   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
1962   return __ret;
1963 }
1964 #endif
1965 
1966 #ifdef __LITTLE_ENDIAN__
1967 __ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
1968   int8x16_t __ret;
1969   __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32);
1970   return __ret;
1971 }
1972 #else
1973 __ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
1974   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1975   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1976   int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1977   int8x16_t __ret;
1978   __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
1979   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1980   return __ret;
1981 }
1982 #endif
1983 
1984 #ifdef __LITTLE_ENDIAN__
1985 __ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
1986   float32x4_t __ret;
1987   __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
1988   return __ret;
1989 }
1990 #else
1991 __ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
1992   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
1993   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
1994   float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
1995   float32x4_t __ret;
1996   __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
1997   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
1998   return __ret;
1999 }
2000 #endif
2001 
2002 #ifdef __LITTLE_ENDIAN__
2003 __ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
2004   int32x4_t __ret;
2005   __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
2006   return __ret;
2007 }
2008 #else
2009 __ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
2010   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2011   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2012   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
2013   int32x4_t __ret;
2014   __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
2015   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2016   return __ret;
2017 }
2018 #endif
2019 
2020 #ifdef __LITTLE_ENDIAN__
2021 __ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
2022   int64x2_t __ret;
2023   __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35);
2024   return __ret;
2025 }
2026 #else
2027 __ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
2028   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
2029   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
2030   int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
2031   int64x2_t __ret;
2032   __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35);
2033   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
2034   return __ret;
2035 }
2036 #endif
2037 
2038 #ifdef __LITTLE_ENDIAN__
2039 __ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
2040   int16x8_t __ret;
2041   __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33);
2042   return __ret;
2043 }
2044 #else
2045 __ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
2046   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
2047   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
2048   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
2049   int16x8_t __ret;
2050   __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33);
2051   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
2052   return __ret;
2053 }
2054 #endif
2055 
2056 #ifdef __LITTLE_ENDIAN__
2057 __ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
2058   uint8x8_t __ret;
2059   __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16);
2060   return __ret;
2061 }
2062 #else
2063 __ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
2064   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
2065   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
2066   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
2067   uint8x8_t __ret;
2068   __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16);
2069   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
2070   return __ret;
2071 }
2072 #endif
2073 
2074 #ifdef __LITTLE_ENDIAN__
2075 __ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
2076   uint32x2_t __ret;
2077   __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18);
2078   return __ret;
2079 }
2080 #else
2081 __ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
2082   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
2083   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
2084   uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
2085   uint32x2_t __ret;
2086   __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18);
2087   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
2088   return __ret;
2089 }
2090 #endif
2091 
2092 #ifdef __LITTLE_ENDIAN__
2093 __ai uint64x1_t vbsl_u64(uint64x1_t __p0, uint64x1_t __p1, uint64x1_t __p2) {
2094   uint64x1_t __ret;
2095   __ret = (uint64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 19);
2096   return __ret;
2097 }
2098 #else
2099 __ai uint64x1_t vbsl_u64(uint64x1_t __p0, uint64x1_t __p1, uint64x1_t __p2) {
2100   uint64x1_t __ret;
2101   __ret = (uint64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 19);
2102   return __ret;
2103 }
2104 #endif
2105 
2106 #ifdef __LITTLE_ENDIAN__
2107 __ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
2108   uint16x4_t __ret;
2109   __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 17);
2110   return __ret;
2111 }
2112 #else
2113 __ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
2114   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2115   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2116   uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
2117   uint16x4_t __ret;
2118   __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 17);
2119   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2120   return __ret;
2121 }
2122 #endif
2123 
2124 #ifdef __LITTLE_ENDIAN__
2125 __ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
2126   int8x8_t __ret;
2127   __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0);
2128   return __ret;
2129 }
2130 #else
2131 __ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
2132   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
2133   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
2134   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
2135   int8x8_t __ret;
2136   __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0);
2137   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
2138   return __ret;
2139 }
2140 #endif
2141 
2142 #ifdef __LITTLE_ENDIAN__
2143 __ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
2144   float32x2_t __ret;
2145   __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
2146   return __ret;
2147 }
2148 #else
2149 __ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
2150   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
2151   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
2152   float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
2153   float32x2_t __ret;
2154   __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
2155   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
2156   return __ret;
2157 }
2158 #endif
2159 
2160 #ifdef __LITTLE_ENDIAN__
2161 __ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
2162   int32x2_t __ret;
2163   __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
2164   return __ret;
2165 }
2166 #else
2167 __ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
2168   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
2169   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
2170   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
2171   int32x2_t __ret;
2172   __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
2173   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
2174   return __ret;
2175 }
2176 #endif
2177 
2178 #ifdef __LITTLE_ENDIAN__
2179 __ai int64x1_t vbsl_s64(uint64x1_t __p0, int64x1_t __p1, int64x1_t __p2) {
2180   int64x1_t __ret;
2181   __ret = (int64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 3);
2182   return __ret;
2183 }
2184 #else
2185 __ai int64x1_t vbsl_s64(uint64x1_t __p0, int64x1_t __p1, int64x1_t __p2) {
2186   int64x1_t __ret;
2187   __ret = (int64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 3);
2188   return __ret;
2189 }
2190 #endif
2191 
2192 #ifdef __LITTLE_ENDIAN__
2193 __ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
2194   int16x4_t __ret;
2195   __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1);
2196   return __ret;
2197 }
2198 #else
2199 __ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
2200   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2201   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2202   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
2203   int16x4_t __ret;
2204   __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1);
2205   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2206   return __ret;
2207 }
2208 #endif
2209 
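/* vcage/vcagt/vcale/vcalt (absolute compares): lane-wise comparison of the
 * absolute values |__p0| and |__p1| (>=, >, <=, < respectively); each result
 * lane is all ones where the comparison holds and zero otherwise. */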
2210 #ifdef __LITTLE_ENDIAN__
2211 __ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) {
2212   uint32x4_t __ret;
2213   __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
2214   return __ret;
2215 }
2216 #else
2217 __ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) {
2218   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2219   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2220   uint32x4_t __ret;
2221   __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
2222   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2223   return __ret;
2224 }
2225 #endif
2226 
2227 #ifdef __LITTLE_ENDIAN__
2228 __ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) {
2229   uint32x2_t __ret;
2230   __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
2231   return __ret;
2232 }
2233 #else
2234 __ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) {
2235   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
2236   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
2237   uint32x2_t __ret;
2238   __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
2239   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
2240   return __ret;
2241 }
2242 #endif
2243 
2244 #ifdef __LITTLE_ENDIAN__
2245 __ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) {
2246   uint32x4_t __ret;
2247   __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
2248   return __ret;
2249 }
2250 #else
2251 __ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) {
2252   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2253   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2254   uint32x4_t __ret;
2255   __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
2256   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2257   return __ret;
2258 }
2259 #endif
2260 
2261 #ifdef __LITTLE_ENDIAN__
2262 __ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) {
2263   uint32x2_t __ret;
2264   __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
2265   return __ret;
2266 }
2267 #else
2268 __ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) {
2269   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
2270   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
2271   uint32x2_t __ret;
2272   __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
2273   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
2274   return __ret;
2275 }
2276 #endif
2277 
2278 #ifdef __LITTLE_ENDIAN__
2279 __ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) {
2280   uint32x4_t __ret;
2281   __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
2282   return __ret;
2283 }
2284 #else
2285 __ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) {
2286   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2287   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2288   uint32x4_t __ret;
2289   __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
2290   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2291   return __ret;
2292 }
2293 #endif
2294 
2295 #ifdef __LITTLE_ENDIAN__
2296 __ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) {
2297   uint32x2_t __ret;
2298   __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
2299   return __ret;
2300 }
2301 #else
2302 __ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) {
2303   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
2304   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
2305   uint32x2_t __ret;
2306   __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
2307   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
2308   return __ret;
2309 }
2310 #endif
2311 
2312 #ifdef __LITTLE_ENDIAN__
2313 __ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) {
2314   uint32x4_t __ret;
2315   __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
2316   return __ret;
2317 }
2318 #else
2319 __ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) {
2320   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2321   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2322   uint32x4_t __ret;
2323   __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
2324   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2325   return __ret;
2326 }
2327 #endif
2328 
2329 #ifdef __LITTLE_ENDIAN__
2330 __ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) {
2331   uint32x2_t __ret;
2332   __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
2333   return __ret;
2334 }
2335 #else
2336 __ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) {
2337   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
2338   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
2339   uint32x2_t __ret;
2340   __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
2341   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
2342   return __ret;
2343 }
2344 #endif
2345 
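/* vceq/vceqq (Compare Equal): lane-wise equality test; each result lane is
 * all ones when the corresponding lanes compare equal, otherwise zero. */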
2346 #ifdef __LITTLE_ENDIAN__
2347 __ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) {
2348   uint8x8_t __ret;
2349   __ret = (uint8x8_t)(__p0 == __p1);
2350   return __ret;
2351 }
2352 #else
2353 __ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) {
2354   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
2355   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
2356   uint8x8_t __ret;
2357   __ret = (uint8x8_t)(__rev0 == __rev1);
2358   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
2359   return __ret;
2360 }
2361 #endif
2362 
2363 #ifdef __LITTLE_ENDIAN__
2364 __ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) {
2365   uint8x16_t __ret;
2366   __ret = (uint8x16_t)(__p0 == __p1);
2367   return __ret;
2368 }
2369 #else
2370 __ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) {
2371   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2372   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2373   uint8x16_t __ret;
2374   __ret = (uint8x16_t)(__rev0 == __rev1);
2375   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2376   return __ret;
2377 }
2378 #endif
2379 
2380 #ifdef __LITTLE_ENDIAN__
2381 __ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) {
2382   uint8x16_t __ret;
2383   __ret = (uint8x16_t)(__p0 == __p1);
2384   return __ret;
2385 }
2386 #else
2387 __ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) {
2388   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2389   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2390   uint8x16_t __ret;
2391   __ret = (uint8x16_t)(__rev0 == __rev1);
2392   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2393   return __ret;
2394 }
2395 #endif
2396 
2397 #ifdef __LITTLE_ENDIAN__
2398 __ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) {
2399   uint32x4_t __ret;
2400   __ret = (uint32x4_t)(__p0 == __p1);
2401   return __ret;
2402 }
2403 #else
2404 __ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) {
2405   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2406   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2407   uint32x4_t __ret;
2408   __ret = (uint32x4_t)(__rev0 == __rev1);
2409   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2410   return __ret;
2411 }
2412 #endif
2413 
2414 #ifdef __LITTLE_ENDIAN__
2415 __ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) {
2416   uint16x8_t __ret;
2417   __ret = (uint16x8_t)(__p0 == __p1);
2418   return __ret;
2419 }
2420 #else
2421 __ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) {
2422   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
2423   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
2424   uint16x8_t __ret;
2425   __ret = (uint16x8_t)(__rev0 == __rev1);
2426   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
2427   return __ret;
2428 }
2429 #endif
2430 
2431 #ifdef __LITTLE_ENDIAN__
2432 __ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) {
2433   uint8x16_t __ret;
2434   __ret = (uint8x16_t)(__p0 == __p1);
2435   return __ret;
2436 }
2437 #else
2438 __ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) {
2439   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2440   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2441   uint8x16_t __ret;
2442   __ret = (uint8x16_t)(__rev0 == __rev1);
2443   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2444   return __ret;
2445 }
2446 #endif
2447 
2448 #ifdef __LITTLE_ENDIAN__
2449 __ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) {
2450   uint32x4_t __ret;
2451   __ret = (uint32x4_t)(__p0 == __p1);
2452   return __ret;
2453 }
2454 #else
2455 __ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) {
2456   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2457   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2458   uint32x4_t __ret;
2459   __ret = (uint32x4_t)(__rev0 == __rev1);
2460   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2461   return __ret;
2462 }
2463 #endif
2464 
2465 #ifdef __LITTLE_ENDIAN__
2466 __ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) {
2467   uint32x4_t __ret;
2468   __ret = (uint32x4_t)(__p0 == __p1);
2469   return __ret;
2470 }
2471 #else
2472 __ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) {
2473   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2474   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2475   uint32x4_t __ret;
2476   __ret = (uint32x4_t)(__rev0 == __rev1);
2477   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2478   return __ret;
2479 }
2480 #endif
2481 
2482 #ifdef __LITTLE_ENDIAN__
2483 __ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) {
2484   uint16x8_t __ret;
2485   __ret = (uint16x8_t)(__p0 == __p1);
2486   return __ret;
2487 }
2488 #else
2489 __ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) {
2490   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
2491   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
2492   uint16x8_t __ret;
2493   __ret = (uint16x8_t)(__rev0 == __rev1);
2494   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
2495   return __ret;
2496 }
2497 #endif
2498 
2499 #ifdef __LITTLE_ENDIAN__
2500 __ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) {
2501   uint8x8_t __ret;
2502   __ret = (uint8x8_t)(__p0 == __p1);
2503   return __ret;
2504 }
2505 #else
2506 __ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) {
2507   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
2508   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
2509   uint8x8_t __ret;
2510   __ret = (uint8x8_t)(__rev0 == __rev1);
2511   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
2512   return __ret;
2513 }
2514 #endif
2515 
2516 #ifdef __LITTLE_ENDIAN__
2517 __ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) {
2518   uint32x2_t __ret;
2519   __ret = (uint32x2_t)(__p0 == __p1);
2520   return __ret;
2521 }
2522 #else
2523 __ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) {
2524   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
2525   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
2526   uint32x2_t __ret;
2527   __ret = (uint32x2_t)(__rev0 == __rev1);
2528   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
2529   return __ret;
2530 }
2531 #endif
2532 
2533 #ifdef __LITTLE_ENDIAN__
2534 __ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) {
2535   uint16x4_t __ret;
2536   __ret = (uint16x4_t)(__p0 == __p1);
2537   return __ret;
2538 }
2539 #else
2540 __ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) {
2541   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2542   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2543   uint16x4_t __ret;
2544   __ret = (uint16x4_t)(__rev0 == __rev1);
2545   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2546   return __ret;
2547 }
2548 #endif
2549 
2550 #ifdef __LITTLE_ENDIAN__
2551 __ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) {
2552   uint8x8_t __ret;
2553   __ret = (uint8x8_t)(__p0 == __p1);
2554   return __ret;
2555 }
2556 #else
2557 __ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) {
2558   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
2559   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
2560   uint8x8_t __ret;
2561   __ret = (uint8x8_t)(__rev0 == __rev1);
2562   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
2563   return __ret;
2564 }
2565 #endif
2566 
2567 #ifdef __LITTLE_ENDIAN__
2568 __ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) {
2569   uint32x2_t __ret;
2570   __ret = (uint32x2_t)(__p0 == __p1);
2571   return __ret;
2572 }
2573 #else
2574 __ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) {
2575   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
2576   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
2577   uint32x2_t __ret;
2578   __ret = (uint32x2_t)(__rev0 == __rev1);
2579   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
2580   return __ret;
2581 }
2582 #endif
2583 
2584 #ifdef __LITTLE_ENDIAN__
2585 __ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) {
2586   uint32x2_t __ret;
2587   __ret = (uint32x2_t)(__p0 == __p1);
2588   return __ret;
2589 }
2590 #else
2591 __ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) {
2592   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
2593   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
2594   uint32x2_t __ret;
2595   __ret = (uint32x2_t)(__rev0 == __rev1);
2596   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
2597   return __ret;
2598 }
2599 #endif
2600 
2601 #ifdef __LITTLE_ENDIAN__
2602 __ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) {
2603   uint16x4_t __ret;
2604   __ret = (uint16x4_t)(__p0 == __p1);
2605   return __ret;
2606 }
2607 #else
2608 __ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) {
2609   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2610   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2611   uint16x4_t __ret;
2612   __ret = (uint16x4_t)(__rev0 == __rev1);
2613   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2614   return __ret;
2615 }
2616 #endif
2617 
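/* vcge/vcgeq (Compare Greater-than-or-Equal): lane-wise __p0 >= __p1; each
 * result lane is all ones when the comparison holds, otherwise zero. */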
2618 #ifdef __LITTLE_ENDIAN__
2619 __ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) {
2620   uint8x16_t __ret;
2621   __ret = (uint8x16_t)(__p0 >= __p1);
2622   return __ret;
2623 }
2624 #else
2625 __ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) {
2626   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2627   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2628   uint8x16_t __ret;
2629   __ret = (uint8x16_t)(__rev0 >= __rev1);
2630   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2631   return __ret;
2632 }
2633 #endif
2634 
2635 #ifdef __LITTLE_ENDIAN__
2636 __ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) {
2637   uint32x4_t __ret;
2638   __ret = (uint32x4_t)(__p0 >= __p1);
2639   return __ret;
2640 }
2641 #else
2642 __ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) {
2643   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2644   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2645   uint32x4_t __ret;
2646   __ret = (uint32x4_t)(__rev0 >= __rev1);
2647   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2648   return __ret;
2649 }
2650 #endif
2651 
2652 #ifdef __LITTLE_ENDIAN__
2653 __ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) {
2654   uint16x8_t __ret;
2655   __ret = (uint16x8_t)(__p0 >= __p1);
2656   return __ret;
2657 }
2658 #else
2659 __ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) {
2660   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
2661   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
2662   uint16x8_t __ret;
2663   __ret = (uint16x8_t)(__rev0 >= __rev1);
2664   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
2665   return __ret;
2666 }
2667 #endif
2668 
2669 #ifdef __LITTLE_ENDIAN__
2670 __ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) {
2671   uint8x16_t __ret;
2672   __ret = (uint8x16_t)(__p0 >= __p1);
2673   return __ret;
2674 }
2675 #else
2676 __ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) {
2677   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2678   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2679   uint8x16_t __ret;
2680   __ret = (uint8x16_t)(__rev0 >= __rev1);
2681   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2682   return __ret;
2683 }
2684 #endif
2685 
2686 #ifdef __LITTLE_ENDIAN__
2687 __ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) {
2688   uint32x4_t __ret;
2689   __ret = (uint32x4_t)(__p0 >= __p1);
2690   return __ret;
2691 }
2692 #else
2693 __ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) {
2694   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2695   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2696   uint32x4_t __ret;
2697   __ret = (uint32x4_t)(__rev0 >= __rev1);
2698   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2699   return __ret;
2700 }
2701 #endif
2702 
2703 #ifdef __LITTLE_ENDIAN__
2704 __ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) {
2705   uint32x4_t __ret;
2706   __ret = (uint32x4_t)(__p0 >= __p1);
2707   return __ret;
2708 }
2709 #else
2710 __ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) {
2711   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2712   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2713   uint32x4_t __ret;
2714   __ret = (uint32x4_t)(__rev0 >= __rev1);
2715   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2716   return __ret;
2717 }
2718 #endif
2719 
2720 #ifdef __LITTLE_ENDIAN__
2721 __ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) {
2722   uint16x8_t __ret;
2723   __ret = (uint16x8_t)(__p0 >= __p1);
2724   return __ret;
2725 }
2726 #else
vcgeq_s16(int16x8_t __p0,int16x8_t __p1)2727 __ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) {
2728   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
2729   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
2730   uint16x8_t __ret;
2731   __ret = (uint16x8_t)(__rev0 >= __rev1);
2732   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
2733   return __ret;
2734 }
2735 #endif
2736 
2737 #ifdef __LITTLE_ENDIAN__
vcge_u8(uint8x8_t __p0,uint8x8_t __p1)2738 __ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) {
2739   uint8x8_t __ret;
2740   __ret = (uint8x8_t)(__p0 >= __p1);
2741   return __ret;
2742 }
2743 #else
vcge_u8(uint8x8_t __p0,uint8x8_t __p1)2744 __ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) {
2745   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
2746   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
2747   uint8x8_t __ret;
2748   __ret = (uint8x8_t)(__rev0 >= __rev1);
2749   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
2750   return __ret;
2751 }
2752 #endif
2753 
2754 #ifdef __LITTLE_ENDIAN__
vcge_u32(uint32x2_t __p0,uint32x2_t __p1)2755 __ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) {
2756   uint32x2_t __ret;
2757   __ret = (uint32x2_t)(__p0 >= __p1);
2758   return __ret;
2759 }
2760 #else
vcge_u32(uint32x2_t __p0,uint32x2_t __p1)2761 __ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) {
2762   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
2763   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
2764   uint32x2_t __ret;
2765   __ret = (uint32x2_t)(__rev0 >= __rev1);
2766   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
2767   return __ret;
2768 }
2769 #endif
2770 
2771 #ifdef __LITTLE_ENDIAN__
vcge_u16(uint16x4_t __p0,uint16x4_t __p1)2772 __ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) {
2773   uint16x4_t __ret;
2774   __ret = (uint16x4_t)(__p0 >= __p1);
2775   return __ret;
2776 }
2777 #else
vcge_u16(uint16x4_t __p0,uint16x4_t __p1)2778 __ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) {
2779   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2780   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2781   uint16x4_t __ret;
2782   __ret = (uint16x4_t)(__rev0 >= __rev1);
2783   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2784   return __ret;
2785 }
2786 #endif
2787 
2788 #ifdef __LITTLE_ENDIAN__
vcge_s8(int8x8_t __p0,int8x8_t __p1)2789 __ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) {
2790   uint8x8_t __ret;
2791   __ret = (uint8x8_t)(__p0 >= __p1);
2792   return __ret;
2793 }
2794 #else
vcge_s8(int8x8_t __p0,int8x8_t __p1)2795 __ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) {
2796   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
2797   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
2798   uint8x8_t __ret;
2799   __ret = (uint8x8_t)(__rev0 >= __rev1);
2800   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
2801   return __ret;
2802 }
2803 #endif
2804 
2805 #ifdef __LITTLE_ENDIAN__
vcge_f32(float32x2_t __p0,float32x2_t __p1)2806 __ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) {
2807   uint32x2_t __ret;
2808   __ret = (uint32x2_t)(__p0 >= __p1);
2809   return __ret;
2810 }
2811 #else
vcge_f32(float32x2_t __p0,float32x2_t __p1)2812 __ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) {
2813   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
2814   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
2815   uint32x2_t __ret;
2816   __ret = (uint32x2_t)(__rev0 >= __rev1);
2817   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
2818   return __ret;
2819 }
2820 #endif
2821 
2822 #ifdef __LITTLE_ENDIAN__
vcge_s32(int32x2_t __p0,int32x2_t __p1)2823 __ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) {
2824   uint32x2_t __ret;
2825   __ret = (uint32x2_t)(__p0 >= __p1);
2826   return __ret;
2827 }
2828 #else
vcge_s32(int32x2_t __p0,int32x2_t __p1)2829 __ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) {
2830   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
2831   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
2832   uint32x2_t __ret;
2833   __ret = (uint32x2_t)(__rev0 >= __rev1);
2834   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
2835   return __ret;
2836 }
2837 #endif
2838 
2839 #ifdef __LITTLE_ENDIAN__
vcge_s16(int16x4_t __p0,int16x4_t __p1)2840 __ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) {
2841   uint16x4_t __ret;
2842   __ret = (uint16x4_t)(__p0 >= __p1);
2843   return __ret;
2844 }
2845 #else
vcge_s16(int16x4_t __p0,int16x4_t __p1)2846 __ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) {
2847   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2848   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2849   uint16x4_t __ret;
2850   __ret = (uint16x4_t)(__rev0 >= __rev1);
2851   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2852   return __ret;
2853 }
2854 #endif
2855 
2856 #ifdef __LITTLE_ENDIAN__
vcgtq_u8(uint8x16_t __p0,uint8x16_t __p1)2857 __ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) {
2858   uint8x16_t __ret;
2859   __ret = (uint8x16_t)(__p0 > __p1);
2860   return __ret;
2861 }
2862 #else
vcgtq_u8(uint8x16_t __p0,uint8x16_t __p1)2863 __ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) {
2864   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2865   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2866   uint8x16_t __ret;
2867   __ret = (uint8x16_t)(__rev0 > __rev1);
2868   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2869   return __ret;
2870 }
2871 #endif
2872 
2873 #ifdef __LITTLE_ENDIAN__
vcgtq_u32(uint32x4_t __p0,uint32x4_t __p1)2874 __ai uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) {
2875   uint32x4_t __ret;
2876   __ret = (uint32x4_t)(__p0 > __p1);
2877   return __ret;
2878 }
2879 #else
vcgtq_u32(uint32x4_t __p0,uint32x4_t __p1)2880 __ai uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) {
2881   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2882   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2883   uint32x4_t __ret;
2884   __ret = (uint32x4_t)(__rev0 > __rev1);
2885   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2886   return __ret;
2887 }
2888 #endif
2889 
2890 #ifdef __LITTLE_ENDIAN__
vcgtq_u16(uint16x8_t __p0,uint16x8_t __p1)2891 __ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) {
2892   uint16x8_t __ret;
2893   __ret = (uint16x8_t)(__p0 > __p1);
2894   return __ret;
2895 }
2896 #else
vcgtq_u16(uint16x8_t __p0,uint16x8_t __p1)2897 __ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) {
2898   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
2899   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
2900   uint16x8_t __ret;
2901   __ret = (uint16x8_t)(__rev0 > __rev1);
2902   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
2903   return __ret;
2904 }
2905 #endif
2906 
2907 #ifdef __LITTLE_ENDIAN__
vcgtq_s8(int8x16_t __p0,int8x16_t __p1)2908 __ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) {
2909   uint8x16_t __ret;
2910   __ret = (uint8x16_t)(__p0 > __p1);
2911   return __ret;
2912 }
2913 #else
vcgtq_s8(int8x16_t __p0,int8x16_t __p1)2914 __ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) {
2915   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2916   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2917   uint8x16_t __ret;
2918   __ret = (uint8x16_t)(__rev0 > __rev1);
2919   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2920   return __ret;
2921 }
2922 #endif
2923 
2924 #ifdef __LITTLE_ENDIAN__
vcgtq_f32(float32x4_t __p0,float32x4_t __p1)2925 __ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) {
2926   uint32x4_t __ret;
2927   __ret = (uint32x4_t)(__p0 > __p1);
2928   return __ret;
2929 }
2930 #else
vcgtq_f32(float32x4_t __p0,float32x4_t __p1)2931 __ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) {
2932   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2933   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2934   uint32x4_t __ret;
2935   __ret = (uint32x4_t)(__rev0 > __rev1);
2936   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2937   return __ret;
2938 }
2939 #endif
2940 
2941 #ifdef __LITTLE_ENDIAN__
vcgtq_s32(int32x4_t __p0,int32x4_t __p1)2942 __ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) {
2943   uint32x4_t __ret;
2944   __ret = (uint32x4_t)(__p0 > __p1);
2945   return __ret;
2946 }
2947 #else
vcgtq_s32(int32x4_t __p0,int32x4_t __p1)2948 __ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) {
2949   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2950   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2951   uint32x4_t __ret;
2952   __ret = (uint32x4_t)(__rev0 > __rev1);
2953   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2954   return __ret;
2955 }
2956 #endif
2957 
2958 #ifdef __LITTLE_ENDIAN__
vcgtq_s16(int16x8_t __p0,int16x8_t __p1)2959 __ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) {
2960   uint16x8_t __ret;
2961   __ret = (uint16x8_t)(__p0 > __p1);
2962   return __ret;
2963 }
2964 #else
vcgtq_s16(int16x8_t __p0,int16x8_t __p1)2965 __ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) {
2966   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
2967   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
2968   uint16x8_t __ret;
2969   __ret = (uint16x8_t)(__rev0 > __rev1);
2970   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
2971   return __ret;
2972 }
2973 #endif
2974 
2975 #ifdef __LITTLE_ENDIAN__
vcgt_u8(uint8x8_t __p0,uint8x8_t __p1)2976 __ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) {
2977   uint8x8_t __ret;
2978   __ret = (uint8x8_t)(__p0 > __p1);
2979   return __ret;
2980 }
2981 #else
vcgt_u8(uint8x8_t __p0,uint8x8_t __p1)2982 __ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) {
2983   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
2984   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
2985   uint8x8_t __ret;
2986   __ret = (uint8x8_t)(__rev0 > __rev1);
2987   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
2988   return __ret;
2989 }
2990 #endif
2991 
2992 #ifdef __LITTLE_ENDIAN__
vcgt_u32(uint32x2_t __p0,uint32x2_t __p1)2993 __ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) {
2994   uint32x2_t __ret;
2995   __ret = (uint32x2_t)(__p0 > __p1);
2996   return __ret;
2997 }
2998 #else
vcgt_u32(uint32x2_t __p0,uint32x2_t __p1)2999 __ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) {
3000   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
3001   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
3002   uint32x2_t __ret;
3003   __ret = (uint32x2_t)(__rev0 > __rev1);
3004   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
3005   return __ret;
3006 }
3007 #endif
3008 
3009 #ifdef __LITTLE_ENDIAN__
vcgt_u16(uint16x4_t __p0,uint16x4_t __p1)3010 __ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) {
3011   uint16x4_t __ret;
3012   __ret = (uint16x4_t)(__p0 > __p1);
3013   return __ret;
3014 }
3015 #else
vcgt_u16(uint16x4_t __p0,uint16x4_t __p1)3016 __ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) {
3017   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3018   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
3019   uint16x4_t __ret;
3020   __ret = (uint16x4_t)(__rev0 > __rev1);
3021   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
3022   return __ret;
3023 }
3024 #endif
3025 
3026 #ifdef __LITTLE_ENDIAN__
vcgt_s8(int8x8_t __p0,int8x8_t __p1)3027 __ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) {
3028   uint8x8_t __ret;
3029   __ret = (uint8x8_t)(__p0 > __p1);
3030   return __ret;
3031 }
3032 #else
vcgt_s8(int8x8_t __p0,int8x8_t __p1)3033 __ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) {
3034   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3035   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
3036   uint8x8_t __ret;
3037   __ret = (uint8x8_t)(__rev0 > __rev1);
3038   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3039   return __ret;
3040 }
3041 #endif
3042 
3043 #ifdef __LITTLE_ENDIAN__
vcgt_f32(float32x2_t __p0,float32x2_t __p1)3044 __ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) {
3045   uint32x2_t __ret;
3046   __ret = (uint32x2_t)(__p0 > __p1);
3047   return __ret;
3048 }
3049 #else
vcgt_f32(float32x2_t __p0,float32x2_t __p1)3050 __ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) {
3051   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
3052   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
3053   uint32x2_t __ret;
3054   __ret = (uint32x2_t)(__rev0 > __rev1);
3055   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
3056   return __ret;
3057 }
3058 #endif
3059 
3060 #ifdef __LITTLE_ENDIAN__
vcgt_s32(int32x2_t __p0,int32x2_t __p1)3061 __ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) {
3062   uint32x2_t __ret;
3063   __ret = (uint32x2_t)(__p0 > __p1);
3064   return __ret;
3065 }
3066 #else
vcgt_s32(int32x2_t __p0,int32x2_t __p1)3067 __ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) {
3068   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
3069   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
3070   uint32x2_t __ret;
3071   __ret = (uint32x2_t)(__rev0 > __rev1);
3072   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
3073   return __ret;
3074 }
3075 #endif
3076 
3077 #ifdef __LITTLE_ENDIAN__
vcgt_s16(int16x4_t __p0,int16x4_t __p1)3078 __ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) {
3079   uint16x4_t __ret;
3080   __ret = (uint16x4_t)(__p0 > __p1);
3081   return __ret;
3082 }
3083 #else
vcgt_s16(int16x4_t __p0,int16x4_t __p1)3084 __ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) {
3085   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3086   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
3087   uint16x4_t __ret;
3088   __ret = (uint16x4_t)(__rev0 > __rev1);
3089   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
3090   return __ret;
3091 }
3092 #endif
3093 
3094 #ifdef __LITTLE_ENDIAN__
vcleq_u8(uint8x16_t __p0,uint8x16_t __p1)3095 __ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) {
3096   uint8x16_t __ret;
3097   __ret = (uint8x16_t)(__p0 <= __p1);
3098   return __ret;
3099 }
3100 #else
vcleq_u8(uint8x16_t __p0,uint8x16_t __p1)3101 __ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) {
3102   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3103   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3104   uint8x16_t __ret;
3105   __ret = (uint8x16_t)(__rev0 <= __rev1);
3106   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3107   return __ret;
3108 }
3109 #endif
3110 
3111 #ifdef __LITTLE_ENDIAN__
vcleq_u32(uint32x4_t __p0,uint32x4_t __p1)3112 __ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) {
3113   uint32x4_t __ret;
3114   __ret = (uint32x4_t)(__p0 <= __p1);
3115   return __ret;
3116 }
3117 #else
vcleq_u32(uint32x4_t __p0,uint32x4_t __p1)3118 __ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) {
3119   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3120   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
3121   uint32x4_t __ret;
3122   __ret = (uint32x4_t)(__rev0 <= __rev1);
3123   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
3124   return __ret;
3125 }
3126 #endif
3127 
3128 #ifdef __LITTLE_ENDIAN__
vcleq_u16(uint16x8_t __p0,uint16x8_t __p1)3129 __ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) {
3130   uint16x8_t __ret;
3131   __ret = (uint16x8_t)(__p0 <= __p1);
3132   return __ret;
3133 }
3134 #else
vcleq_u16(uint16x8_t __p0,uint16x8_t __p1)3135 __ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) {
3136   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3137   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
3138   uint16x8_t __ret;
3139   __ret = (uint16x8_t)(__rev0 <= __rev1);
3140   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3141   return __ret;
3142 }
3143 #endif
3144 
3145 #ifdef __LITTLE_ENDIAN__
vcleq_s8(int8x16_t __p0,int8x16_t __p1)3146 __ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) {
3147   uint8x16_t __ret;
3148   __ret = (uint8x16_t)(__p0 <= __p1);
3149   return __ret;
3150 }
3151 #else
vcleq_s8(int8x16_t __p0,int8x16_t __p1)3152 __ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) {
3153   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3154   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3155   uint8x16_t __ret;
3156   __ret = (uint8x16_t)(__rev0 <= __rev1);
3157   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3158   return __ret;
3159 }
3160 #endif
3161 
3162 #ifdef __LITTLE_ENDIAN__
vcleq_f32(float32x4_t __p0,float32x4_t __p1)3163 __ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) {
3164   uint32x4_t __ret;
3165   __ret = (uint32x4_t)(__p0 <= __p1);
3166   return __ret;
3167 }
3168 #else
vcleq_f32(float32x4_t __p0,float32x4_t __p1)3169 __ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) {
3170   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3171   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
3172   uint32x4_t __ret;
3173   __ret = (uint32x4_t)(__rev0 <= __rev1);
3174   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
3175   return __ret;
3176 }
3177 #endif
3178 
3179 #ifdef __LITTLE_ENDIAN__
vcleq_s32(int32x4_t __p0,int32x4_t __p1)3180 __ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) {
3181   uint32x4_t __ret;
3182   __ret = (uint32x4_t)(__p0 <= __p1);
3183   return __ret;
3184 }
3185 #else
vcleq_s32(int32x4_t __p0,int32x4_t __p1)3186 __ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) {
3187   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3188   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
3189   uint32x4_t __ret;
3190   __ret = (uint32x4_t)(__rev0 <= __rev1);
3191   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
3192   return __ret;
3193 }
3194 #endif
3195 
3196 #ifdef __LITTLE_ENDIAN__
vcleq_s16(int16x8_t __p0,int16x8_t __p1)3197 __ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) {
3198   uint16x8_t __ret;
3199   __ret = (uint16x8_t)(__p0 <= __p1);
3200   return __ret;
3201 }
3202 #else
vcleq_s16(int16x8_t __p0,int16x8_t __p1)3203 __ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) {
3204   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3205   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
3206   uint16x8_t __ret;
3207   __ret = (uint16x8_t)(__rev0 <= __rev1);
3208   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3209   return __ret;
3210 }
3211 #endif
3212 
3213 #ifdef __LITTLE_ENDIAN__
vcle_u8(uint8x8_t __p0,uint8x8_t __p1)3214 __ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) {
3215   uint8x8_t __ret;
3216   __ret = (uint8x8_t)(__p0 <= __p1);
3217   return __ret;
3218 }
3219 #else
vcle_u8(uint8x8_t __p0,uint8x8_t __p1)3220 __ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) {
3221   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3222   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
3223   uint8x8_t __ret;
3224   __ret = (uint8x8_t)(__rev0 <= __rev1);
3225   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3226   return __ret;
3227 }
3228 #endif
3229 
3230 #ifdef __LITTLE_ENDIAN__
vcle_u32(uint32x2_t __p0,uint32x2_t __p1)3231 __ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) {
3232   uint32x2_t __ret;
3233   __ret = (uint32x2_t)(__p0 <= __p1);
3234   return __ret;
3235 }
3236 #else
vcle_u32(uint32x2_t __p0,uint32x2_t __p1)3237 __ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) {
3238   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
3239   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
3240   uint32x2_t __ret;
3241   __ret = (uint32x2_t)(__rev0 <= __rev1);
3242   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
3243   return __ret;
3244 }
3245 #endif
3246 
3247 #ifdef __LITTLE_ENDIAN__
vcle_u16(uint16x4_t __p0,uint16x4_t __p1)3248 __ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) {
3249   uint16x4_t __ret;
3250   __ret = (uint16x4_t)(__p0 <= __p1);
3251   return __ret;
3252 }
3253 #else
vcle_u16(uint16x4_t __p0,uint16x4_t __p1)3254 __ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) {
3255   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3256   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
3257   uint16x4_t __ret;
3258   __ret = (uint16x4_t)(__rev0 <= __rev1);
3259   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
3260   return __ret;
3261 }
3262 #endif
3263 
3264 #ifdef __LITTLE_ENDIAN__
vcle_s8(int8x8_t __p0,int8x8_t __p1)3265 __ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) {
3266   uint8x8_t __ret;
3267   __ret = (uint8x8_t)(__p0 <= __p1);
3268   return __ret;
3269 }
3270 #else
vcle_s8(int8x8_t __p0,int8x8_t __p1)3271 __ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) {
3272   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3273   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
3274   uint8x8_t __ret;
3275   __ret = (uint8x8_t)(__rev0 <= __rev1);
3276   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3277   return __ret;
3278 }
3279 #endif
3280 
3281 #ifdef __LITTLE_ENDIAN__
vcle_f32(float32x2_t __p0,float32x2_t __p1)3282 __ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) {
3283   uint32x2_t __ret;
3284   __ret = (uint32x2_t)(__p0 <= __p1);
3285   return __ret;
3286 }
3287 #else
vcle_f32(float32x2_t __p0,float32x2_t __p1)3288 __ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) {
3289   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
3290   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
3291   uint32x2_t __ret;
3292   __ret = (uint32x2_t)(__rev0 <= __rev1);
3293   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
3294   return __ret;
3295 }
3296 #endif
3297 
3298 #ifdef __LITTLE_ENDIAN__
vcle_s32(int32x2_t __p0,int32x2_t __p1)3299 __ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) {
3300   uint32x2_t __ret;
3301   __ret = (uint32x2_t)(__p0 <= __p1);
3302   return __ret;
3303 }
3304 #else
vcle_s32(int32x2_t __p0,int32x2_t __p1)3305 __ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) {
3306   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
3307   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
3308   uint32x2_t __ret;
3309   __ret = (uint32x2_t)(__rev0 <= __rev1);
3310   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
3311   return __ret;
3312 }
3313 #endif
3314 
3315 #ifdef __LITTLE_ENDIAN__
vcle_s16(int16x4_t __p0,int16x4_t __p1)3316 __ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) {
3317   uint16x4_t __ret;
3318   __ret = (uint16x4_t)(__p0 <= __p1);
3319   return __ret;
3320 }
3321 #else
vcle_s16(int16x4_t __p0,int16x4_t __p1)3322 __ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) {
3323   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3324   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
3325   uint16x4_t __ret;
3326   __ret = (uint16x4_t)(__rev0 <= __rev1);
3327   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
3328   return __ret;
3329 }
3330 #endif
3331 
3332 #ifdef __LITTLE_ENDIAN__
vclsq_s8(int8x16_t __p0)3333 __ai int8x16_t vclsq_s8(int8x16_t __p0) {
3334   int8x16_t __ret;
3335   __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 32);
3336   return __ret;
3337 }
3338 #else
vclsq_s8(int8x16_t __p0)3339 __ai int8x16_t vclsq_s8(int8x16_t __p0) {
3340   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3341   int8x16_t __ret;
3342   __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 32);
3343   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3344   return __ret;
3345 }
3346 #endif
3347 
3348 #ifdef __LITTLE_ENDIAN__
vclsq_s32(int32x4_t __p0)3349 __ai int32x4_t vclsq_s32(int32x4_t __p0) {
3350   int32x4_t __ret;
3351   __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 34);
3352   return __ret;
3353 }
3354 #else
vclsq_s32(int32x4_t __p0)3355 __ai int32x4_t vclsq_s32(int32x4_t __p0) {
3356   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3357   int32x4_t __ret;
3358   __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 34);
3359   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
3360   return __ret;
3361 }
3362 #endif
3363 
3364 #ifdef __LITTLE_ENDIAN__
vclsq_s16(int16x8_t __p0)3365 __ai int16x8_t vclsq_s16(int16x8_t __p0) {
3366   int16x8_t __ret;
3367   __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 33);
3368   return __ret;
3369 }
3370 #else
vclsq_s16(int16x8_t __p0)3371 __ai int16x8_t vclsq_s16(int16x8_t __p0) {
3372   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3373   int16x8_t __ret;
3374   __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 33);
3375   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3376   return __ret;
3377 }
3378 #endif
3379 
3380 #ifdef __LITTLE_ENDIAN__
vcls_s8(int8x8_t __p0)3381 __ai int8x8_t vcls_s8(int8x8_t __p0) {
3382   int8x8_t __ret;
3383   __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__p0, 0);
3384   return __ret;
3385 }
3386 #else
vcls_s8(int8x8_t __p0)3387 __ai int8x8_t vcls_s8(int8x8_t __p0) {
3388   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3389   int8x8_t __ret;
3390   __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 0);
3391   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3392   return __ret;
3393 }
3394 #endif
3395 
3396 #ifdef __LITTLE_ENDIAN__
vcls_s32(int32x2_t __p0)3397 __ai int32x2_t vcls_s32(int32x2_t __p0) {
3398   int32x2_t __ret;
3399   __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__p0, 2);
3400   return __ret;
3401 }
3402 #else
vcls_s32(int32x2_t __p0)3403 __ai int32x2_t vcls_s32(int32x2_t __p0) {
3404   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
3405   int32x2_t __ret;
3406   __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 2);
3407   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
3408   return __ret;
3409 }
3410 #endif
3411 
3412 #ifdef __LITTLE_ENDIAN__
vcls_s16(int16x4_t __p0)3413 __ai int16x4_t vcls_s16(int16x4_t __p0) {
3414   int16x4_t __ret;
3415   __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__p0, 1);
3416   return __ret;
3417 }
3418 #else
vcls_s16(int16x4_t __p0)3419 __ai int16x4_t vcls_s16(int16x4_t __p0) {
3420   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3421   int16x4_t __ret;
3422   __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 1);
3423   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
3424   return __ret;
3425 }
3426 #endif
3427 
3428 #ifdef __LITTLE_ENDIAN__
vcltq_u8(uint8x16_t __p0,uint8x16_t __p1)3429 __ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) {
3430   uint8x16_t __ret;
3431   __ret = (uint8x16_t)(__p0 < __p1);
3432   return __ret;
3433 }
3434 #else
vcltq_u8(uint8x16_t __p0,uint8x16_t __p1)3435 __ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) {
3436   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3437   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3438   uint8x16_t __ret;
3439   __ret = (uint8x16_t)(__rev0 < __rev1);
3440   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3441   return __ret;
3442 }
3443 #endif
3444 
3445 #ifdef __LITTLE_ENDIAN__
vcltq_u32(uint32x4_t __p0,uint32x4_t __p1)3446 __ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) {
3447   uint32x4_t __ret;
3448   __ret = (uint32x4_t)(__p0 < __p1);
3449   return __ret;
3450 }
3451 #else
vcltq_u32(uint32x4_t __p0,uint32x4_t __p1)3452 __ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) {
3453   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3454   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
3455   uint32x4_t __ret;
3456   __ret = (uint32x4_t)(__rev0 < __rev1);
3457   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
3458   return __ret;
3459 }
3460 #endif
3461 
3462 #ifdef __LITTLE_ENDIAN__
vcltq_u16(uint16x8_t __p0,uint16x8_t __p1)3463 __ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) {
3464   uint16x8_t __ret;
3465   __ret = (uint16x8_t)(__p0 < __p1);
3466   return __ret;
3467 }
3468 #else
vcltq_u16(uint16x8_t __p0,uint16x8_t __p1)3469 __ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) {
3470   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3471   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
3472   uint16x8_t __ret;
3473   __ret = (uint16x8_t)(__rev0 < __rev1);
3474   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3475   return __ret;
3476 }
3477 #endif
3478 
3479 #ifdef __LITTLE_ENDIAN__
vcltq_s8(int8x16_t __p0,int8x16_t __p1)3480 __ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) {
3481   uint8x16_t __ret;
3482   __ret = (uint8x16_t)(__p0 < __p1);
3483   return __ret;
3484 }
3485 #else
vcltq_s8(int8x16_t __p0,int8x16_t __p1)3486 __ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) {
3487   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3488   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3489   uint8x16_t __ret;
3490   __ret = (uint8x16_t)(__rev0 < __rev1);
3491   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3492   return __ret;
3493 }
3494 #endif
3495 
3496 #ifdef __LITTLE_ENDIAN__
vcltq_f32(float32x4_t __p0,float32x4_t __p1)3497 __ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) {
3498   uint32x4_t __ret;
3499   __ret = (uint32x4_t)(__p0 < __p1);
3500   return __ret;
3501 }
3502 #else
vcltq_f32(float32x4_t __p0,float32x4_t __p1)3503 __ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) {
3504   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3505   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
3506   uint32x4_t __ret;
3507   __ret = (uint32x4_t)(__rev0 < __rev1);
3508   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
3509   return __ret;
3510 }
3511 #endif
3512 
3513 #ifdef __LITTLE_ENDIAN__
vcltq_s32(int32x4_t __p0,int32x4_t __p1)3514 __ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) {
3515   uint32x4_t __ret;
3516   __ret = (uint32x4_t)(__p0 < __p1);
3517   return __ret;
3518 }
3519 #else
vcltq_s32(int32x4_t __p0,int32x4_t __p1)3520 __ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) {
3521   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3522   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
3523   uint32x4_t __ret;
3524   __ret = (uint32x4_t)(__rev0 < __rev1);
3525   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
3526   return __ret;
3527 }
3528 #endif
3529 
3530 #ifdef __LITTLE_ENDIAN__
vcltq_s16(int16x8_t __p0,int16x8_t __p1)3531 __ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) {
3532   uint16x8_t __ret;
3533   __ret = (uint16x8_t)(__p0 < __p1);
3534   return __ret;
3535 }
3536 #else
vcltq_s16(int16x8_t __p0,int16x8_t __p1)3537 __ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) {
3538   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3539   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
3540   uint16x8_t __ret;
3541   __ret = (uint16x8_t)(__rev0 < __rev1);
3542   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3543   return __ret;
3544 }
3545 #endif
3546 
3547 #ifdef __LITTLE_ENDIAN__
vclt_u8(uint8x8_t __p0,uint8x8_t __p1)3548 __ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) {
3549   uint8x8_t __ret;
3550   __ret = (uint8x8_t)(__p0 < __p1);
3551   return __ret;
3552 }
3553 #else
vclt_u8(uint8x8_t __p0,uint8x8_t __p1)3554 __ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) {
3555   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3556   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
3557   uint8x8_t __ret;
3558   __ret = (uint8x8_t)(__rev0 < __rev1);
3559   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3560   return __ret;
3561 }
3562 #endif
3563 
3564 #ifdef __LITTLE_ENDIAN__
vclt_u32(uint32x2_t __p0,uint32x2_t __p1)3565 __ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) {
3566   uint32x2_t __ret;
3567   __ret = (uint32x2_t)(__p0 < __p1);
3568   return __ret;
3569 }
3570 #else
vclt_u32(uint32x2_t __p0,uint32x2_t __p1)3571 __ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) {
3572   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
3573   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
3574   uint32x2_t __ret;
3575   __ret = (uint32x2_t)(__rev0 < __rev1);
3576   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
3577   return __ret;
3578 }
3579 #endif
3580 
3581 #ifdef __LITTLE_ENDIAN__
vclt_u16(uint16x4_t __p0,uint16x4_t __p1)3582 __ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) {
3583   uint16x4_t __ret;
3584   __ret = (uint16x4_t)(__p0 < __p1);
3585   return __ret;
3586 }
3587 #else
vclt_u16(uint16x4_t __p0,uint16x4_t __p1)3588 __ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) {
3589   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3590   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
3591   uint16x4_t __ret;
3592   __ret = (uint16x4_t)(__rev0 < __rev1);
3593   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
3594   return __ret;
3595 }
3596 #endif
3597 
3598 #ifdef __LITTLE_ENDIAN__
vclt_s8(int8x8_t __p0,int8x8_t __p1)3599 __ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) {
3600   uint8x8_t __ret;
3601   __ret = (uint8x8_t)(__p0 < __p1);
3602   return __ret;
3603 }
3604 #else
vclt_s8(int8x8_t __p0,int8x8_t __p1)3605 __ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) {
3606   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3607   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
3608   uint8x8_t __ret;
3609   __ret = (uint8x8_t)(__rev0 < __rev1);
3610   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3611   return __ret;
3612 }
3613 #endif
3614 
3615 #ifdef __LITTLE_ENDIAN__
vclt_f32(float32x2_t __p0,float32x2_t __p1)3616 __ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) {
3617   uint32x2_t __ret;
3618   __ret = (uint32x2_t)(__p0 < __p1);
3619   return __ret;
3620 }
3621 #else
vclt_f32(float32x2_t __p0,float32x2_t __p1)3622 __ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) {
3623   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
3624   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
3625   uint32x2_t __ret;
3626   __ret = (uint32x2_t)(__rev0 < __rev1);
3627   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
3628   return __ret;
3629 }
3630 #endif
3631 
3632 #ifdef __LITTLE_ENDIAN__
vclt_s32(int32x2_t __p0,int32x2_t __p1)3633 __ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) {
3634   uint32x2_t __ret;
3635   __ret = (uint32x2_t)(__p0 < __p1);
3636   return __ret;
3637 }
3638 #else
vclt_s32(int32x2_t __p0,int32x2_t __p1)3639 __ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) {
3640   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
3641   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
3642   uint32x2_t __ret;
3643   __ret = (uint32x2_t)(__rev0 < __rev1);
3644   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
3645   return __ret;
3646 }
3647 #endif
3648 
3649 #ifdef __LITTLE_ENDIAN__
vclt_s16(int16x4_t __p0,int16x4_t __p1)3650 __ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) {
3651   uint16x4_t __ret;
3652   __ret = (uint16x4_t)(__p0 < __p1);
3653   return __ret;
3654 }
3655 #else
vclt_s16(int16x4_t __p0,int16x4_t __p1)3656 __ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) {
3657   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3658   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
3659   uint16x4_t __ret;
3660   __ret = (uint16x4_t)(__rev0 < __rev1);
3661   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
3662   return __ret;
3663 }
3664 #endif
3665 
3666 #ifdef __LITTLE_ENDIAN__
vclzq_u8(uint8x16_t __p0)3667 __ai uint8x16_t vclzq_u8(uint8x16_t __p0) {
3668   uint8x16_t __ret;
3669   __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 48);
3670   return __ret;
3671 }
3672 #else
vclzq_u8(uint8x16_t __p0)3673 __ai uint8x16_t vclzq_u8(uint8x16_t __p0) {
3674   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3675   uint8x16_t __ret;
3676   __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 48);
3677   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3678   return __ret;
3679 }
3680 #endif
3681 
3682 #ifdef __LITTLE_ENDIAN__
vclzq_u32(uint32x4_t __p0)3683 __ai uint32x4_t vclzq_u32(uint32x4_t __p0) {
3684   uint32x4_t __ret;
3685   __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 50);
3686   return __ret;
3687 }
3688 #else
vclzq_u32(uint32x4_t __p0)3689 __ai uint32x4_t vclzq_u32(uint32x4_t __p0) {
3690   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3691   uint32x4_t __ret;
3692   __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 50);
3693   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
3694   return __ret;
3695 }
3696 #endif
3697 
3698 #ifdef __LITTLE_ENDIAN__
vclzq_u16(uint16x8_t __p0)3699 __ai uint16x8_t vclzq_u16(uint16x8_t __p0) {
3700   uint16x8_t __ret;
3701   __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 49);
3702   return __ret;
3703 }
3704 #else
vclzq_u16(uint16x8_t __p0)3705 __ai uint16x8_t vclzq_u16(uint16x8_t __p0) {
3706   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3707   uint16x8_t __ret;
3708   __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 49);
3709   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3710   return __ret;
3711 }
3712 #endif
3713 
3714 #ifdef __LITTLE_ENDIAN__
vclzq_s8(int8x16_t __p0)3715 __ai int8x16_t vclzq_s8(int8x16_t __p0) {
3716   int8x16_t __ret;
3717   __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 32);
3718   return __ret;
3719 }
3720 #else
vclzq_s8(int8x16_t __p0)3721 __ai int8x16_t vclzq_s8(int8x16_t __p0) {
3722   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3723   int8x16_t __ret;
3724   __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 32);
3725   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3726   return __ret;
3727 }
3728 #endif
3729 
3730 #ifdef __LITTLE_ENDIAN__
vclzq_s32(int32x4_t __p0)3731 __ai int32x4_t vclzq_s32(int32x4_t __p0) {
3732   int32x4_t __ret;
3733   __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 34);
3734   return __ret;
3735 }
3736 #else
vclzq_s32(int32x4_t __p0)3737 __ai int32x4_t vclzq_s32(int32x4_t __p0) {
3738   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3739   int32x4_t __ret;
3740   __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 34);
3741   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
3742   return __ret;
3743 }
3744 #endif
3745 
3746 #ifdef __LITTLE_ENDIAN__
vclzq_s16(int16x8_t __p0)3747 __ai int16x8_t vclzq_s16(int16x8_t __p0) {
3748   int16x8_t __ret;
3749   __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 33);
3750   return __ret;
3751 }
3752 #else
vclzq_s16(int16x8_t __p0)3753 __ai int16x8_t vclzq_s16(int16x8_t __p0) {
3754   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3755   int16x8_t __ret;
3756   __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 33);
3757   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3758   return __ret;
3759 }
3760 #endif
3761 
3762 #ifdef __LITTLE_ENDIAN__
vclz_u8(uint8x8_t __p0)3763 __ai uint8x8_t vclz_u8(uint8x8_t __p0) {
3764   uint8x8_t __ret;
3765   __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 16);
3766   return __ret;
3767 }
3768 #else
vclz_u8(uint8x8_t __p0)3769 __ai uint8x8_t vclz_u8(uint8x8_t __p0) {
3770   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3771   uint8x8_t __ret;
3772   __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 16);
3773   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3774   return __ret;
3775 }
3776 #endif
3777 
3778 #ifdef __LITTLE_ENDIAN__
vclz_u32(uint32x2_t __p0)3779 __ai uint32x2_t vclz_u32(uint32x2_t __p0) {
3780   uint32x2_t __ret;
3781   __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 18);
3782   return __ret;
3783 }
3784 #else
vclz_u32(uint32x2_t __p0)3785 __ai uint32x2_t vclz_u32(uint32x2_t __p0) {
3786   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
3787   uint32x2_t __ret;
3788   __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 18);
3789   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
3790   return __ret;
3791 }
3792 #endif
3793 
3794 #ifdef __LITTLE_ENDIAN__
vclz_u16(uint16x4_t __p0)3795 __ai uint16x4_t vclz_u16(uint16x4_t __p0) {
3796   uint16x4_t __ret;
3797   __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 17);
3798   return __ret;
3799 }
3800 #else
vclz_u16(uint16x4_t __p0)3801 __ai uint16x4_t vclz_u16(uint16x4_t __p0) {
3802   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3803   uint16x4_t __ret;
3804   __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 17);
3805   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
3806   return __ret;
3807 }
3808 #endif
3809 
3810 #ifdef __LITTLE_ENDIAN__
vclz_s8(int8x8_t __p0)3811 __ai int8x8_t vclz_s8(int8x8_t __p0) {
3812   int8x8_t __ret;
3813   __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 0);
3814   return __ret;
3815 }
3816 #else
vclz_s8(int8x8_t __p0)3817 __ai int8x8_t vclz_s8(int8x8_t __p0) {
3818   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3819   int8x8_t __ret;
3820   __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 0);
3821   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3822   return __ret;
3823 }
3824 #endif
3825 
3826 #ifdef __LITTLE_ENDIAN__
vclz_s32(int32x2_t __p0)3827 __ai int32x2_t vclz_s32(int32x2_t __p0) {
3828   int32x2_t __ret;
3829   __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 2);
3830   return __ret;
3831 }
3832 #else
vclz_s32(int32x2_t __p0)3833 __ai int32x2_t vclz_s32(int32x2_t __p0) {
3834   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
3835   int32x2_t __ret;
3836   __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 2);
3837   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
3838   return __ret;
3839 }
3840 #endif
3841 
3842 #ifdef __LITTLE_ENDIAN__
vclz_s16(int16x4_t __p0)3843 __ai int16x4_t vclz_s16(int16x4_t __p0) {
3844   int16x4_t __ret;
3845   __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 1);
3846   return __ret;
3847 }
3848 #else
vclz_s16(int16x4_t __p0)3849 __ai int16x4_t vclz_s16(int16x4_t __p0) {
3850   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3851   int16x4_t __ret;
3852   __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 1);
3853   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
3854   return __ret;
3855 }
3856 #endif
3857 
3858 #ifdef __LITTLE_ENDIAN__
vcnt_p8(poly8x8_t __p0)3859 __ai poly8x8_t vcnt_p8(poly8x8_t __p0) {
3860   poly8x8_t __ret;
3861   __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 4);
3862   return __ret;
3863 }
3864 #else
vcnt_p8(poly8x8_t __p0)3865 __ai poly8x8_t vcnt_p8(poly8x8_t __p0) {
3866   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3867   poly8x8_t __ret;
3868   __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 4);
3869   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3870   return __ret;
3871 }
3872 #endif
3873 
3874 #ifdef __LITTLE_ENDIAN__
vcntq_p8(poly8x16_t __p0)3875 __ai poly8x16_t vcntq_p8(poly8x16_t __p0) {
3876   poly8x16_t __ret;
3877   __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 36);
3878   return __ret;
3879 }
3880 #else
vcntq_p8(poly8x16_t __p0)3881 __ai poly8x16_t vcntq_p8(poly8x16_t __p0) {
3882   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3883   poly8x16_t __ret;
3884   __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 36);
3885   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3886   return __ret;
3887 }
3888 #endif
3889 
3890 #ifdef __LITTLE_ENDIAN__
vcntq_u8(uint8x16_t __p0)3891 __ai uint8x16_t vcntq_u8(uint8x16_t __p0) {
3892   uint8x16_t __ret;
3893   __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 48);
3894   return __ret;
3895 }
3896 #else
vcntq_u8(uint8x16_t __p0)3897 __ai uint8x16_t vcntq_u8(uint8x16_t __p0) {
3898   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3899   uint8x16_t __ret;
3900   __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 48);
3901   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3902   return __ret;
3903 }
3904 #endif
3905 
3906 #ifdef __LITTLE_ENDIAN__
vcntq_s8(int8x16_t __p0)3907 __ai int8x16_t vcntq_s8(int8x16_t __p0) {
3908   int8x16_t __ret;
3909   __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 32);
3910   return __ret;
3911 }
3912 #else
vcntq_s8(int8x16_t __p0)3913 __ai int8x16_t vcntq_s8(int8x16_t __p0) {
3914   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3915   int8x16_t __ret;
3916   __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 32);
3917   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3918   return __ret;
3919 }
3920 #endif
3921 
3922 #ifdef __LITTLE_ENDIAN__
vcnt_u8(uint8x8_t __p0)3923 __ai uint8x8_t vcnt_u8(uint8x8_t __p0) {
3924   uint8x8_t __ret;
3925   __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 16);
3926   return __ret;
3927 }
3928 #else
vcnt_u8(uint8x8_t __p0)3929 __ai uint8x8_t vcnt_u8(uint8x8_t __p0) {
3930   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3931   uint8x8_t __ret;
3932   __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 16);
3933   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3934   return __ret;
3935 }
3936 #endif
3937 
3938 #ifdef __LITTLE_ENDIAN__
vcnt_s8(int8x8_t __p0)3939 __ai int8x8_t vcnt_s8(int8x8_t __p0) {
3940   int8x8_t __ret;
3941   __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 0);
3942   return __ret;
3943 }
3944 #else
vcnt_s8(int8x8_t __p0)3945 __ai int8x8_t vcnt_s8(int8x8_t __p0) {
3946   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3947   int8x8_t __ret;
3948   __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 0);
3949   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3950   return __ret;
3951 }
3952 #endif
3953 
3954 #ifdef __LITTLE_ENDIAN__
vcombine_p8(poly8x8_t __p0,poly8x8_t __p1)3955 __ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) {
3956   poly8x16_t __ret;
3957   __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
3958   return __ret;
3959 }
3960 #else
vcombine_p8(poly8x8_t __p0,poly8x8_t __p1)3961 __ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) {
3962   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3963   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
3964   poly8x16_t __ret;
3965   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
3966   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3967   return __ret;
3968 }
3969 #endif
3970 
3971 #ifdef __LITTLE_ENDIAN__
vcombine_p16(poly16x4_t __p0,poly16x4_t __p1)3972 __ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) {
3973   poly16x8_t __ret;
3974   __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
3975   return __ret;
3976 }
3977 #else
vcombine_p16(poly16x4_t __p0,poly16x4_t __p1)3978 __ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) {
3979   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3980   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
3981   poly16x8_t __ret;
3982   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
3983   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3984   return __ret;
3985 }
3986 #endif
3987 
3988 #ifdef __LITTLE_ENDIAN__
vcombine_u8(uint8x8_t __p0,uint8x8_t __p1)3989 __ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
3990   uint8x16_t __ret;
3991   __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
3992   return __ret;
3993 }
3994 #else
vcombine_u8(uint8x8_t __p0,uint8x8_t __p1)3995 __ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
3996   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3997   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
3998   uint8x16_t __ret;
3999   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
4000   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
4001   return __ret;
4002 }
__noswap_vcombine_u8(uint8x8_t __p0,uint8x8_t __p1)4003 __ai uint8x16_t __noswap_vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
4004   uint8x16_t __ret;
4005   __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
4006   return __ret;
4007 }
4008 #endif
4009 
4010 #ifdef __LITTLE_ENDIAN__
vcombine_u32(uint32x2_t __p0,uint32x2_t __p1)4011 __ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
4012   uint32x4_t __ret;
4013   __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
4014   return __ret;
4015 }
4016 #else
vcombine_u32(uint32x2_t __p0,uint32x2_t __p1)4017 __ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
4018   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
4019   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
4020   uint32x4_t __ret;
4021   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
4022   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
4023   return __ret;
4024 }
__noswap_vcombine_u32(uint32x2_t __p0,uint32x2_t __p1)4025 __ai uint32x4_t __noswap_vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
4026   uint32x4_t __ret;
4027   __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
4028   return __ret;
4029 }
4030 #endif
4031 
4032 #ifdef __LITTLE_ENDIAN__
vcombine_u64(uint64x1_t __p0,uint64x1_t __p1)4033 __ai uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) {
4034   uint64x2_t __ret;
4035   __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
4036   return __ret;
4037 }
4038 #else
vcombine_u64(uint64x1_t __p0,uint64x1_t __p1)4039 __ai uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) {
4040   uint64x2_t __ret;
4041   __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
4042   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
4043   return __ret;
4044 }
4045 #endif
4046 
4047 #ifdef __LITTLE_ENDIAN__
vcombine_u16(uint16x4_t __p0,uint16x4_t __p1)4048 __ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
4049   uint16x8_t __ret;
4050   __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
4051   return __ret;
4052 }
4053 #else
vcombine_u16(uint16x4_t __p0,uint16x4_t __p1)4054 __ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
4055   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
4056   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
4057   uint16x8_t __ret;
4058   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
4059   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
4060   return __ret;
4061 }
__noswap_vcombine_u16(uint16x4_t __p0,uint16x4_t __p1)4062 __ai uint16x8_t __noswap_vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
4063   uint16x8_t __ret;
4064   __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
4065   return __ret;
4066 }
4067 #endif
4068 
4069 #ifdef __LITTLE_ENDIAN__
vcombine_s8(int8x8_t __p0,int8x8_t __p1)4070 __ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
4071   int8x16_t __ret;
4072   __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
4073   return __ret;
4074 }
4075 #else
vcombine_s8(int8x8_t __p0,int8x8_t __p1)4076 __ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
4077   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
4078   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
4079   int8x16_t __ret;
4080   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
4081   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
4082   return __ret;
4083 }
4084 __ai int8x16_t __noswap_vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
4085   int8x16_t __ret;
4086   __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
4087   return __ret;
4088 }
4089 #endif
4090 
4091 #ifdef __LITTLE_ENDIAN__
4092 __ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
4093   float32x4_t __ret;
4094   __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
4095   return __ret;
4096 }
4097 #else
4098 __ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
4099   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
4100   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
4101   float32x4_t __ret;
4102   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
4103   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
4104   return __ret;
4105 }
4106 __ai float32x4_t __noswap_vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
4107   float32x4_t __ret;
4108   __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
4109   return __ret;
4110 }
4111 #endif
4112 
4113 #ifdef __LITTLE_ENDIAN__
4114 __ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
4115   float16x8_t __ret;
4116   __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
4117   return __ret;
4118 }
4119 #else
4120 __ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
4121   float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
4122   float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
4123   float16x8_t __ret;
4124   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
4125   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
4126   return __ret;
4127 }
4128 __ai float16x8_t __noswap_vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
4129   float16x8_t __ret;
4130   __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
4131   return __ret;
4132 }
4133 #endif
4134 
4135 #ifdef __LITTLE_ENDIAN__
4136 __ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
4137   int32x4_t __ret;
4138   __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
4139   return __ret;
4140 }
4141 #else
4142 __ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
4143   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
4144   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
4145   int32x4_t __ret;
4146   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
4147   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
4148   return __ret;
4149 }
4150 __ai int32x4_t __noswap_vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
4151   int32x4_t __ret;
4152   __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
4153   return __ret;
4154 }
4155 #endif
4156 
4157 #ifdef __LITTLE_ENDIAN__
4158 __ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) {
4159   int64x2_t __ret;
4160   __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
4161   return __ret;
4162 }
4163 #else
4164 __ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) {
4165   int64x2_t __ret;
4166   __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
4167   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
4168   return __ret;
4169 }
4170 #endif
4171 
4172 #ifdef __LITTLE_ENDIAN__
4173 __ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
4174   int16x8_t __ret;
4175   __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
4176   return __ret;
4177 }
4178 #else
4179 __ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
4180   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
4181   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
4182   int16x8_t __ret;
4183   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
4184   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
4185   return __ret;
4186 }
4187 __ai int16x8_t __noswap_vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
4188   int16x8_t __ret;
4189   __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
4190   return __ret;
4191 }
4192 #endif
4193 
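/* Editor's note: the vcreate_<type> intrinsics below reinterpret the 64 bits
 * of a uint64_t as a 64-bit NEON vector, with element 0 taken from the least
 * significant bits. Because this is a plain bit cast, the little- and
 * big-endian definitions are identical. Illustrative use (value chosen only
 * for demonstration):
 *   uint8x8_t v = vcreate_u8(0x0706050403020100ULL); // lane 0 holds 0x00 on
 *                                                    // a little-endian target
 */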
4194 #ifdef __LITTLE_ENDIAN__
4195 __ai poly8x8_t vcreate_p8(uint64_t __p0) {
4196   poly8x8_t __ret;
4197   __ret = (poly8x8_t)(__p0);
4198   return __ret;
4199 }
4200 #else
4201 __ai poly8x8_t vcreate_p8(uint64_t __p0) {
4202   poly8x8_t __ret;
4203   __ret = (poly8x8_t)(__p0);
4204   return __ret;
4205 }
4206 #endif
4207 
4208 #ifdef __LITTLE_ENDIAN__
4209 __ai poly16x4_t vcreate_p16(uint64_t __p0) {
4210   poly16x4_t __ret;
4211   __ret = (poly16x4_t)(__p0);
4212   return __ret;
4213 }
4214 #else
4215 __ai poly16x4_t vcreate_p16(uint64_t __p0) {
4216   poly16x4_t __ret;
4217   __ret = (poly16x4_t)(__p0);
4218   return __ret;
4219 }
4220 #endif
4221 
4222 #ifdef __LITTLE_ENDIAN__
4223 __ai uint8x8_t vcreate_u8(uint64_t __p0) {
4224   uint8x8_t __ret;
4225   __ret = (uint8x8_t)(__p0);
4226   return __ret;
4227 }
4228 #else
4229 __ai uint8x8_t vcreate_u8(uint64_t __p0) {
4230   uint8x8_t __ret;
4231   __ret = (uint8x8_t)(__p0);
4232   return __ret;
4233 }
4234 #endif
4235 
4236 #ifdef __LITTLE_ENDIAN__
4237 __ai uint32x2_t vcreate_u32(uint64_t __p0) {
4238   uint32x2_t __ret;
4239   __ret = (uint32x2_t)(__p0);
4240   return __ret;
4241 }
4242 #else
4243 __ai uint32x2_t vcreate_u32(uint64_t __p0) {
4244   uint32x2_t __ret;
4245   __ret = (uint32x2_t)(__p0);
4246   return __ret;
4247 }
4248 #endif
4249 
4250 #ifdef __LITTLE_ENDIAN__
4251 __ai uint64x1_t vcreate_u64(uint64_t __p0) {
4252   uint64x1_t __ret;
4253   __ret = (uint64x1_t)(__p0);
4254   return __ret;
4255 }
4256 #else
4257 __ai uint64x1_t vcreate_u64(uint64_t __p0) {
4258   uint64x1_t __ret;
4259   __ret = (uint64x1_t)(__p0);
4260   return __ret;
4261 }
4262 #endif
4263 
4264 #ifdef __LITTLE_ENDIAN__
4265 __ai uint16x4_t vcreate_u16(uint64_t __p0) {
4266   uint16x4_t __ret;
4267   __ret = (uint16x4_t)(__p0);
4268   return __ret;
4269 }
4270 #else
4271 __ai uint16x4_t vcreate_u16(uint64_t __p0) {
4272   uint16x4_t __ret;
4273   __ret = (uint16x4_t)(__p0);
4274   return __ret;
4275 }
4276 #endif
4277 
4278 #ifdef __LITTLE_ENDIAN__
4279 __ai int8x8_t vcreate_s8(uint64_t __p0) {
4280   int8x8_t __ret;
4281   __ret = (int8x8_t)(__p0);
4282   return __ret;
4283 }
4284 #else
4285 __ai int8x8_t vcreate_s8(uint64_t __p0) {
4286   int8x8_t __ret;
4287   __ret = (int8x8_t)(__p0);
4288   return __ret;
4289 }
4290 #endif
4291 
4292 #ifdef __LITTLE_ENDIAN__
4293 __ai float32x2_t vcreate_f32(uint64_t __p0) {
4294   float32x2_t __ret;
4295   __ret = (float32x2_t)(__p0);
4296   return __ret;
4297 }
4298 #else
4299 __ai float32x2_t vcreate_f32(uint64_t __p0) {
4300   float32x2_t __ret;
4301   __ret = (float32x2_t)(__p0);
4302   return __ret;
4303 }
4304 #endif
4305 
4306 #ifdef __LITTLE_ENDIAN__
4307 __ai float16x4_t vcreate_f16(uint64_t __p0) {
4308   float16x4_t __ret;
4309   __ret = (float16x4_t)(__p0);
4310   return __ret;
4311 }
4312 #else
4313 __ai float16x4_t vcreate_f16(uint64_t __p0) {
4314   float16x4_t __ret;
4315   __ret = (float16x4_t)(__p0);
4316   return __ret;
4317 }
4318 #endif
4319 
4320 #ifdef __LITTLE_ENDIAN__
4321 __ai int32x2_t vcreate_s32(uint64_t __p0) {
4322   int32x2_t __ret;
4323   __ret = (int32x2_t)(__p0);
4324   return __ret;
4325 }
4326 #else
4327 __ai int32x2_t vcreate_s32(uint64_t __p0) {
4328   int32x2_t __ret;
4329   __ret = (int32x2_t)(__p0);
4330   return __ret;
4331 }
4332 #endif
4333 
4334 #ifdef __LITTLE_ENDIAN__
4335 __ai int64x1_t vcreate_s64(uint64_t __p0) {
4336   int64x1_t __ret;
4337   __ret = (int64x1_t)(__p0);
4338   return __ret;
4339 }
4340 #else
4341 __ai int64x1_t vcreate_s64(uint64_t __p0) {
4342   int64x1_t __ret;
4343   __ret = (int64x1_t)(__p0);
4344   return __ret;
4345 }
4346 #endif
4347 
4348 #ifdef __LITTLE_ENDIAN__
4349 __ai int16x4_t vcreate_s16(uint64_t __p0) {
4350   int16x4_t __ret;
4351   __ret = (int16x4_t)(__p0);
4352   return __ret;
4353 }
4354 #else
4355 __ai int16x4_t vcreate_s16(uint64_t __p0) {
4356   int16x4_t __ret;
4357   __ret = (int16x4_t)(__p0);
4358   return __ret;
4359 }
4360 #endif
4361 
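/* Editor's note: vcvt_f16_f32 below narrows four float32 lanes to float16 and
 * vcvt_f32_f16 widens them back; vcvt{q}_f32_{u32,s32} convert integer lanes
 * to float32. The trailing integer literal passed to each __builtin_neon_*
 * call is an internal type code consumed by the builtin, not a user-visible
 * parameter. */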
4362 #ifdef __LITTLE_ENDIAN__
4363 __ai float16x4_t vcvt_f16_f32(float32x4_t __p0) {
4364   float16x4_t __ret;
4365   __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x16_t)__p0, 8);
4366   return __ret;
4367 }
4368 #else
4369 __ai float16x4_t vcvt_f16_f32(float32x4_t __p0) {
4370   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
4371   float16x4_t __ret;
4372   __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x16_t)__rev0, 8);
4373   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
4374   return __ret;
4375 }
4376 __ai float16x4_t __noswap_vcvt_f16_f32(float32x4_t __p0) {
4377   float16x4_t __ret;
4378   __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x16_t)__p0, 8);
4379   return __ret;
4380 }
4381 #endif
4382 
4383 #ifdef __LITTLE_ENDIAN__
4384 __ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) {
4385   float32x4_t __ret;
4386   __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 50);
4387   return __ret;
4388 }
4389 #else
4390 __ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) {
4391   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
4392   float32x4_t __ret;
4393   __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 50);
4394   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
4395   return __ret;
4396 }
4397 #endif
4398 
4399 #ifdef __LITTLE_ENDIAN__
4400 __ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) {
4401   float32x4_t __ret;
4402   __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 34);
4403   return __ret;
4404 }
4405 #else
4406 __ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) {
4407   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
4408   float32x4_t __ret;
4409   __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 34);
4410   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
4411   return __ret;
4412 }
4413 #endif
4414 
4415 #ifdef __LITTLE_ENDIAN__
4416 __ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) {
4417   float32x2_t __ret;
4418   __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 18);
4419   return __ret;
4420 }
4421 #else
4422 __ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) {
4423   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
4424   float32x2_t __ret;
4425   __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 18);
4426   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
4427   return __ret;
4428 }
4429 #endif
4430 
4431 #ifdef __LITTLE_ENDIAN__
4432 __ai float32x2_t vcvt_f32_s32(int32x2_t __p0) {
4433   float32x2_t __ret;
4434   __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 2);
4435   return __ret;
4436 }
4437 #else
4438 __ai float32x2_t vcvt_f32_s32(int32x2_t __p0) {
4439   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
4440   float32x2_t __ret;
4441   __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 2);
4442   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
4443   return __ret;
4444 }
4445 #endif
4446 
4447 #ifdef __LITTLE_ENDIAN__
4448 __ai float32x4_t vcvt_f32_f16(float16x4_t __p0) {
4449   float32x4_t __ret;
4450   __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 8);
4451   return __ret;
4452 }
4453 #else
4454 __ai float32x4_t vcvt_f32_f16(float16x4_t __p0) {
4455   float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
4456   float32x4_t __ret;
4457   __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__rev0, 8);
4458   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
4459   return __ret;
4460 }
4461 __ai float32x4_t __noswap_vcvt_f32_f16(float16x4_t __p0) {
4462   float32x4_t __ret;
4463   __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 8);
4464   return __ret;
4465 }
4466 #endif
4467 
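/* Editor's note: the vcvt{q}_n_* macros below convert between floating point
 * and fixed point with __p1 fractional bits. The bit count must be a
 * compile-time constant, which is why these are macros rather than inline
 * functions. A minimal illustrative use:
 *   int32x2_t q8 = vcvt_n_s32_f32(vdup_n_f32(1.5f), 8); // 1.5 * 2^8 = 384
 */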
4468 #ifdef __LITTLE_ENDIAN__
4469 #define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \
4470   uint32x4_t __s0 = __p0; \
4471   float32x4_t __ret; \
4472   __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 50); \
4473   __ret; \
4474 })
4475 #else
4476 #define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \
4477   uint32x4_t __s0 = __p0; \
4478   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
4479   float32x4_t __ret; \
4480   __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 50); \
4481   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
4482   __ret; \
4483 })
4484 #endif
4485 
4486 #ifdef __LITTLE_ENDIAN__
4487 #define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \
4488   int32x4_t __s0 = __p0; \
4489   float32x4_t __ret; \
4490   __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 34); \
4491   __ret; \
4492 })
4493 #else
4494 #define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \
4495   int32x4_t __s0 = __p0; \
4496   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
4497   float32x4_t __ret; \
4498   __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 34); \
4499   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
4500   __ret; \
4501 })
4502 #endif
4503 
4504 #ifdef __LITTLE_ENDIAN__
4505 #define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \
4506   uint32x2_t __s0 = __p0; \
4507   float32x2_t __ret; \
4508   __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 18); \
4509   __ret; \
4510 })
4511 #else
4512 #define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \
4513   uint32x2_t __s0 = __p0; \
4514   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
4515   float32x2_t __ret; \
4516   __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 18); \
4517   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
4518   __ret; \
4519 })
4520 #endif
4521 
4522 #ifdef __LITTLE_ENDIAN__
4523 #define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \
4524   int32x2_t __s0 = __p0; \
4525   float32x2_t __ret; \
4526   __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 2); \
4527   __ret; \
4528 })
4529 #else
4530 #define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \
4531   int32x2_t __s0 = __p0; \
4532   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
4533   float32x2_t __ret; \
4534   __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 2); \
4535   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
4536   __ret; \
4537 })
4538 #endif
4539 
4540 #ifdef __LITTLE_ENDIAN__
4541 #define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \
4542   float32x4_t __s0 = __p0; \
4543   int32x4_t __ret; \
4544   __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__s0, __p1, 34); \
4545   __ret; \
4546 })
4547 #else
4548 #define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \
4549   float32x4_t __s0 = __p0; \
4550   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
4551   int32x4_t __ret; \
4552   __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__rev0, __p1, 34); \
4553   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
4554   __ret; \
4555 })
4556 #endif
4557 
4558 #ifdef __LITTLE_ENDIAN__
4559 #define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \
4560   float32x2_t __s0 = __p0; \
4561   int32x2_t __ret; \
4562   __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__s0, __p1, 2); \
4563   __ret; \
4564 })
4565 #else
4566 #define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \
4567   float32x2_t __s0 = __p0; \
4568   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
4569   int32x2_t __ret; \
4570   __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__rev0, __p1, 2); \
4571   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
4572   __ret; \
4573 })
4574 #endif
4575 
4576 #ifdef __LITTLE_ENDIAN__
4577 #define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \
4578   float32x4_t __s0 = __p0; \
4579   uint32x4_t __ret; \
4580   __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__s0, __p1, 50); \
4581   __ret; \
4582 })
4583 #else
4584 #define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \
4585   float32x4_t __s0 = __p0; \
4586   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
4587   uint32x4_t __ret; \
4588   __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__rev0, __p1, 50); \
4589   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
4590   __ret; \
4591 })
4592 #endif
4593 
4594 #ifdef __LITTLE_ENDIAN__
4595 #define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \
4596   float32x2_t __s0 = __p0; \
4597   uint32x2_t __ret; \
4598   __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__s0, __p1, 18); \
4599   __ret; \
4600 })
4601 #else
4602 #define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \
4603   float32x2_t __s0 = __p0; \
4604   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
4605   uint32x2_t __ret; \
4606   __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__rev0, __p1, 18); \
4607   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
4608   __ret; \
4609 })
4610 #endif
4611 
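/* Editor's note: vcvt{q}_s32_f32 and vcvt{q}_u32_f32 below convert each
 * float32 lane to a signed or unsigned 32-bit integer, rounding toward
 * zero. */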
4612 #ifdef __LITTLE_ENDIAN__
4613 __ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) {
4614   int32x4_t __ret;
4615   __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__p0, 34);
4616   return __ret;
4617 }
4618 #else
4619 __ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) {
4620   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
4621   int32x4_t __ret;
4622   __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__rev0, 34);
4623   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
4624   return __ret;
4625 }
4626 #endif
4627 
4628 #ifdef __LITTLE_ENDIAN__
4629 __ai int32x2_t vcvt_s32_f32(float32x2_t __p0) {
4630   int32x2_t __ret;
4631   __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__p0, 2);
4632   return __ret;
4633 }
4634 #else
4635 __ai int32x2_t vcvt_s32_f32(float32x2_t __p0) {
4636   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
4637   int32x2_t __ret;
4638   __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__rev0, 2);
4639   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
4640   return __ret;
4641 }
4642 #endif
4643 
4644 #ifdef __LITTLE_ENDIAN__
4645 __ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) {
4646   uint32x4_t __ret;
4647   __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__p0, 50);
4648   return __ret;
4649 }
4650 #else
4651 __ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) {
4652   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
4653   uint32x4_t __ret;
4654   __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__rev0, 50);
4655   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
4656   return __ret;
4657 }
4658 #endif
4659 
4660 #ifdef __LITTLE_ENDIAN__
4661 __ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) {
4662   uint32x2_t __ret;
4663   __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__p0, 18);
4664   return __ret;
4665 }
4666 #else
4667 __ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) {
4668   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
4669   uint32x2_t __ret;
4670   __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__rev0, 18);
4671   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
4672   return __ret;
4673 }
4674 #endif
4675 
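/* Editor's note: the vdup_lane/vdupq_lane macros below broadcast lane __p1 of
 * a 64-bit source vector into every lane of a 64-bit or 128-bit result. The
 * lane index must be a constant, hence the macro form. Illustrative use (the
 * operand name is hypothetical):
 *   int16x8_t b = vdupq_lane_s16(a, 2); // every lane of b is a[2]
 */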
4676 #ifdef __LITTLE_ENDIAN__
4677 #define vdup_lane_p8(__p0, __p1) __extension__ ({ \
4678   poly8x8_t __s0 = __p0; \
4679   poly8x8_t __ret; \
4680   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
4681   __ret; \
4682 })
4683 #else
4684 #define vdup_lane_p8(__p0, __p1) __extension__ ({ \
4685   poly8x8_t __s0 = __p0; \
4686   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
4687   poly8x8_t __ret; \
4688   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
4689   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
4690   __ret; \
4691 })
4692 #endif
4693 
4694 #ifdef __LITTLE_ENDIAN__
4695 #define vdup_lane_p16(__p0, __p1) __extension__ ({ \
4696   poly16x4_t __s0 = __p0; \
4697   poly16x4_t __ret; \
4698   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
4699   __ret; \
4700 })
4701 #else
4702 #define vdup_lane_p16(__p0, __p1) __extension__ ({ \
4703   poly16x4_t __s0 = __p0; \
4704   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
4705   poly16x4_t __ret; \
4706   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
4707   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
4708   __ret; \
4709 })
4710 #endif
4711 
4712 #ifdef __LITTLE_ENDIAN__
4713 #define vdupq_lane_p8(__p0, __p1) __extension__ ({ \
4714   poly8x8_t __s0 = __p0; \
4715   poly8x16_t __ret; \
4716   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
4717   __ret; \
4718 })
4719 #else
4720 #define vdupq_lane_p8(__p0, __p1) __extension__ ({ \
4721   poly8x8_t __s0 = __p0; \
4722   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
4723   poly8x16_t __ret; \
4724   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
4725   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
4726   __ret; \
4727 })
4728 #endif
4729 
4730 #ifdef __LITTLE_ENDIAN__
4731 #define vdupq_lane_p16(__p0, __p1) __extension__ ({ \
4732   poly16x4_t __s0 = __p0; \
4733   poly16x8_t __ret; \
4734   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
4735   __ret; \
4736 })
4737 #else
4738 #define vdupq_lane_p16(__p0, __p1) __extension__ ({ \
4739   poly16x4_t __s0 = __p0; \
4740   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
4741   poly16x8_t __ret; \
4742   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
4743   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
4744   __ret; \
4745 })
4746 #endif
4747 
4748 #ifdef __LITTLE_ENDIAN__
4749 #define vdupq_lane_u8(__p0, __p1) __extension__ ({ \
4750   uint8x8_t __s0 = __p0; \
4751   uint8x16_t __ret; \
4752   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
4753   __ret; \
4754 })
4755 #else
4756 #define vdupq_lane_u8(__p0, __p1) __extension__ ({ \
4757   uint8x8_t __s0 = __p0; \
4758   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
4759   uint8x16_t __ret; \
4760   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
4761   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
4762   __ret; \
4763 })
4764 #endif
4765 
4766 #ifdef __LITTLE_ENDIAN__
4767 #define vdupq_lane_u32(__p0, __p1) __extension__ ({ \
4768   uint32x2_t __s0 = __p0; \
4769   uint32x4_t __ret; \
4770   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
4771   __ret; \
4772 })
4773 #else
4774 #define vdupq_lane_u32(__p0, __p1) __extension__ ({ \
4775   uint32x2_t __s0 = __p0; \
4776   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
4777   uint32x4_t __ret; \
4778   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
4779   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
4780   __ret; \
4781 })
4782 #endif
4783 
4784 #ifdef __LITTLE_ENDIAN__
4785 #define vdupq_lane_u64(__p0, __p1) __extension__ ({ \
4786   uint64x1_t __s0 = __p0; \
4787   uint64x2_t __ret; \
4788   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
4789   __ret; \
4790 })
4791 #else
4792 #define vdupq_lane_u64(__p0, __p1) __extension__ ({ \
4793   uint64x1_t __s0 = __p0; \
4794   uint64x2_t __ret; \
4795   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
4796   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
4797   __ret; \
4798 })
4799 #endif
4800 
4801 #ifdef __LITTLE_ENDIAN__
4802 #define vdupq_lane_u16(__p0, __p1) __extension__ ({ \
4803   uint16x4_t __s0 = __p0; \
4804   uint16x8_t __ret; \
4805   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
4806   __ret; \
4807 })
4808 #else
4809 #define vdupq_lane_u16(__p0, __p1) __extension__ ({ \
4810   uint16x4_t __s0 = __p0; \
4811   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
4812   uint16x8_t __ret; \
4813   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
4814   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
4815   __ret; \
4816 })
4817 #endif
4818 
4819 #ifdef __LITTLE_ENDIAN__
4820 #define vdupq_lane_s8(__p0, __p1) __extension__ ({ \
4821   int8x8_t __s0 = __p0; \
4822   int8x16_t __ret; \
4823   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
4824   __ret; \
4825 })
4826 #else
4827 #define vdupq_lane_s8(__p0, __p1) __extension__ ({ \
4828   int8x8_t __s0 = __p0; \
4829   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
4830   int8x16_t __ret; \
4831   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
4832   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
4833   __ret; \
4834 })
4835 #endif
4836 
4837 #ifdef __LITTLE_ENDIAN__
4838 #define vdupq_lane_f32(__p0, __p1) __extension__ ({ \
4839   float32x2_t __s0 = __p0; \
4840   float32x4_t __ret; \
4841   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
4842   __ret; \
4843 })
4844 #else
4845 #define vdupq_lane_f32(__p0, __p1) __extension__ ({ \
4846   float32x2_t __s0 = __p0; \
4847   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
4848   float32x4_t __ret; \
4849   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
4850   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
4851   __ret; \
4852 })
4853 #endif
4854 
4855 #ifdef __LITTLE_ENDIAN__
4856 #define vdupq_lane_s32(__p0, __p1) __extension__ ({ \
4857   int32x2_t __s0 = __p0; \
4858   int32x4_t __ret; \
4859   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
4860   __ret; \
4861 })
4862 #else
4863 #define vdupq_lane_s32(__p0, __p1) __extension__ ({ \
4864   int32x2_t __s0 = __p0; \
4865   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
4866   int32x4_t __ret; \
4867   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
4868   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
4869   __ret; \
4870 })
4871 #endif
4872 
4873 #ifdef __LITTLE_ENDIAN__
4874 #define vdupq_lane_s64(__p0, __p1) __extension__ ({ \
4875   int64x1_t __s0 = __p0; \
4876   int64x2_t __ret; \
4877   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
4878   __ret; \
4879 })
4880 #else
4881 #define vdupq_lane_s64(__p0, __p1) __extension__ ({ \
4882   int64x1_t __s0 = __p0; \
4883   int64x2_t __ret; \
4884   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
4885   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
4886   __ret; \
4887 })
4888 #endif
4889 
4890 #ifdef __LITTLE_ENDIAN__
4891 #define vdupq_lane_s16(__p0, __p1) __extension__ ({ \
4892   int16x4_t __s0 = __p0; \
4893   int16x8_t __ret; \
4894   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
4895   __ret; \
4896 })
4897 #else
4898 #define vdupq_lane_s16(__p0, __p1) __extension__ ({ \
4899   int16x4_t __s0 = __p0; \
4900   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
4901   int16x8_t __ret; \
4902   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
4903   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
4904   __ret; \
4905 })
4906 #endif
4907 
4908 #ifdef __LITTLE_ENDIAN__
4909 #define vdup_lane_u8(__p0, __p1) __extension__ ({ \
4910   uint8x8_t __s0 = __p0; \
4911   uint8x8_t __ret; \
4912   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
4913   __ret; \
4914 })
4915 #else
4916 #define vdup_lane_u8(__p0, __p1) __extension__ ({ \
4917   uint8x8_t __s0 = __p0; \
4918   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
4919   uint8x8_t __ret; \
4920   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
4921   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
4922   __ret; \
4923 })
4924 #endif
4925 
4926 #ifdef __LITTLE_ENDIAN__
4927 #define vdup_lane_u32(__p0, __p1) __extension__ ({ \
4928   uint32x2_t __s0 = __p0; \
4929   uint32x2_t __ret; \
4930   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
4931   __ret; \
4932 })
4933 #else
4934 #define vdup_lane_u32(__p0, __p1) __extension__ ({ \
4935   uint32x2_t __s0 = __p0; \
4936   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
4937   uint32x2_t __ret; \
4938   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
4939   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
4940   __ret; \
4941 })
4942 #endif
4943 
4944 #ifdef __LITTLE_ENDIAN__
4945 #define vdup_lane_u64(__p0, __p1) __extension__ ({ \
4946   uint64x1_t __s0 = __p0; \
4947   uint64x1_t __ret; \
4948   __ret = __builtin_shufflevector(__s0, __s0, __p1); \
4949   __ret; \
4950 })
4951 #else
4952 #define vdup_lane_u64(__p0, __p1) __extension__ ({ \
4953   uint64x1_t __s0 = __p0; \
4954   uint64x1_t __ret; \
4955   __ret = __builtin_shufflevector(__s0, __s0, __p1); \
4956   __ret; \
4957 })
4958 #endif
4959 
4960 #ifdef __LITTLE_ENDIAN__
4961 #define vdup_lane_u16(__p0, __p1) __extension__ ({ \
4962   uint16x4_t __s0 = __p0; \
4963   uint16x4_t __ret; \
4964   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
4965   __ret; \
4966 })
4967 #else
4968 #define vdup_lane_u16(__p0, __p1) __extension__ ({ \
4969   uint16x4_t __s0 = __p0; \
4970   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
4971   uint16x4_t __ret; \
4972   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
4973   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
4974   __ret; \
4975 })
4976 #endif
4977 
4978 #ifdef __LITTLE_ENDIAN__
4979 #define vdup_lane_s8(__p0, __p1) __extension__ ({ \
4980   int8x8_t __s0 = __p0; \
4981   int8x8_t __ret; \
4982   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
4983   __ret; \
4984 })
4985 #else
4986 #define vdup_lane_s8(__p0, __p1) __extension__ ({ \
4987   int8x8_t __s0 = __p0; \
4988   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
4989   int8x8_t __ret; \
4990   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
4991   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
4992   __ret; \
4993 })
4994 #endif
4995 
4996 #ifdef __LITTLE_ENDIAN__
4997 #define vdup_lane_f32(__p0, __p1) __extension__ ({ \
4998   float32x2_t __s0 = __p0; \
4999   float32x2_t __ret; \
5000   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
5001   __ret; \
5002 })
5003 #else
5004 #define vdup_lane_f32(__p0, __p1) __extension__ ({ \
5005   float32x2_t __s0 = __p0; \
5006   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
5007   float32x2_t __ret; \
5008   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
5009   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
5010   __ret; \
5011 })
5012 #endif
5013 
5014 #ifdef __LITTLE_ENDIAN__
5015 #define vdup_lane_s32(__p0, __p1) __extension__ ({ \
5016   int32x2_t __s0 = __p0; \
5017   int32x2_t __ret; \
5018   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
5019   __ret; \
5020 })
5021 #else
5022 #define vdup_lane_s32(__p0, __p1) __extension__ ({ \
5023   int32x2_t __s0 = __p0; \
5024   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
5025   int32x2_t __ret; \
5026   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
5027   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
5028   __ret; \
5029 })
5030 #endif
5031 
5032 #ifdef __LITTLE_ENDIAN__
5033 #define vdup_lane_s64(__p0, __p1) __extension__ ({ \
5034   int64x1_t __s0 = __p0; \
5035   int64x1_t __ret; \
5036   __ret = __builtin_shufflevector(__s0, __s0, __p1); \
5037   __ret; \
5038 })
5039 #else
5040 #define vdup_lane_s64(__p0, __p1) __extension__ ({ \
5041   int64x1_t __s0 = __p0; \
5042   int64x1_t __ret; \
5043   __ret = __builtin_shufflevector(__s0, __s0, __p1); \
5044   __ret; \
5045 })
5046 #endif
5047 
5048 #ifdef __LITTLE_ENDIAN__
5049 #define vdup_lane_s16(__p0, __p1) __extension__ ({ \
5050   int16x4_t __s0 = __p0; \
5051   int16x4_t __ret; \
5052   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
5053   __ret; \
5054 })
5055 #else
5056 #define vdup_lane_s16(__p0, __p1) __extension__ ({ \
5057   int16x4_t __s0 = __p0; \
5058   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
5059   int16x4_t __ret; \
5060   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
5061   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
5062   __ret; \
5063 })
5064 #endif
5065 
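/* Editor's note: the vdup_n/vdupq_n intrinsics below splat a single scalar
 * into every lane of a 64-bit or 128-bit vector; the big-endian variants
 * build the same initializer list and then reverse the lane order.
 * Illustrative use:
 *   float32x4_t ones = vdupq_n_f32(1.0f);
 */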
5066 #ifdef __LITTLE_ENDIAN__
5067 __ai poly8x8_t vdup_n_p8(poly8_t __p0) {
5068   poly8x8_t __ret;
5069   __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
5070   return __ret;
5071 }
5072 #else
5073 __ai poly8x8_t vdup_n_p8(poly8_t __p0) {
5074   poly8x8_t __ret;
5075   __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
5076   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
5077   return __ret;
5078 }
5079 #endif
5080 
5081 #ifdef __LITTLE_ENDIAN__
5082 __ai poly16x4_t vdup_n_p16(poly16_t __p0) {
5083   poly16x4_t __ret;
5084   __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
5085   return __ret;
5086 }
5087 #else
5088 __ai poly16x4_t vdup_n_p16(poly16_t __p0) {
5089   poly16x4_t __ret;
5090   __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
5091   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
5092   return __ret;
5093 }
5094 #endif
5095 
5096 #ifdef __LITTLE_ENDIAN__
5097 __ai poly8x16_t vdupq_n_p8(poly8_t __p0) {
5098   poly8x16_t __ret;
5099   __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
5100   return __ret;
5101 }
5102 #else
5103 __ai poly8x16_t vdupq_n_p8(poly8_t __p0) {
5104   poly8x16_t __ret;
5105   __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
5106   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
5107   return __ret;
5108 }
5109 #endif
5110 
5111 #ifdef __LITTLE_ENDIAN__
5112 __ai poly16x8_t vdupq_n_p16(poly16_t __p0) {
5113   poly16x8_t __ret;
5114   __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
5115   return __ret;
5116 }
5117 #else
5118 __ai poly16x8_t vdupq_n_p16(poly16_t __p0) {
5119   poly16x8_t __ret;
5120   __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
5121   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
5122   return __ret;
5123 }
5124 #endif
5125 
5126 #ifdef __LITTLE_ENDIAN__
5127 __ai uint8x16_t vdupq_n_u8(uint8_t __p0) {
5128   uint8x16_t __ret;
5129   __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
5130   return __ret;
5131 }
5132 #else
5133 __ai uint8x16_t vdupq_n_u8(uint8_t __p0) {
5134   uint8x16_t __ret;
5135   __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
5136   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
5137   return __ret;
5138 }
5139 #endif
5140 
5141 #ifdef __LITTLE_ENDIAN__
5142 __ai uint32x4_t vdupq_n_u32(uint32_t __p0) {
5143   uint32x4_t __ret;
5144   __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
5145   return __ret;
5146 }
5147 #else
5148 __ai uint32x4_t vdupq_n_u32(uint32_t __p0) {
5149   uint32x4_t __ret;
5150   __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
5151   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
5152   return __ret;
5153 }
5154 #endif
5155 
5156 #ifdef __LITTLE_ENDIAN__
5157 __ai uint64x2_t vdupq_n_u64(uint64_t __p0) {
5158   uint64x2_t __ret;
5159   __ret = (uint64x2_t) {__p0, __p0};
5160   return __ret;
5161 }
5162 #else
5163 __ai uint64x2_t vdupq_n_u64(uint64_t __p0) {
5164   uint64x2_t __ret;
5165   __ret = (uint64x2_t) {__p0, __p0};
5166   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
5167   return __ret;
5168 }
5169 #endif
5170 
5171 #ifdef __LITTLE_ENDIAN__
5172 __ai uint16x8_t vdupq_n_u16(uint16_t __p0) {
5173   uint16x8_t __ret;
5174   __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
5175   return __ret;
5176 }
5177 #else
5178 __ai uint16x8_t vdupq_n_u16(uint16_t __p0) {
5179   uint16x8_t __ret;
5180   __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
5181   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
5182   return __ret;
5183 }
5184 #endif
5185 
5186 #ifdef __LITTLE_ENDIAN__
5187 __ai int8x16_t vdupq_n_s8(int8_t __p0) {
5188   int8x16_t __ret;
5189   __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
5190   return __ret;
5191 }
5192 #else
5193 __ai int8x16_t vdupq_n_s8(int8_t __p0) {
5194   int8x16_t __ret;
5195   __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
5196   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
5197   return __ret;
5198 }
5199 #endif
5200 
5201 #ifdef __LITTLE_ENDIAN__
5202 __ai float32x4_t vdupq_n_f32(float32_t __p0) {
5203   float32x4_t __ret;
5204   __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
5205   return __ret;
5206 }
5207 #else
5208 __ai float32x4_t vdupq_n_f32(float32_t __p0) {
5209   float32x4_t __ret;
5210   __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
5211   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
5212   return __ret;
5213 }
5214 #endif
5215 
5216 #ifdef __LITTLE_ENDIAN__
5217 #define vdupq_n_f16(__p0) __extension__ ({ \
5218   float16_t __s0 = __p0; \
5219   float16x8_t __ret; \
5220   __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
5221   __ret; \
5222 })
5223 #else
5224 #define vdupq_n_f16(__p0) __extension__ ({ \
5225   float16_t __s0 = __p0; \
5226   float16x8_t __ret; \
5227   __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
5228   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
5229   __ret; \
5230 })
5231 #endif
5232 
5233 #ifdef __LITTLE_ENDIAN__
5234 __ai int32x4_t vdupq_n_s32(int32_t __p0) {
5235   int32x4_t __ret;
5236   __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
5237   return __ret;
5238 }
5239 #else
5240 __ai int32x4_t vdupq_n_s32(int32_t __p0) {
5241   int32x4_t __ret;
5242   __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
5243   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
5244   return __ret;
5245 }
5246 #endif
5247 
5248 #ifdef __LITTLE_ENDIAN__
5249 __ai int64x2_t vdupq_n_s64(int64_t __p0) {
5250   int64x2_t __ret;
5251   __ret = (int64x2_t) {__p0, __p0};
5252   return __ret;
5253 }
5254 #else
5255 __ai int64x2_t vdupq_n_s64(int64_t __p0) {
5256   int64x2_t __ret;
5257   __ret = (int64x2_t) {__p0, __p0};
5258   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
5259   return __ret;
5260 }
5261 #endif
5262 
5263 #ifdef __LITTLE_ENDIAN__
5264 __ai int16x8_t vdupq_n_s16(int16_t __p0) {
5265   int16x8_t __ret;
5266   __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
5267   return __ret;
5268 }
5269 #else
5270 __ai int16x8_t vdupq_n_s16(int16_t __p0) {
5271   int16x8_t __ret;
5272   __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
5273   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
5274   return __ret;
5275 }
5276 #endif
5277 
5278 #ifdef __LITTLE_ENDIAN__
5279 __ai uint8x8_t vdup_n_u8(uint8_t __p0) {
5280   uint8x8_t __ret;
5281   __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
5282   return __ret;
5283 }
5284 #else
5285 __ai uint8x8_t vdup_n_u8(uint8_t __p0) {
5286   uint8x8_t __ret;
5287   __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
5288   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
5289   return __ret;
5290 }
5291 #endif
5292 
5293 #ifdef __LITTLE_ENDIAN__
5294 __ai uint32x2_t vdup_n_u32(uint32_t __p0) {
5295   uint32x2_t __ret;
5296   __ret = (uint32x2_t) {__p0, __p0};
5297   return __ret;
5298 }
5299 #else
5300 __ai uint32x2_t vdup_n_u32(uint32_t __p0) {
5301   uint32x2_t __ret;
5302   __ret = (uint32x2_t) {__p0, __p0};
5303   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
5304   return __ret;
5305 }
5306 #endif
5307 
5308 #ifdef __LITTLE_ENDIAN__
5309 __ai uint64x1_t vdup_n_u64(uint64_t __p0) {
5310   uint64x1_t __ret;
5311   __ret = (uint64x1_t) {__p0};
5312   return __ret;
5313 }
5314 #else
5315 __ai uint64x1_t vdup_n_u64(uint64_t __p0) {
5316   uint64x1_t __ret;
5317   __ret = (uint64x1_t) {__p0};
5318   return __ret;
5319 }
5320 #endif
5321 
5322 #ifdef __LITTLE_ENDIAN__
5323 __ai uint16x4_t vdup_n_u16(uint16_t __p0) {
5324   uint16x4_t __ret;
5325   __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
5326   return __ret;
5327 }
5328 #else
5329 __ai uint16x4_t vdup_n_u16(uint16_t __p0) {
5330   uint16x4_t __ret;
5331   __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
5332   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
5333   return __ret;
5334 }
5335 #endif
5336 
5337 #ifdef __LITTLE_ENDIAN__
5338 __ai int8x8_t vdup_n_s8(int8_t __p0) {
5339   int8x8_t __ret;
5340   __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
5341   return __ret;
5342 }
5343 #else
5344 __ai int8x8_t vdup_n_s8(int8_t __p0) {
5345   int8x8_t __ret;
5346   __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
5347   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
5348   return __ret;
5349 }
5350 #endif
5351 
5352 #ifdef __LITTLE_ENDIAN__
5353 __ai float32x2_t vdup_n_f32(float32_t __p0) {
5354   float32x2_t __ret;
5355   __ret = (float32x2_t) {__p0, __p0};
5356   return __ret;
5357 }
5358 #else
5359 __ai float32x2_t vdup_n_f32(float32_t __p0) {
5360   float32x2_t __ret;
5361   __ret = (float32x2_t) {__p0, __p0};
5362   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
5363   return __ret;
5364 }
5365 #endif
5366 
5367 #ifdef __LITTLE_ENDIAN__
5368 #define vdup_n_f16(__p0) __extension__ ({ \
5369   float16_t __s0 = __p0; \
5370   float16x4_t __ret; \
5371   __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
5372   __ret; \
5373 })
5374 #else
5375 #define vdup_n_f16(__p0) __extension__ ({ \
5376   float16_t __s0 = __p0; \
5377   float16x4_t __ret; \
5378   __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
5379   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
5380   __ret; \
5381 })
5382 #endif
5383 
5384 #ifdef __LITTLE_ENDIAN__
5385 __ai int32x2_t vdup_n_s32(int32_t __p0) {
5386   int32x2_t __ret;
5387   __ret = (int32x2_t) {__p0, __p0};
5388   return __ret;
5389 }
5390 #else
5391 __ai int32x2_t vdup_n_s32(int32_t __p0) {
5392   int32x2_t __ret;
5393   __ret = (int32x2_t) {__p0, __p0};
5394   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
5395   return __ret;
5396 }
5397 #endif
5398 
5399 #ifdef __LITTLE_ENDIAN__
5400 __ai int64x1_t vdup_n_s64(int64_t __p0) {
5401   int64x1_t __ret;
5402   __ret = (int64x1_t) {__p0};
5403   return __ret;
5404 }
5405 #else
5406 __ai int64x1_t vdup_n_s64(int64_t __p0) {
5407   int64x1_t __ret;
5408   __ret = (int64x1_t) {__p0};
5409   return __ret;
5410 }
5411 #endif
5412 
5413 #ifdef __LITTLE_ENDIAN__
5414 __ai int16x4_t vdup_n_s16(int16_t __p0) {
5415   int16x4_t __ret;
5416   __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
5417   return __ret;
5418 }
5419 #else
5420 __ai int16x4_t vdup_n_s16(int16_t __p0) {
5421   int16x4_t __ret;
5422   __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
5423   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
5424   return __ret;
5425 }
5426 #endif
5427 
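/* Editor's note: the veor/veorq intrinsics below perform a lane-wise bitwise
 * exclusive OR and are expressed directly with the ^ operator on the vector
 * types, so no builtin call is needed. */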
5428 #ifdef __LITTLE_ENDIAN__
5429 __ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) {
5430   uint8x16_t __ret;
5431   __ret = __p0 ^ __p1;
5432   return __ret;
5433 }
5434 #else
5435 __ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) {
5436   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
5437   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
5438   uint8x16_t __ret;
5439   __ret = __rev0 ^ __rev1;
5440   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
5441   return __ret;
5442 }
5443 #endif
5444 
5445 #ifdef __LITTLE_ENDIAN__
5446 __ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) {
5447   uint32x4_t __ret;
5448   __ret = __p0 ^ __p1;
5449   return __ret;
5450 }
5451 #else
5452 __ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) {
5453   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
5454   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
5455   uint32x4_t __ret;
5456   __ret = __rev0 ^ __rev1;
5457   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
5458   return __ret;
5459 }
5460 #endif
5461 
5462 #ifdef __LITTLE_ENDIAN__
5463 __ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) {
5464   uint64x2_t __ret;
5465   __ret = __p0 ^ __p1;
5466   return __ret;
5467 }
5468 #else
5469 __ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) {
5470   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
5471   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
5472   uint64x2_t __ret;
5473   __ret = __rev0 ^ __rev1;
5474   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
5475   return __ret;
5476 }
5477 #endif
5478 
5479 #ifdef __LITTLE_ENDIAN__
5480 __ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) {
5481   uint16x8_t __ret;
5482   __ret = __p0 ^ __p1;
5483   return __ret;
5484 }
5485 #else
5486 __ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) {
5487   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
5488   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
5489   uint16x8_t __ret;
5490   __ret = __rev0 ^ __rev1;
5491   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
5492   return __ret;
5493 }
5494 #endif
5495 
5496 #ifdef __LITTLE_ENDIAN__
5497 __ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) {
5498   int8x16_t __ret;
5499   __ret = __p0 ^ __p1;
5500   return __ret;
5501 }
5502 #else
5503 __ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) {
5504   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
5505   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
5506   int8x16_t __ret;
5507   __ret = __rev0 ^ __rev1;
5508   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
5509   return __ret;
5510 }
5511 #endif
5512 
5513 #ifdef __LITTLE_ENDIAN__
5514 __ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) {
5515   int32x4_t __ret;
5516   __ret = __p0 ^ __p1;
5517   return __ret;
5518 }
5519 #else
5520 __ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) {
5521   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
5522   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
5523   int32x4_t __ret;
5524   __ret = __rev0 ^ __rev1;
5525   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
5526   return __ret;
5527 }
5528 #endif
5529 
5530 #ifdef __LITTLE_ENDIAN__
5531 __ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) {
5532   int64x2_t __ret;
5533   __ret = __p0 ^ __p1;
5534   return __ret;
5535 }
5536 #else
5537 __ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) {
5538   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
5539   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
5540   int64x2_t __ret;
5541   __ret = __rev0 ^ __rev1;
5542   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
5543   return __ret;
5544 }
5545 #endif
5546 
5547 #ifdef __LITTLE_ENDIAN__
5548 __ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) {
5549   int16x8_t __ret;
5550   __ret = __p0 ^ __p1;
5551   return __ret;
5552 }
5553 #else
5554 __ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) {
5555   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
5556   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
5557   int16x8_t __ret;
5558   __ret = __rev0 ^ __rev1;
5559   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
5560   return __ret;
5561 }
5562 #endif
5563 
5564 #ifdef __LITTLE_ENDIAN__
5565 __ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) {
5566   uint8x8_t __ret;
5567   __ret = __p0 ^ __p1;
5568   return __ret;
5569 }
5570 #else
5571 __ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) {
5572   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
5573   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
5574   uint8x8_t __ret;
5575   __ret = __rev0 ^ __rev1;
5576   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
5577   return __ret;
5578 }
5579 #endif
5580 
5581 #ifdef __LITTLE_ENDIAN__
5582 __ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) {
5583   uint32x2_t __ret;
5584   __ret = __p0 ^ __p1;
5585   return __ret;
5586 }
5587 #else
5588 __ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) {
5589   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
5590   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
5591   uint32x2_t __ret;
5592   __ret = __rev0 ^ __rev1;
5593   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
5594   return __ret;
5595 }
5596 #endif
5597 
5598 #ifdef __LITTLE_ENDIAN__
5599 __ai uint64x1_t veor_u64(uint64x1_t __p0, uint64x1_t __p1) {
5600   uint64x1_t __ret;
5601   __ret = __p0 ^ __p1;
5602   return __ret;
5603 }
5604 #else
5605 __ai uint64x1_t veor_u64(uint64x1_t __p0, uint64x1_t __p1) {
5606   uint64x1_t __ret;
5607   __ret = __p0 ^ __p1;
5608   return __ret;
5609 }
5610 #endif
5611 
5612 #ifdef __LITTLE_ENDIAN__
5613 __ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) {
5614   uint16x4_t __ret;
5615   __ret = __p0 ^ __p1;
5616   return __ret;
5617 }
5618 #else
5619 __ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) {
5620   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
5621   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
5622   uint16x4_t __ret;
5623   __ret = __rev0 ^ __rev1;
5624   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
5625   return __ret;
5626 }
5627 #endif
5628 
5629 #ifdef __LITTLE_ENDIAN__
5630 __ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) {
5631   int8x8_t __ret;
5632   __ret = __p0 ^ __p1;
5633   return __ret;
5634 }
5635 #else
5636 __ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) {
5637   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
5638   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
5639   int8x8_t __ret;
5640   __ret = __rev0 ^ __rev1;
5641   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
5642   return __ret;
5643 }
5644 #endif
5645 
5646 #ifdef __LITTLE_ENDIAN__
5647 __ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) {
5648   int32x2_t __ret;
5649   __ret = __p0 ^ __p1;
5650   return __ret;
5651 }
5652 #else
5653 __ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) {
5654   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
5655   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
5656   int32x2_t __ret;
5657   __ret = __rev0 ^ __rev1;
5658   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
5659   return __ret;
5660 }
5661 #endif
5662 
5663 #ifdef __LITTLE_ENDIAN__
5664 __ai int64x1_t veor_s64(int64x1_t __p0, int64x1_t __p1) {
5665   int64x1_t __ret;
5666   __ret = __p0 ^ __p1;
5667   return __ret;
5668 }
5669 #else
5670 __ai int64x1_t veor_s64(int64x1_t __p0, int64x1_t __p1) {
5671   int64x1_t __ret;
5672   __ret = __p0 ^ __p1;
5673   return __ret;
5674 }
5675 #endif
5676 
5677 #ifdef __LITTLE_ENDIAN__
5678 __ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) {
5679   int16x4_t __ret;
5680   __ret = __p0 ^ __p1;
5681   return __ret;
5682 }
5683 #else
5684 __ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) {
5685   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
5686   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
5687   int16x4_t __ret;
5688   __ret = __rev0 ^ __rev1;
5689   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
5690   return __ret;
5691 }
5692 #endif
5693 
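/* vext_* / vextq_* map to the VEXT instruction.  Conceptually the two operands
 * are concatenated (__p0 in the low lanes, __p1 in the high lanes) and a full
 * vector is extracted starting at lane __p2, which must be a compile-time
 * constant in the range [0, lanes-1].  The trailing integer argument passed to
 * __builtin_neon_vext_v / __builtin_neon_vextq_v is an internal element-type
 * code, not something callers supply.  Illustrative sketch (example values):
 *
 *   uint8x8_t a = vcreate_u8(0x0706050403020100ull);  // on little-endian, lane i holds i
 *   uint8x8_t b = vcreate_u8(0x0f0e0d0c0b0a0908ull);  // lanes hold 8..15
 *   uint8x8_t r = vext_u8(a, b, 3);                   // lanes: 3,4,5,6,7,8,9,10
 */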
5694 #ifdef __LITTLE_ENDIAN__
5695 #define vext_p8(__p0, __p1, __p2) __extension__ ({ \
5696   poly8x8_t __s0 = __p0; \
5697   poly8x8_t __s1 = __p1; \
5698   poly8x8_t __ret; \
5699   __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
5700   __ret; \
5701 })
5702 #else
5703 #define vext_p8(__p0, __p1, __p2) __extension__ ({ \
5704   poly8x8_t __s0 = __p0; \
5705   poly8x8_t __s1 = __p1; \
5706   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
5707   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
5708   poly8x8_t __ret; \
5709   __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
5710   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
5711   __ret; \
5712 })
5713 #endif
5714 
5715 #ifdef __LITTLE_ENDIAN__
5716 #define vext_p16(__p0, __p1, __p2) __extension__ ({ \
5717   poly16x4_t __s0 = __p0; \
5718   poly16x4_t __s1 = __p1; \
5719   poly16x4_t __ret; \
5720   __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
5721   __ret; \
5722 })
5723 #else
5724 #define vext_p16(__p0, __p1, __p2) __extension__ ({ \
5725   poly16x4_t __s0 = __p0; \
5726   poly16x4_t __s1 = __p1; \
5727   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
5728   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
5729   poly16x4_t __ret; \
5730   __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
5731   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
5732   __ret; \
5733 })
5734 #endif
5735 
5736 #ifdef __LITTLE_ENDIAN__
5737 #define vextq_p8(__p0, __p1, __p2) __extension__ ({ \
5738   poly8x16_t __s0 = __p0; \
5739   poly8x16_t __s1 = __p1; \
5740   poly8x16_t __ret; \
5741   __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
5742   __ret; \
5743 })
5744 #else
5745 #define vextq_p8(__p0, __p1, __p2) __extension__ ({ \
5746   poly8x16_t __s0 = __p0; \
5747   poly8x16_t __s1 = __p1; \
5748   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
5749   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
5750   poly8x16_t __ret; \
5751   __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
5752   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
5753   __ret; \
5754 })
5755 #endif
5756 
5757 #ifdef __LITTLE_ENDIAN__
5758 #define vextq_p16(__p0, __p1, __p2) __extension__ ({ \
5759   poly16x8_t __s0 = __p0; \
5760   poly16x8_t __s1 = __p1; \
5761   poly16x8_t __ret; \
5762   __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
5763   __ret; \
5764 })
5765 #else
5766 #define vextq_p16(__p0, __p1, __p2) __extension__ ({ \
5767   poly16x8_t __s0 = __p0; \
5768   poly16x8_t __s1 = __p1; \
5769   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
5770   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
5771   poly16x8_t __ret; \
5772   __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
5773   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
5774   __ret; \
5775 })
5776 #endif
5777 
5778 #ifdef __LITTLE_ENDIAN__
5779 #define vextq_u8(__p0, __p1, __p2) __extension__ ({ \
5780   uint8x16_t __s0 = __p0; \
5781   uint8x16_t __s1 = __p1; \
5782   uint8x16_t __ret; \
5783   __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
5784   __ret; \
5785 })
5786 #else
5787 #define vextq_u8(__p0, __p1, __p2) __extension__ ({ \
5788   uint8x16_t __s0 = __p0; \
5789   uint8x16_t __s1 = __p1; \
5790   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
5791   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
5792   uint8x16_t __ret; \
5793   __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
5794   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
5795   __ret; \
5796 })
5797 #endif
5798 
5799 #ifdef __LITTLE_ENDIAN__
5800 #define vextq_u32(__p0, __p1, __p2) __extension__ ({ \
5801   uint32x4_t __s0 = __p0; \
5802   uint32x4_t __s1 = __p1; \
5803   uint32x4_t __ret; \
5804   __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
5805   __ret; \
5806 })
5807 #else
5808 #define vextq_u32(__p0, __p1, __p2) __extension__ ({ \
5809   uint32x4_t __s0 = __p0; \
5810   uint32x4_t __s1 = __p1; \
5811   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
5812   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
5813   uint32x4_t __ret; \
5814   __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
5815   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
5816   __ret; \
5817 })
5818 #endif
5819 
5820 #ifdef __LITTLE_ENDIAN__
5821 #define vextq_u64(__p0, __p1, __p2) __extension__ ({ \
5822   uint64x2_t __s0 = __p0; \
5823   uint64x2_t __s1 = __p1; \
5824   uint64x2_t __ret; \
5825   __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
5826   __ret; \
5827 })
5828 #else
5829 #define vextq_u64(__p0, __p1, __p2) __extension__ ({ \
5830   uint64x2_t __s0 = __p0; \
5831   uint64x2_t __s1 = __p1; \
5832   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
5833   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
5834   uint64x2_t __ret; \
5835   __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
5836   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
5837   __ret; \
5838 })
5839 #endif
5840 
5841 #ifdef __LITTLE_ENDIAN__
5842 #define vextq_u16(__p0, __p1, __p2) __extension__ ({ \
5843   uint16x8_t __s0 = __p0; \
5844   uint16x8_t __s1 = __p1; \
5845   uint16x8_t __ret; \
5846   __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
5847   __ret; \
5848 })
5849 #else
5850 #define vextq_u16(__p0, __p1, __p2) __extension__ ({ \
5851   uint16x8_t __s0 = __p0; \
5852   uint16x8_t __s1 = __p1; \
5853   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
5854   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
5855   uint16x8_t __ret; \
5856   __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
5857   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
5858   __ret; \
5859 })
5860 #endif
5861 
5862 #ifdef __LITTLE_ENDIAN__
5863 #define vextq_s8(__p0, __p1, __p2) __extension__ ({ \
5864   int8x16_t __s0 = __p0; \
5865   int8x16_t __s1 = __p1; \
5866   int8x16_t __ret; \
5867   __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
5868   __ret; \
5869 })
5870 #else
5871 #define vextq_s8(__p0, __p1, __p2) __extension__ ({ \
5872   int8x16_t __s0 = __p0; \
5873   int8x16_t __s1 = __p1; \
5874   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
5875   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
5876   int8x16_t __ret; \
5877   __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
5878   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
5879   __ret; \
5880 })
5881 #endif
5882 
5883 #ifdef __LITTLE_ENDIAN__
5884 #define vextq_f32(__p0, __p1, __p2) __extension__ ({ \
5885   float32x4_t __s0 = __p0; \
5886   float32x4_t __s1 = __p1; \
5887   float32x4_t __ret; \
5888   __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 41); \
5889   __ret; \
5890 })
5891 #else
5892 #define vextq_f32(__p0, __p1, __p2) __extension__ ({ \
5893   float32x4_t __s0 = __p0; \
5894   float32x4_t __s1 = __p1; \
5895   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
5896   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
5897   float32x4_t __ret; \
5898   __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 41); \
5899   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
5900   __ret; \
5901 })
5902 #endif
5903 
5904 #ifdef __LITTLE_ENDIAN__
5905 #define vextq_s32(__p0, __p1, __p2) __extension__ ({ \
5906   int32x4_t __s0 = __p0; \
5907   int32x4_t __s1 = __p1; \
5908   int32x4_t __ret; \
5909   __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
5910   __ret; \
5911 })
5912 #else
5913 #define vextq_s32(__p0, __p1, __p2) __extension__ ({ \
5914   int32x4_t __s0 = __p0; \
5915   int32x4_t __s1 = __p1; \
5916   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
5917   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
5918   int32x4_t __ret; \
5919   __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
5920   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
5921   __ret; \
5922 })
5923 #endif
5924 
5925 #ifdef __LITTLE_ENDIAN__
5926 #define vextq_s64(__p0, __p1, __p2) __extension__ ({ \
5927   int64x2_t __s0 = __p0; \
5928   int64x2_t __s1 = __p1; \
5929   int64x2_t __ret; \
5930   __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
5931   __ret; \
5932 })
5933 #else
5934 #define vextq_s64(__p0, __p1, __p2) __extension__ ({ \
5935   int64x2_t __s0 = __p0; \
5936   int64x2_t __s1 = __p1; \
5937   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
5938   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
5939   int64x2_t __ret; \
5940   __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
5941   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
5942   __ret; \
5943 })
5944 #endif
5945 
5946 #ifdef __LITTLE_ENDIAN__
5947 #define vextq_s16(__p0, __p1, __p2) __extension__ ({ \
5948   int16x8_t __s0 = __p0; \
5949   int16x8_t __s1 = __p1; \
5950   int16x8_t __ret; \
5951   __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
5952   __ret; \
5953 })
5954 #else
5955 #define vextq_s16(__p0, __p1, __p2) __extension__ ({ \
5956   int16x8_t __s0 = __p0; \
5957   int16x8_t __s1 = __p1; \
5958   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
5959   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
5960   int16x8_t __ret; \
5961   __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
5962   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
5963   __ret; \
5964 })
5965 #endif
5966 
5967 #ifdef __LITTLE_ENDIAN__
5968 #define vext_u8(__p0, __p1, __p2) __extension__ ({ \
5969   uint8x8_t __s0 = __p0; \
5970   uint8x8_t __s1 = __p1; \
5971   uint8x8_t __ret; \
5972   __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
5973   __ret; \
5974 })
5975 #else
5976 #define vext_u8(__p0, __p1, __p2) __extension__ ({ \
5977   uint8x8_t __s0 = __p0; \
5978   uint8x8_t __s1 = __p1; \
5979   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
5980   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
5981   uint8x8_t __ret; \
5982   __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
5983   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
5984   __ret; \
5985 })
5986 #endif
5987 
5988 #ifdef __LITTLE_ENDIAN__
5989 #define vext_u32(__p0, __p1, __p2) __extension__ ({ \
5990   uint32x2_t __s0 = __p0; \
5991   uint32x2_t __s1 = __p1; \
5992   uint32x2_t __ret; \
5993   __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
5994   __ret; \
5995 })
5996 #else
5997 #define vext_u32(__p0, __p1, __p2) __extension__ ({ \
5998   uint32x2_t __s0 = __p0; \
5999   uint32x2_t __s1 = __p1; \
6000   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
6001   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
6002   uint32x2_t __ret; \
6003   __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
6004   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
6005   __ret; \
6006 })
6007 #endif
6008 
6009 #ifdef __LITTLE_ENDIAN__
6010 #define vext_u64(__p0, __p1, __p2) __extension__ ({ \
6011   uint64x1_t __s0 = __p0; \
6012   uint64x1_t __s1 = __p1; \
6013   uint64x1_t __ret; \
6014   __ret = (uint64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
6015   __ret; \
6016 })
6017 #else
6018 #define vext_u64(__p0, __p1, __p2) __extension__ ({ \
6019   uint64x1_t __s0 = __p0; \
6020   uint64x1_t __s1 = __p1; \
6021   uint64x1_t __ret; \
6022   __ret = (uint64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
6023   __ret; \
6024 })
6025 #endif
6026 
6027 #ifdef __LITTLE_ENDIAN__
6028 #define vext_u16(__p0, __p1, __p2) __extension__ ({ \
6029   uint16x4_t __s0 = __p0; \
6030   uint16x4_t __s1 = __p1; \
6031   uint16x4_t __ret; \
6032   __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
6033   __ret; \
6034 })
6035 #else
6036 #define vext_u16(__p0, __p1, __p2) __extension__ ({ \
6037   uint16x4_t __s0 = __p0; \
6038   uint16x4_t __s1 = __p1; \
6039   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
6040   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
6041   uint16x4_t __ret; \
6042   __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
6043   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
6044   __ret; \
6045 })
6046 #endif
6047 
6048 #ifdef __LITTLE_ENDIAN__
6049 #define vext_s8(__p0, __p1, __p2) __extension__ ({ \
6050   int8x8_t __s0 = __p0; \
6051   int8x8_t __s1 = __p1; \
6052   int8x8_t __ret; \
6053   __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
6054   __ret; \
6055 })
6056 #else
6057 #define vext_s8(__p0, __p1, __p2) __extension__ ({ \
6058   int8x8_t __s0 = __p0; \
6059   int8x8_t __s1 = __p1; \
6060   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
6061   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
6062   int8x8_t __ret; \
6063   __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
6064   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
6065   __ret; \
6066 })
6067 #endif
6068 
6069 #ifdef __LITTLE_ENDIAN__
6070 #define vext_f32(__p0, __p1, __p2) __extension__ ({ \
6071   float32x2_t __s0 = __p0; \
6072   float32x2_t __s1 = __p1; \
6073   float32x2_t __ret; \
6074   __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 9); \
6075   __ret; \
6076 })
6077 #else
6078 #define vext_f32(__p0, __p1, __p2) __extension__ ({ \
6079   float32x2_t __s0 = __p0; \
6080   float32x2_t __s1 = __p1; \
6081   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
6082   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
6083   float32x2_t __ret; \
6084   __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 9); \
6085   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
6086   __ret; \
6087 })
6088 #endif
6089 
6090 #ifdef __LITTLE_ENDIAN__
6091 #define vext_s32(__p0, __p1, __p2) __extension__ ({ \
6092   int32x2_t __s0 = __p0; \
6093   int32x2_t __s1 = __p1; \
6094   int32x2_t __ret; \
6095   __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
6096   __ret; \
6097 })
6098 #else
6099 #define vext_s32(__p0, __p1, __p2) __extension__ ({ \
6100   int32x2_t __s0 = __p0; \
6101   int32x2_t __s1 = __p1; \
6102   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
6103   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
6104   int32x2_t __ret; \
6105   __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
6106   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
6107   __ret; \
6108 })
6109 #endif
6110 
6111 #ifdef __LITTLE_ENDIAN__
6112 #define vext_s64(__p0, __p1, __p2) __extension__ ({ \
6113   int64x1_t __s0 = __p0; \
6114   int64x1_t __s1 = __p1; \
6115   int64x1_t __ret; \
6116   __ret = (int64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
6117   __ret; \
6118 })
6119 #else
6120 #define vext_s64(__p0, __p1, __p2) __extension__ ({ \
6121   int64x1_t __s0 = __p0; \
6122   int64x1_t __s1 = __p1; \
6123   int64x1_t __ret; \
6124   __ret = (int64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
6125   __ret; \
6126 })
6127 #endif
6128 
6129 #ifdef __LITTLE_ENDIAN__
6130 #define vext_s16(__p0, __p1, __p2) __extension__ ({ \
6131   int16x4_t __s0 = __p0; \
6132   int16x4_t __s1 = __p1; \
6133   int16x4_t __ret; \
6134   __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
6135   __ret; \
6136 })
6137 #else
6138 #define vext_s16(__p0, __p1, __p2) __extension__ ({ \
6139   int16x4_t __s0 = __p0; \
6140   int16x4_t __s1 = __p1; \
6141   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
6142   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
6143   int16x4_t __ret; \
6144   __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
6145   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
6146   __ret; \
6147 })
6148 #endif
6149 
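/* vfma_f32 / vfmaq_f32 map to VFMA: each lane computes __p0 + (__p1 * __p2) as
 * a fused multiply-add, i.e. with a single rounding of the final result.  The
 * __noswap_ variants skip the big-endian lane reversal; they exist so other
 * intrinsics in this header can reuse the operation on data that has already
 * been reversed.  Illustrative sketch (example values):
 *
 *   float32x4_t acc = vdupq_n_f32(1.0f);
 *   float32x4_t x   = vdupq_n_f32(2.0f);
 *   float32x4_t y   = vdupq_n_f32(3.0f);
 *   float32x4_t r   = vfmaq_f32(acc, x, y);   // every lane is 7.0f
 */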
6150 #ifdef __LITTLE_ENDIAN__
6151 __ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
6152   float32x4_t __ret;
6153   __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
6154   return __ret;
6155 }
6156 #else
6157 __ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
6158   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
6159   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
6160   float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
6161   float32x4_t __ret;
6162   __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
6163   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
6164   return __ret;
6165 }
6166 __ai float32x4_t __noswap_vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
6167   float32x4_t __ret;
6168   __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
6169   return __ret;
6170 }
6171 #endif
6172 
6173 #ifdef __LITTLE_ENDIAN__
6174 __ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
6175   float32x2_t __ret;
6176   __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
6177   return __ret;
6178 }
6179 #else
6180 __ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
6181   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
6182   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
6183   float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
6184   float32x2_t __ret;
6185   __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
6186   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
6187   return __ret;
6188 }
6189 __ai float32x2_t __noswap_vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
6190   float32x2_t __ret;
6191   __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
6192   return __ret;
6193 }
6194 #endif
6195 
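/* vget_high_* return the upper half of a 128-bit "q" vector as a 64-bit vector
 * (lanes N/2 .. N-1 of an N-lane input).  The big-endian variants reverse the
 * input, select the same indices, and reverse the result, so the architectural
 * high half is returned on either endianness.  The __noswap_ variants are
 * internal helpers that omit the reversal.
 */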
6196 #ifdef __LITTLE_ENDIAN__
6197 __ai poly8x8_t vget_high_p8(poly8x16_t __p0) {
6198   poly8x8_t __ret;
6199   __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
6200   return __ret;
6201 }
6202 #else
6203 __ai poly8x8_t vget_high_p8(poly8x16_t __p0) {
6204   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
6205   poly8x8_t __ret;
6206   __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
6207   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
6208   return __ret;
6209 }
6210 __ai poly8x8_t __noswap_vget_high_p8(poly8x16_t __p0) {
6211   poly8x8_t __ret;
6212   __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
6213   return __ret;
6214 }
6215 #endif
6216 
6217 #ifdef __LITTLE_ENDIAN__
6218 __ai poly16x4_t vget_high_p16(poly16x8_t __p0) {
6219   poly16x4_t __ret;
6220   __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
6221   return __ret;
6222 }
6223 #else
6224 __ai poly16x4_t vget_high_p16(poly16x8_t __p0) {
6225   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
6226   poly16x4_t __ret;
6227   __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
6228   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
6229   return __ret;
6230 }
6231 #endif
6232 
6233 #ifdef __LITTLE_ENDIAN__
6234 __ai uint8x8_t vget_high_u8(uint8x16_t __p0) {
6235   uint8x8_t __ret;
6236   __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
6237   return __ret;
6238 }
6239 #else
6240 __ai uint8x8_t vget_high_u8(uint8x16_t __p0) {
6241   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
6242   uint8x8_t __ret;
6243   __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
6244   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
6245   return __ret;
6246 }
6247 __ai uint8x8_t __noswap_vget_high_u8(uint8x16_t __p0) {
6248   uint8x8_t __ret;
6249   __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
6250   return __ret;
6251 }
6252 #endif
6253 
6254 #ifdef __LITTLE_ENDIAN__
6255 __ai uint32x2_t vget_high_u32(uint32x4_t __p0) {
6256   uint32x2_t __ret;
6257   __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
6258   return __ret;
6259 }
6260 #else
6261 __ai uint32x2_t vget_high_u32(uint32x4_t __p0) {
6262   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
6263   uint32x2_t __ret;
6264   __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
6265   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
6266   return __ret;
6267 }
6268 __ai uint32x2_t __noswap_vget_high_u32(uint32x4_t __p0) {
6269   uint32x2_t __ret;
6270   __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
6271   return __ret;
6272 }
6273 #endif
6274 
6275 #ifdef __LITTLE_ENDIAN__
6276 __ai uint64x1_t vget_high_u64(uint64x2_t __p0) {
6277   uint64x1_t __ret;
6278   __ret = __builtin_shufflevector(__p0, __p0, 1);
6279   return __ret;
6280 }
6281 #else
6282 __ai uint64x1_t vget_high_u64(uint64x2_t __p0) {
6283   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
6284   uint64x1_t __ret;
6285   __ret = __builtin_shufflevector(__rev0, __rev0, 1);
6286   return __ret;
6287 }
6288 #endif
6289 
6290 #ifdef __LITTLE_ENDIAN__
6291 __ai uint16x4_t vget_high_u16(uint16x8_t __p0) {
6292   uint16x4_t __ret;
6293   __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
6294   return __ret;
6295 }
6296 #else
6297 __ai uint16x4_t vget_high_u16(uint16x8_t __p0) {
6298   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
6299   uint16x4_t __ret;
6300   __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
6301   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
6302   return __ret;
6303 }
6304 __ai uint16x4_t __noswap_vget_high_u16(uint16x8_t __p0) {
6305   uint16x4_t __ret;
6306   __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
6307   return __ret;
6308 }
6309 #endif
6310 
6311 #ifdef __LITTLE_ENDIAN__
6312 __ai int8x8_t vget_high_s8(int8x16_t __p0) {
6313   int8x8_t __ret;
6314   __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
6315   return __ret;
6316 }
6317 #else
6318 __ai int8x8_t vget_high_s8(int8x16_t __p0) {
6319   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
6320   int8x8_t __ret;
6321   __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
6322   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
6323   return __ret;
6324 }
6325 __ai int8x8_t __noswap_vget_high_s8(int8x16_t __p0) {
6326   int8x8_t __ret;
6327   __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
6328   return __ret;
6329 }
6330 #endif
6331 
6332 #ifdef __LITTLE_ENDIAN__
6333 __ai float32x2_t vget_high_f32(float32x4_t __p0) {
6334   float32x2_t __ret;
6335   __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
6336   return __ret;
6337 }
6338 #else
6339 __ai float32x2_t vget_high_f32(float32x4_t __p0) {
6340   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
6341   float32x2_t __ret;
6342   __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
6343   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
6344   return __ret;
6345 }
6346 __ai float32x2_t __noswap_vget_high_f32(float32x4_t __p0) {
6347   float32x2_t __ret;
6348   __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
6349   return __ret;
6350 }
6351 #endif
6352 
6353 #ifdef __LITTLE_ENDIAN__
6354 __ai float16x4_t vget_high_f16(float16x8_t __p0) {
6355   float16x4_t __ret;
6356   __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
6357   return __ret;
6358 }
6359 #else
6360 __ai float16x4_t vget_high_f16(float16x8_t __p0) {
6361   float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
6362   float16x4_t __ret;
6363   __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
6364   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
6365   return __ret;
6366 }
6367 __ai float16x4_t __noswap_vget_high_f16(float16x8_t __p0) {
6368   float16x4_t __ret;
6369   __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
6370   return __ret;
6371 }
6372 #endif
6373 
6374 #ifdef __LITTLE_ENDIAN__
6375 __ai int32x2_t vget_high_s32(int32x4_t __p0) {
6376   int32x2_t __ret;
6377   __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
6378   return __ret;
6379 }
6380 #else
6381 __ai int32x2_t vget_high_s32(int32x4_t __p0) {
6382   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
6383   int32x2_t __ret;
6384   __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
6385   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
6386   return __ret;
6387 }
6388 __ai int32x2_t __noswap_vget_high_s32(int32x4_t __p0) {
6389   int32x2_t __ret;
6390   __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
6391   return __ret;
6392 }
6393 #endif
6394 
6395 #ifdef __LITTLE_ENDIAN__
6396 __ai int64x1_t vget_high_s64(int64x2_t __p0) {
6397   int64x1_t __ret;
6398   __ret = __builtin_shufflevector(__p0, __p0, 1);
6399   return __ret;
6400 }
6401 #else
6402 __ai int64x1_t vget_high_s64(int64x2_t __p0) {
6403   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
6404   int64x1_t __ret;
6405   __ret = __builtin_shufflevector(__rev0, __rev0, 1);
6406   return __ret;
6407 }
6408 #endif
6409 
6410 #ifdef __LITTLE_ENDIAN__
6411 __ai int16x4_t vget_high_s16(int16x8_t __p0) {
6412   int16x4_t __ret;
6413   __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
6414   return __ret;
6415 }
6416 #else
6417 __ai int16x4_t vget_high_s16(int16x8_t __p0) {
6418   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
6419   int16x4_t __ret;
6420   __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
6421   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
6422   return __ret;
6423 }
6424 __ai int16x4_t __noswap_vget_high_s16(int16x8_t __p0) {
6425   int16x4_t __ret;
6426   __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
6427   return __ret;
6428 }
6429 #endif
6430 
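/* vget_lane_* / vgetq_lane_* extract a single scalar element at lane __p1,
 * which must be a compile-time constant within the vector's lane range.
 * Illustrative sketch (example values):
 *
 *   int32x4_t v     = vdupq_n_s32(42);
 *   int32_t   lane2 = vgetq_lane_s32(v, 2);   // 42
 */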
6431 #ifdef __LITTLE_ENDIAN__
6432 #define vget_lane_p8(__p0, __p1) __extension__ ({ \
6433   poly8x8_t __s0 = __p0; \
6434   poly8_t __ret; \
6435   __ret = (poly8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
6436   __ret; \
6437 })
6438 #else
6439 #define vget_lane_p8(__p0, __p1) __extension__ ({ \
6440   poly8x8_t __s0 = __p0; \
6441   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
6442   poly8_t __ret; \
6443   __ret = (poly8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
6444   __ret; \
6445 })
6446 #define __noswap_vget_lane_p8(__p0, __p1) __extension__ ({ \
6447   poly8x8_t __s0 = __p0; \
6448   poly8_t __ret; \
6449   __ret = (poly8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
6450   __ret; \
6451 })
6452 #endif
6453 
6454 #ifdef __LITTLE_ENDIAN__
6455 #define vget_lane_p16(__p0, __p1) __extension__ ({ \
6456   poly16x4_t __s0 = __p0; \
6457   poly16_t __ret; \
6458   __ret = (poly16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
6459   __ret; \
6460 })
6461 #else
6462 #define vget_lane_p16(__p0, __p1) __extension__ ({ \
6463   poly16x4_t __s0 = __p0; \
6464   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
6465   poly16_t __ret; \
6466   __ret = (poly16_t) __builtin_neon_vget_lane_i16((int8x8_t)__rev0, __p1); \
6467   __ret; \
6468 })
6469 #define __noswap_vget_lane_p16(__p0, __p1) __extension__ ({ \
6470   poly16x4_t __s0 = __p0; \
6471   poly16_t __ret; \
6472   __ret = (poly16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
6473   __ret; \
6474 })
6475 #endif
6476 
6477 #ifdef __LITTLE_ENDIAN__
6478 #define vgetq_lane_p8(__p0, __p1) __extension__ ({ \
6479   poly8x16_t __s0 = __p0; \
6480   poly8_t __ret; \
6481   __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
6482   __ret; \
6483 })
6484 #else
6485 #define vgetq_lane_p8(__p0, __p1) __extension__ ({ \
6486   poly8x16_t __s0 = __p0; \
6487   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
6488   poly8_t __ret; \
6489   __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
6490   __ret; \
6491 })
6492 #define __noswap_vgetq_lane_p8(__p0, __p1) __extension__ ({ \
6493   poly8x16_t __s0 = __p0; \
6494   poly8_t __ret; \
6495   __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
6496   __ret; \
6497 })
6498 #endif
6499 
6500 #ifdef __LITTLE_ENDIAN__
6501 #define vgetq_lane_p16(__p0, __p1) __extension__ ({ \
6502   poly16x8_t __s0 = __p0; \
6503   poly16_t __ret; \
6504   __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
6505   __ret; \
6506 })
6507 #else
6508 #define vgetq_lane_p16(__p0, __p1) __extension__ ({ \
6509   poly16x8_t __s0 = __p0; \
6510   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
6511   poly16_t __ret; \
6512   __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__rev0, __p1); \
6513   __ret; \
6514 })
6515 #define __noswap_vgetq_lane_p16(__p0, __p1) __extension__ ({ \
6516   poly16x8_t __s0 = __p0; \
6517   poly16_t __ret; \
6518   __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
6519   __ret; \
6520 })
6521 #endif
6522 
6523 #ifdef __LITTLE_ENDIAN__
6524 #define vgetq_lane_u8(__p0, __p1) __extension__ ({ \
6525   uint8x16_t __s0 = __p0; \
6526   uint8_t __ret; \
6527   __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
6528   __ret; \
6529 })
6530 #else
6531 #define vgetq_lane_u8(__p0, __p1) __extension__ ({ \
6532   uint8x16_t __s0 = __p0; \
6533   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
6534   uint8_t __ret; \
6535   __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
6536   __ret; \
6537 })
6538 #define __noswap_vgetq_lane_u8(__p0, __p1) __extension__ ({ \
6539   uint8x16_t __s0 = __p0; \
6540   uint8_t __ret; \
6541   __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
6542   __ret; \
6543 })
6544 #endif
6545 
6546 #ifdef __LITTLE_ENDIAN__
6547 #define vgetq_lane_u32(__p0, __p1) __extension__ ({ \
6548   uint32x4_t __s0 = __p0; \
6549   uint32_t __ret; \
6550   __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
6551   __ret; \
6552 })
6553 #else
6554 #define vgetq_lane_u32(__p0, __p1) __extension__ ({ \
6555   uint32x4_t __s0 = __p0; \
6556   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
6557   uint32_t __ret; \
6558   __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__rev0, __p1); \
6559   __ret; \
6560 })
6561 #define __noswap_vgetq_lane_u32(__p0, __p1) __extension__ ({ \
6562   uint32x4_t __s0 = __p0; \
6563   uint32_t __ret; \
6564   __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
6565   __ret; \
6566 })
6567 #endif
6568 
6569 #ifdef __LITTLE_ENDIAN__
6570 #define vgetq_lane_u64(__p0, __p1) __extension__ ({ \
6571   uint64x2_t __s0 = __p0; \
6572   uint64_t __ret; \
6573   __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
6574   __ret; \
6575 })
6576 #else
6577 #define vgetq_lane_u64(__p0, __p1) __extension__ ({ \
6578   uint64x2_t __s0 = __p0; \
6579   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
6580   uint64_t __ret; \
6581   __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__rev0, __p1); \
6582   __ret; \
6583 })
6584 #define __noswap_vgetq_lane_u64(__p0, __p1) __extension__ ({ \
6585   uint64x2_t __s0 = __p0; \
6586   uint64_t __ret; \
6587   __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
6588   __ret; \
6589 })
6590 #endif
6591 
6592 #ifdef __LITTLE_ENDIAN__
6593 #define vgetq_lane_u16(__p0, __p1) __extension__ ({ \
6594   uint16x8_t __s0 = __p0; \
6595   uint16_t __ret; \
6596   __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
6597   __ret; \
6598 })
6599 #else
6600 #define vgetq_lane_u16(__p0, __p1) __extension__ ({ \
6601   uint16x8_t __s0 = __p0; \
6602   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
6603   uint16_t __ret; \
6604   __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__rev0, __p1); \
6605   __ret; \
6606 })
6607 #define __noswap_vgetq_lane_u16(__p0, __p1) __extension__ ({ \
6608   uint16x8_t __s0 = __p0; \
6609   uint16_t __ret; \
6610   __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
6611   __ret; \
6612 })
6613 #endif
6614 
6615 #ifdef __LITTLE_ENDIAN__
6616 #define vgetq_lane_s8(__p0, __p1) __extension__ ({ \
6617   int8x16_t __s0 = __p0; \
6618   int8_t __ret; \
6619   __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
6620   __ret; \
6621 })
6622 #else
6623 #define vgetq_lane_s8(__p0, __p1) __extension__ ({ \
6624   int8x16_t __s0 = __p0; \
6625   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
6626   int8_t __ret; \
6627   __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
6628   __ret; \
6629 })
6630 #define __noswap_vgetq_lane_s8(__p0, __p1) __extension__ ({ \
6631   int8x16_t __s0 = __p0; \
6632   int8_t __ret; \
6633   __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
6634   __ret; \
6635 })
6636 #endif
6637 
6638 #ifdef __LITTLE_ENDIAN__
6639 #define vgetq_lane_f32(__p0, __p1) __extension__ ({ \
6640   float32x4_t __s0 = __p0; \
6641   float32_t __ret; \
6642   __ret = (float32_t) __builtin_neon_vgetq_lane_f32((int8x16_t)__s0, __p1); \
6643   __ret; \
6644 })
6645 #else
6646 #define vgetq_lane_f32(__p0, __p1) __extension__ ({ \
6647   float32x4_t __s0 = __p0; \
6648   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
6649   float32_t __ret; \
6650   __ret = (float32_t) __builtin_neon_vgetq_lane_f32((int8x16_t)__rev0, __p1); \
6651   __ret; \
6652 })
6653 #define __noswap_vgetq_lane_f32(__p0, __p1) __extension__ ({ \
6654   float32x4_t __s0 = __p0; \
6655   float32_t __ret; \
6656   __ret = (float32_t) __builtin_neon_vgetq_lane_f32((int8x16_t)__s0, __p1); \
6657   __ret; \
6658 })
6659 #endif
6660 
6661 #ifdef __LITTLE_ENDIAN__
6662 #define vgetq_lane_s32(__p0, __p1) __extension__ ({ \
6663   int32x4_t __s0 = __p0; \
6664   int32_t __ret; \
6665   __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
6666   __ret; \
6667 })
6668 #else
6669 #define vgetq_lane_s32(__p0, __p1) __extension__ ({ \
6670   int32x4_t __s0 = __p0; \
6671   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
6672   int32_t __ret; \
6673   __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__rev0, __p1); \
6674   __ret; \
6675 })
6676 #define __noswap_vgetq_lane_s32(__p0, __p1) __extension__ ({ \
6677   int32x4_t __s0 = __p0; \
6678   int32_t __ret; \
6679   __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
6680   __ret; \
6681 })
6682 #endif
6683 
6684 #ifdef __LITTLE_ENDIAN__
6685 #define vgetq_lane_s64(__p0, __p1) __extension__ ({ \
6686   int64x2_t __s0 = __p0; \
6687   int64_t __ret; \
6688   __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
6689   __ret; \
6690 })
6691 #else
6692 #define vgetq_lane_s64(__p0, __p1) __extension__ ({ \
6693   int64x2_t __s0 = __p0; \
6694   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
6695   int64_t __ret; \
6696   __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__rev0, __p1); \
6697   __ret; \
6698 })
6699 #define __noswap_vgetq_lane_s64(__p0, __p1) __extension__ ({ \
6700   int64x2_t __s0 = __p0; \
6701   int64_t __ret; \
6702   __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
6703   __ret; \
6704 })
6705 #endif
6706 
6707 #ifdef __LITTLE_ENDIAN__
6708 #define vgetq_lane_s16(__p0, __p1) __extension__ ({ \
6709   int16x8_t __s0 = __p0; \
6710   int16_t __ret; \
6711   __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
6712   __ret; \
6713 })
6714 #else
6715 #define vgetq_lane_s16(__p0, __p1) __extension__ ({ \
6716   int16x8_t __s0 = __p0; \
6717   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
6718   int16_t __ret; \
6719   __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__rev0, __p1); \
6720   __ret; \
6721 })
6722 #define __noswap_vgetq_lane_s16(__p0, __p1) __extension__ ({ \
6723   int16x8_t __s0 = __p0; \
6724   int16_t __ret; \
6725   __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
6726   __ret; \
6727 })
6728 #endif
6729 
6730 #ifdef __LITTLE_ENDIAN__
6731 #define vget_lane_u8(__p0, __p1) __extension__ ({ \
6732   uint8x8_t __s0 = __p0; \
6733   uint8_t __ret; \
6734   __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
6735   __ret; \
6736 })
6737 #else
6738 #define vget_lane_u8(__p0, __p1) __extension__ ({ \
6739   uint8x8_t __s0 = __p0; \
6740   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
6741   uint8_t __ret; \
6742   __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
6743   __ret; \
6744 })
6745 #define __noswap_vget_lane_u8(__p0, __p1) __extension__ ({ \
6746   uint8x8_t __s0 = __p0; \
6747   uint8_t __ret; \
6748   __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
6749   __ret; \
6750 })
6751 #endif
6752 
6753 #ifdef __LITTLE_ENDIAN__
6754 #define vget_lane_u32(__p0, __p1) __extension__ ({ \
6755   uint32x2_t __s0 = __p0; \
6756   uint32_t __ret; \
6757   __ret = (uint32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
6758   __ret; \
6759 })
6760 #else
6761 #define vget_lane_u32(__p0, __p1) __extension__ ({ \
6762   uint32x2_t __s0 = __p0; \
6763   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
6764   uint32_t __ret; \
6765   __ret = (uint32_t) __builtin_neon_vget_lane_i32((int8x8_t)__rev0, __p1); \
6766   __ret; \
6767 })
6768 #define __noswap_vget_lane_u32(__p0, __p1) __extension__ ({ \
6769   uint32x2_t __s0 = __p0; \
6770   uint32_t __ret; \
6771   __ret = (uint32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
6772   __ret; \
6773 })
6774 #endif
6775 
6776 #ifdef __LITTLE_ENDIAN__
6777 #define vget_lane_u64(__p0, __p1) __extension__ ({ \
6778   uint64x1_t __s0 = __p0; \
6779   uint64_t __ret; \
6780   __ret = (uint64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
6781   __ret; \
6782 })
6783 #else
6784 #define vget_lane_u64(__p0, __p1) __extension__ ({ \
6785   uint64x1_t __s0 = __p0; \
6786   uint64_t __ret; \
6787   __ret = (uint64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
6788   __ret; \
6789 })
6790 #define __noswap_vget_lane_u64(__p0, __p1) __extension__ ({ \
6791   uint64x1_t __s0 = __p0; \
6792   uint64_t __ret; \
6793   __ret = (uint64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
6794   __ret; \
6795 })
6796 #endif
6797 
6798 #ifdef __LITTLE_ENDIAN__
6799 #define vget_lane_u16(__p0, __p1) __extension__ ({ \
6800   uint16x4_t __s0 = __p0; \
6801   uint16_t __ret; \
6802   __ret = (uint16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
6803   __ret; \
6804 })
6805 #else
6806 #define vget_lane_u16(__p0, __p1) __extension__ ({ \
6807   uint16x4_t __s0 = __p0; \
6808   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
6809   uint16_t __ret; \
6810   __ret = (uint16_t) __builtin_neon_vget_lane_i16((int8x8_t)__rev0, __p1); \
6811   __ret; \
6812 })
6813 #define __noswap_vget_lane_u16(__p0, __p1) __extension__ ({ \
6814   uint16x4_t __s0 = __p0; \
6815   uint16_t __ret; \
6816   __ret = (uint16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
6817   __ret; \
6818 })
6819 #endif
6820 
6821 #ifdef __LITTLE_ENDIAN__
6822 #define vget_lane_s8(__p0, __p1) __extension__ ({ \
6823   int8x8_t __s0 = __p0; \
6824   int8_t __ret; \
6825   __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
6826   __ret; \
6827 })
6828 #else
6829 #define vget_lane_s8(__p0, __p1) __extension__ ({ \
6830   int8x8_t __s0 = __p0; \
6831   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
6832   int8_t __ret; \
6833   __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
6834   __ret; \
6835 })
6836 #define __noswap_vget_lane_s8(__p0, __p1) __extension__ ({ \
6837   int8x8_t __s0 = __p0; \
6838   int8_t __ret; \
6839   __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
6840   __ret; \
6841 })
6842 #endif
6843 
6844 #ifdef __LITTLE_ENDIAN__
6845 #define vget_lane_f32(__p0, __p1) __extension__ ({ \
6846   float32x2_t __s0 = __p0; \
6847   float32_t __ret; \
6848   __ret = (float32_t) __builtin_neon_vget_lane_f32((int8x8_t)__s0, __p1); \
6849   __ret; \
6850 })
6851 #else
6852 #define vget_lane_f32(__p0, __p1) __extension__ ({ \
6853   float32x2_t __s0 = __p0; \
6854   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
6855   float32_t __ret; \
6856   __ret = (float32_t) __builtin_neon_vget_lane_f32((int8x8_t)__rev0, __p1); \
6857   __ret; \
6858 })
6859 #define __noswap_vget_lane_f32(__p0, __p1) __extension__ ({ \
6860   float32x2_t __s0 = __p0; \
6861   float32_t __ret; \
6862   __ret = (float32_t) __builtin_neon_vget_lane_f32((int8x8_t)__s0, __p1); \
6863   __ret; \
6864 })
6865 #endif
6866 
6867 #ifdef __LITTLE_ENDIAN__
6868 #define vget_lane_s32(__p0, __p1) __extension__ ({ \
6869   int32x2_t __s0 = __p0; \
6870   int32_t __ret; \
6871   __ret = (int32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
6872   __ret; \
6873 })
6874 #else
6875 #define vget_lane_s32(__p0, __p1) __extension__ ({ \
6876   int32x2_t __s0 = __p0; \
6877   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
6878   int32_t __ret; \
6879   __ret = (int32_t) __builtin_neon_vget_lane_i32((int8x8_t)__rev0, __p1); \
6880   __ret; \
6881 })
6882 #define __noswap_vget_lane_s32(__p0, __p1) __extension__ ({ \
6883   int32x2_t __s0 = __p0; \
6884   int32_t __ret; \
6885   __ret = (int32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
6886   __ret; \
6887 })
6888 #endif
6889 
6890 #ifdef __LITTLE_ENDIAN__
6891 #define vget_lane_s64(__p0, __p1) __extension__ ({ \
6892   int64x1_t __s0 = __p0; \
6893   int64_t __ret; \
6894   __ret = (int64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
6895   __ret; \
6896 })
6897 #else
6898 #define vget_lane_s64(__p0, __p1) __extension__ ({ \
6899   int64x1_t __s0 = __p0; \
6900   int64_t __ret; \
6901   __ret = (int64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
6902   __ret; \
6903 })
6904 #define __noswap_vget_lane_s64(__p0, __p1) __extension__ ({ \
6905   int64x1_t __s0 = __p0; \
6906   int64_t __ret; \
6907   __ret = (int64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
6908   __ret; \
6909 })
6910 #endif
6911 
6912 #ifdef __LITTLE_ENDIAN__
6913 #define vget_lane_s16(__p0, __p1) __extension__ ({ \
6914   int16x4_t __s0 = __p0; \
6915   int16_t __ret; \
6916   __ret = (int16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
6917   __ret; \
6918 })
6919 #else
6920 #define vget_lane_s16(__p0, __p1) __extension__ ({ \
6921   int16x4_t __s0 = __p0; \
6922   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
6923   int16_t __ret; \
6924   __ret = (int16_t) __builtin_neon_vget_lane_i16((int8x8_t)__rev0, __p1); \
6925   __ret; \
6926 })
6927 #define __noswap_vget_lane_s16(__p0, __p1) __extension__ ({ \
6928   int16x4_t __s0 = __p0; \
6929   int16_t __ret; \
6930   __ret = (int16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
6931   __ret; \
6932 })
6933 #endif
6934 
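/* vget_low_* return the lower half of a 128-bit "q" vector as a 64-bit vector
 * (lanes 0 .. N/2-1 of an N-lane input), the counterpart of vget_high_* above.
 */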
6935 #ifdef __LITTLE_ENDIAN__
6936 __ai poly8x8_t vget_low_p8(poly8x16_t __p0) {
6937   poly8x8_t __ret;
6938   __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
6939   return __ret;
6940 }
6941 #else
6942 __ai poly8x8_t vget_low_p8(poly8x16_t __p0) {
6943   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
6944   poly8x8_t __ret;
6945   __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
6946   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
6947   return __ret;
6948 }
6949 #endif
6950 
6951 #ifdef __LITTLE_ENDIAN__
6952 __ai poly16x4_t vget_low_p16(poly16x8_t __p0) {
6953   poly16x4_t __ret;
6954   __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
6955   return __ret;
6956 }
6957 #else
6958 __ai poly16x4_t vget_low_p16(poly16x8_t __p0) {
6959   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
6960   poly16x4_t __ret;
6961   __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
6962   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
6963   return __ret;
6964 }
6965 #endif
6966 
6967 #ifdef __LITTLE_ENDIAN__
6968 __ai uint8x8_t vget_low_u8(uint8x16_t __p0) {
6969   uint8x8_t __ret;
6970   __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
6971   return __ret;
6972 }
6973 #else
6974 __ai uint8x8_t vget_low_u8(uint8x16_t __p0) {
6975   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
6976   uint8x8_t __ret;
6977   __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
6978   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
6979   return __ret;
6980 }
6981 #endif
6982 
6983 #ifdef __LITTLE_ENDIAN__
6984 __ai uint32x2_t vget_low_u32(uint32x4_t __p0) {
6985   uint32x2_t __ret;
6986   __ret = __builtin_shufflevector(__p0, __p0, 0, 1);
6987   return __ret;
6988 }
6989 #else
6990 __ai uint32x2_t vget_low_u32(uint32x4_t __p0) {
6991   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
6992   uint32x2_t __ret;
6993   __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
6994   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
6995   return __ret;
6996 }
6997 #endif
6998 
6999 #ifdef __LITTLE_ENDIAN__
7000 __ai uint64x1_t vget_low_u64(uint64x2_t __p0) {
7001   uint64x1_t __ret;
7002   __ret = __builtin_shufflevector(__p0, __p0, 0);
7003   return __ret;
7004 }
7005 #else
7006 __ai uint64x1_t vget_low_u64(uint64x2_t __p0) {
7007   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
7008   uint64x1_t __ret;
7009   __ret = __builtin_shufflevector(__rev0, __rev0, 0);
7010   return __ret;
7011 }
7012 #endif
7013 
7014 #ifdef __LITTLE_ENDIAN__
7015 __ai uint16x4_t vget_low_u16(uint16x8_t __p0) {
7016   uint16x4_t __ret;
7017   __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
7018   return __ret;
7019 }
7020 #else
7021 __ai uint16x4_t vget_low_u16(uint16x8_t __p0) {
7022   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
7023   uint16x4_t __ret;
7024   __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
7025   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
7026   return __ret;
7027 }
7028 #endif
7029 
7030 #ifdef __LITTLE_ENDIAN__
7031 __ai int8x8_t vget_low_s8(int8x16_t __p0) {
7032   int8x8_t __ret;
7033   __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
7034   return __ret;
7035 }
7036 #else
7037 __ai int8x8_t vget_low_s8(int8x16_t __p0) {
7038   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
7039   int8x8_t __ret;
7040   __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
7041   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
7042   return __ret;
7043 }
7044 #endif
7045 
7046 #ifdef __LITTLE_ENDIAN__
7047 __ai float32x2_t vget_low_f32(float32x4_t __p0) {
7048   float32x2_t __ret;
7049   __ret = __builtin_shufflevector(__p0, __p0, 0, 1);
7050   return __ret;
7051 }
7052 #else
7053 __ai float32x2_t vget_low_f32(float32x4_t __p0) {
7054   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
7055   float32x2_t __ret;
7056   __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
7057   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
7058   return __ret;
7059 }
7060 #endif
7061 
7062 #ifdef __LITTLE_ENDIAN__
7063 __ai float16x4_t vget_low_f16(float16x8_t __p0) {
7064   float16x4_t __ret;
7065   __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
7066   return __ret;
7067 }
7068 #else
7069 __ai float16x4_t vget_low_f16(float16x8_t __p0) {
7070   float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
7071   float16x4_t __ret;
7072   __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
7073   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
7074   return __ret;
7075 }
7076 #endif
7077 
7078 #ifdef __LITTLE_ENDIAN__
7079 __ai int32x2_t vget_low_s32(int32x4_t __p0) {
7080   int32x2_t __ret;
7081   __ret = __builtin_shufflevector(__p0, __p0, 0, 1);
7082   return __ret;
7083 }
7084 #else
7085 __ai int32x2_t vget_low_s32(int32x4_t __p0) {
7086   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
7087   int32x2_t __ret;
7088   __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
7089   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
7090   return __ret;
7091 }
7092 #endif
7093 
7094 #ifdef __LITTLE_ENDIAN__
7095 __ai int64x1_t vget_low_s64(int64x2_t __p0) {
7096   int64x1_t __ret;
7097   __ret = __builtin_shufflevector(__p0, __p0, 0);
7098   return __ret;
7099 }
7100 #else
7101 __ai int64x1_t vget_low_s64(int64x2_t __p0) {
7102   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
7103   int64x1_t __ret;
7104   __ret = __builtin_shufflevector(__rev0, __rev0, 0);
7105   return __ret;
7106 }
7107 #endif
7108 
7109 #ifdef __LITTLE_ENDIAN__
7110 __ai int16x4_t vget_low_s16(int16x8_t __p0) {
7111   int16x4_t __ret;
7112   __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
7113   return __ret;
7114 }
7115 #else
7116 __ai int16x4_t vget_low_s16(int16x8_t __p0) {
7117   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
7118   int16x4_t __ret;
7119   __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
7120   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
7121   return __ret;
7122 }
7123 #endif
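/* The vget_low_* intrinsics above return the low half (the first 64 bits) of a
 * 128-bit vector.  A minimal usage sketch; the variable names are illustrative:
 *
 *   uint32x4_t q = vld1q_u32(src);      // src: const uint32_t[4]
 *   uint32x2_t lo = vget_low_u32(q);    // lanes 0 and 1 of q
 */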
7124 
7125 #ifdef __LITTLE_ENDIAN__
7126 __ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
7127   uint8x16_t __ret;
7128   __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
7129   return __ret;
7130 }
7131 #else
7132 __ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
7133   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
7134   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
7135   uint8x16_t __ret;
7136   __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
7137   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
7138   return __ret;
7139 }
7140 #endif
7141 
7142 #ifdef __LITTLE_ENDIAN__
7143 __ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
7144   uint32x4_t __ret;
7145   __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
7146   return __ret;
7147 }
7148 #else
7149 __ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
7150   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
7151   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
7152   uint32x4_t __ret;
7153   __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
7154   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
7155   return __ret;
7156 }
7157 #endif
7158 
7159 #ifdef __LITTLE_ENDIAN__
7160 __ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
7161   uint16x8_t __ret;
7162   __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
7163   return __ret;
7164 }
7165 #else
7166 __ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
7167   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
7168   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
7169   uint16x8_t __ret;
7170   __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
7171   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
7172   return __ret;
7173 }
7174 #endif
7175 
7176 #ifdef __LITTLE_ENDIAN__
7177 __ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
7178   int8x16_t __ret;
7179   __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
7180   return __ret;
7181 }
7182 #else
7183 __ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
7184   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
7185   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
7186   int8x16_t __ret;
7187   __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
7188   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
7189   return __ret;
7190 }
7191 #endif
7192 
7193 #ifdef __LITTLE_ENDIAN__
7194 __ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
7195   int32x4_t __ret;
7196   __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
7197   return __ret;
7198 }
7199 #else
7200 __ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
7201   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
7202   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
7203   int32x4_t __ret;
7204   __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
7205   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
7206   return __ret;
7207 }
7208 #endif
7209 
7210 #ifdef __LITTLE_ENDIAN__
7211 __ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
7212   int16x8_t __ret;
7213   __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
7214   return __ret;
7215 }
7216 #else
7217 __ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
7218   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
7219   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
7220   int16x8_t __ret;
7221   __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
7222   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
7223   return __ret;
7224 }
7225 #endif
7226 
7227 #ifdef __LITTLE_ENDIAN__
7228 __ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
7229   uint8x8_t __ret;
7230   __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
7231   return __ret;
7232 }
7233 #else
7234 __ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
7235   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
7236   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
7237   uint8x8_t __ret;
7238   __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
7239   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
7240   return __ret;
7241 }
7242 #endif
7243 
7244 #ifdef __LITTLE_ENDIAN__
7245 __ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
7246   uint32x2_t __ret;
7247   __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
7248   return __ret;
7249 }
7250 #else
7251 __ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
7252   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
7253   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
7254   uint32x2_t __ret;
7255   __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
7256   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
7257   return __ret;
7258 }
7259 #endif
7260 
7261 #ifdef __LITTLE_ENDIAN__
7262 __ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
7263   uint16x4_t __ret;
7264   __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
7265   return __ret;
7266 }
7267 #else
7268 __ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
7269   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
7270   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
7271   uint16x4_t __ret;
7272   __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
7273   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
7274   return __ret;
7275 }
7276 #endif
7277 
7278 #ifdef __LITTLE_ENDIAN__
7279 __ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) {
7280   int8x8_t __ret;
7281   __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
7282   return __ret;
7283 }
7284 #else
7285 __ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) {
7286   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
7287   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
7288   int8x8_t __ret;
7289   __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
7290   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
7291   return __ret;
7292 }
7293 #endif
7294 
7295 #ifdef __LITTLE_ENDIAN__
7296 __ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) {
7297   int32x2_t __ret;
7298   __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
7299   return __ret;
7300 }
7301 #else
7302 __ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) {
7303   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
7304   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
7305   int32x2_t __ret;
7306   __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
7307   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
7308   return __ret;
7309 }
7310 #endif
7311 
7312 #ifdef __LITTLE_ENDIAN__
7313 __ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) {
7314   int16x4_t __ret;
7315   __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
7316   return __ret;
7317 }
7318 #else
7319 __ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) {
7320   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
7321   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
7322   int16x4_t __ret;
7323   __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
7324   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
7325   return __ret;
7326 }
7327 #endif
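/* vhadd_*(a, b) computes the element-wise halving add (a[i] + b[i]) >> 1, with
 * the addition performed at wider precision so it cannot overflow.  A small
 * illustrative sketch (values chosen for the example only):
 *
 *   int16x4_t a = vdup_n_s16(30000);
 *   int16x4_t b = vdup_n_s16(30001);
 *   int16x4_t h = vhadd_s16(a, b);      // every lane == 30000, no overflow
 */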
7328 
7329 #ifdef __LITTLE_ENDIAN__
7330 __ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
7331   uint8x16_t __ret;
7332   __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
7333   return __ret;
7334 }
7335 #else
7336 __ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
7337   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
7338   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
7339   uint8x16_t __ret;
7340   __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
7341   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
7342   return __ret;
7343 }
7344 #endif
7345 
7346 #ifdef __LITTLE_ENDIAN__
7347 __ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
7348   uint32x4_t __ret;
7349   __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
7350   return __ret;
7351 }
7352 #else
7353 __ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
7354   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
7355   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
7356   uint32x4_t __ret;
7357   __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
7358   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
7359   return __ret;
7360 }
7361 #endif
7362 
7363 #ifdef __LITTLE_ENDIAN__
7364 __ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
7365   uint16x8_t __ret;
7366   __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
7367   return __ret;
7368 }
7369 #else
7370 __ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
7371   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
7372   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
7373   uint16x8_t __ret;
7374   __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
7375   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
7376   return __ret;
7377 }
7378 #endif
7379 
7380 #ifdef __LITTLE_ENDIAN__
7381 __ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) {
7382   int8x16_t __ret;
7383   __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
7384   return __ret;
7385 }
7386 #else
7387 __ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) {
7388   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
7389   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
7390   int8x16_t __ret;
7391   __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
7392   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
7393   return __ret;
7394 }
7395 #endif
7396 
7397 #ifdef __LITTLE_ENDIAN__
7398 __ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) {
7399   int32x4_t __ret;
7400   __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
7401   return __ret;
7402 }
7403 #else
7404 __ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) {
7405   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
7406   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
7407   int32x4_t __ret;
7408   __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
7409   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
7410   return __ret;
7411 }
7412 #endif
7413 
7414 #ifdef __LITTLE_ENDIAN__
7415 __ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) {
7416   int16x8_t __ret;
7417   __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
7418   return __ret;
7419 }
7420 #else
7421 __ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) {
7422   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
7423   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
7424   int16x8_t __ret;
7425   __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
7426   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
7427   return __ret;
7428 }
7429 #endif
7430 
7431 #ifdef __LITTLE_ENDIAN__
7432 __ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
7433   uint8x8_t __ret;
7434   __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
7435   return __ret;
7436 }
7437 #else
7438 __ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
7439   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
7440   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
7441   uint8x8_t __ret;
7442   __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
7443   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
7444   return __ret;
7445 }
7446 #endif
7447 
7448 #ifdef __LITTLE_ENDIAN__
7449 __ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
7450   uint32x2_t __ret;
7451   __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
7452   return __ret;
7453 }
7454 #else
7455 __ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
7456   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
7457   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
7458   uint32x2_t __ret;
7459   __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
7460   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
7461   return __ret;
7462 }
7463 #endif
7464 
7465 #ifdef __LITTLE_ENDIAN__
7466 __ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
7467   uint16x4_t __ret;
7468   __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
7469   return __ret;
7470 }
7471 #else
7472 __ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
7473   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
7474   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
7475   uint16x4_t __ret;
7476   __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
7477   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
7478   return __ret;
7479 }
7480 #endif
7481 
7482 #ifdef __LITTLE_ENDIAN__
7483 __ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) {
7484   int8x8_t __ret;
7485   __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
7486   return __ret;
7487 }
7488 #else
7489 __ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) {
7490   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
7491   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
7492   int8x8_t __ret;
7493   __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
7494   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
7495   return __ret;
7496 }
7497 #endif
7498 
7499 #ifdef __LITTLE_ENDIAN__
7500 __ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) {
7501   int32x2_t __ret;
7502   __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
7503   return __ret;
7504 }
7505 #else
7506 __ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) {
7507   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
7508   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
7509   int32x2_t __ret;
7510   __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
7511   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
7512   return __ret;
7513 }
7514 #endif
7515 
7516 #ifdef __LITTLE_ENDIAN__
7517 __ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) {
7518   int16x4_t __ret;
7519   __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
7520   return __ret;
7521 }
7522 #else
7523 __ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) {
7524   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
7525   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
7526   int16x4_t __ret;
7527   __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
7528   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
7529   return __ret;
7530 }
7531 #endif
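/* vhsub_*(a, b) is the matching halving subtract: each lane becomes
 * (a[i] - b[i]) >> 1, again computed without intermediate overflow.  Sketch
 * with illustrative values:
 *
 *   uint8x8_t a = vdup_n_u8(200);
 *   uint8x8_t b = vdup_n_u8(100);
 *   uint8x8_t d = vhsub_u8(a, b);       // every lane == 50
 */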
7532 
7533 #ifdef __LITTLE_ENDIAN__
7534 #define vld1_p8(__p0) __extension__ ({ \
7535   poly8x8_t __ret; \
7536   __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \
7537   __ret; \
7538 })
7539 #else
7540 #define vld1_p8(__p0) __extension__ ({ \
7541   poly8x8_t __ret; \
7542   __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \
7543   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
7544   __ret; \
7545 })
7546 #endif
7547 
7548 #ifdef __LITTLE_ENDIAN__
7549 #define vld1_p16(__p0) __extension__ ({ \
7550   poly16x4_t __ret; \
7551   __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \
7552   __ret; \
7553 })
7554 #else
7555 #define vld1_p16(__p0) __extension__ ({ \
7556   poly16x4_t __ret; \
7557   __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \
7558   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
7559   __ret; \
7560 })
7561 #endif
7562 
7563 #ifdef __LITTLE_ENDIAN__
7564 #define vld1q_p8(__p0) __extension__ ({ \
7565   poly8x16_t __ret; \
7566   __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \
7567   __ret; \
7568 })
7569 #else
7570 #define vld1q_p8(__p0) __extension__ ({ \
7571   poly8x16_t __ret; \
7572   __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \
7573   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
7574   __ret; \
7575 })
7576 #endif
7577 
7578 #ifdef __LITTLE_ENDIAN__
7579 #define vld1q_p16(__p0) __extension__ ({ \
7580   poly16x8_t __ret; \
7581   __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \
7582   __ret; \
7583 })
7584 #else
7585 #define vld1q_p16(__p0) __extension__ ({ \
7586   poly16x8_t __ret; \
7587   __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \
7588   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
7589   __ret; \
7590 })
7591 #endif
7592 
7593 #ifdef __LITTLE_ENDIAN__
7594 #define vld1q_u8(__p0) __extension__ ({ \
7595   uint8x16_t __ret; \
7596   __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \
7597   __ret; \
7598 })
7599 #else
7600 #define vld1q_u8(__p0) __extension__ ({ \
7601   uint8x16_t __ret; \
7602   __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \
7603   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
7604   __ret; \
7605 })
7606 #endif
7607 
7608 #ifdef __LITTLE_ENDIAN__
7609 #define vld1q_u32(__p0) __extension__ ({ \
7610   uint32x4_t __ret; \
7611   __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \
7612   __ret; \
7613 })
7614 #else
7615 #define vld1q_u32(__p0) __extension__ ({ \
7616   uint32x4_t __ret; \
7617   __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \
7618   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
7619   __ret; \
7620 })
7621 #endif
7622 
7623 #ifdef __LITTLE_ENDIAN__
7624 #define vld1q_u64(__p0) __extension__ ({ \
7625   uint64x2_t __ret; \
7626   __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \
7627   __ret; \
7628 })
7629 #else
7630 #define vld1q_u64(__p0) __extension__ ({ \
7631   uint64x2_t __ret; \
7632   __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \
7633   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
7634   __ret; \
7635 })
7636 #endif
7637 
7638 #ifdef __LITTLE_ENDIAN__
7639 #define vld1q_u16(__p0) __extension__ ({ \
7640   uint16x8_t __ret; \
7641   __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \
7642   __ret; \
7643 })
7644 #else
7645 #define vld1q_u16(__p0) __extension__ ({ \
7646   uint16x8_t __ret; \
7647   __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \
7648   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
7649   __ret; \
7650 })
7651 #endif
7652 
7653 #ifdef __LITTLE_ENDIAN__
7654 #define vld1q_s8(__p0) __extension__ ({ \
7655   int8x16_t __ret; \
7656   __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \
7657   __ret; \
7658 })
7659 #else
7660 #define vld1q_s8(__p0) __extension__ ({ \
7661   int8x16_t __ret; \
7662   __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \
7663   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
7664   __ret; \
7665 })
7666 #endif
7667 
7668 #ifdef __LITTLE_ENDIAN__
7669 #define vld1q_f32(__p0) __extension__ ({ \
7670   float32x4_t __ret; \
7671   __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \
7672   __ret; \
7673 })
7674 #else
7675 #define vld1q_f32(__p0) __extension__ ({ \
7676   float32x4_t __ret; \
7677   __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \
7678   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
7679   __ret; \
7680 })
7681 #endif
7682 
7683 #ifdef __LITTLE_ENDIAN__
7684 #define vld1q_f16(__p0) __extension__ ({ \
7685   float16x8_t __ret; \
7686   __ret = (float16x8_t) __builtin_neon_vld1q_v(__p0, 40); \
7687   __ret; \
7688 })
7689 #else
7690 #define vld1q_f16(__p0) __extension__ ({ \
7691   float16x8_t __ret; \
7692   __ret = (float16x8_t) __builtin_neon_vld1q_v(__p0, 40); \
7693   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
7694   __ret; \
7695 })
7696 #endif
7697 
7698 #ifdef __LITTLE_ENDIAN__
7699 #define vld1q_s32(__p0) __extension__ ({ \
7700   int32x4_t __ret; \
7701   __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \
7702   __ret; \
7703 })
7704 #else
7705 #define vld1q_s32(__p0) __extension__ ({ \
7706   int32x4_t __ret; \
7707   __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \
7708   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
7709   __ret; \
7710 })
7711 #endif
7712 
7713 #ifdef __LITTLE_ENDIAN__
7714 #define vld1q_s64(__p0) __extension__ ({ \
7715   int64x2_t __ret; \
7716   __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \
7717   __ret; \
7718 })
7719 #else
7720 #define vld1q_s64(__p0) __extension__ ({ \
7721   int64x2_t __ret; \
7722   __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \
7723   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
7724   __ret; \
7725 })
7726 #endif
7727 
7728 #ifdef __LITTLE_ENDIAN__
7729 #define vld1q_s16(__p0) __extension__ ({ \
7730   int16x8_t __ret; \
7731   __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \
7732   __ret; \
7733 })
7734 #else
7735 #define vld1q_s16(__p0) __extension__ ({ \
7736   int16x8_t __ret; \
7737   __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \
7738   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
7739   __ret; \
7740 })
7741 #endif
7742 
7743 #ifdef __LITTLE_ENDIAN__
7744 #define vld1_u8(__p0) __extension__ ({ \
7745   uint8x8_t __ret; \
7746   __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \
7747   __ret; \
7748 })
7749 #else
7750 #define vld1_u8(__p0) __extension__ ({ \
7751   uint8x8_t __ret; \
7752   __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \
7753   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
7754   __ret; \
7755 })
7756 #endif
7757 
7758 #ifdef __LITTLE_ENDIAN__
7759 #define vld1_u32(__p0) __extension__ ({ \
7760   uint32x2_t __ret; \
7761   __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \
7762   __ret; \
7763 })
7764 #else
7765 #define vld1_u32(__p0) __extension__ ({ \
7766   uint32x2_t __ret; \
7767   __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \
7768   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
7769   __ret; \
7770 })
7771 #endif
7772 
7773 #ifdef __LITTLE_ENDIAN__
7774 #define vld1_u64(__p0) __extension__ ({ \
7775   uint64x1_t __ret; \
7776   __ret = (uint64x1_t) __builtin_neon_vld1_v(__p0, 19); \
7777   __ret; \
7778 })
7779 #else
7780 #define vld1_u64(__p0) __extension__ ({ \
7781   uint64x1_t __ret; \
7782   __ret = (uint64x1_t) __builtin_neon_vld1_v(__p0, 19); \
7783   __ret; \
7784 })
7785 #endif
7786 
7787 #ifdef __LITTLE_ENDIAN__
7788 #define vld1_u16(__p0) __extension__ ({ \
7789   uint16x4_t __ret; \
7790   __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \
7791   __ret; \
7792 })
7793 #else
7794 #define vld1_u16(__p0) __extension__ ({ \
7795   uint16x4_t __ret; \
7796   __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \
7797   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
7798   __ret; \
7799 })
7800 #endif
7801 
7802 #ifdef __LITTLE_ENDIAN__
7803 #define vld1_s8(__p0) __extension__ ({ \
7804   int8x8_t __ret; \
7805   __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \
7806   __ret; \
7807 })
7808 #else
7809 #define vld1_s8(__p0) __extension__ ({ \
7810   int8x8_t __ret; \
7811   __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \
7812   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
7813   __ret; \
7814 })
7815 #endif
7816 
7817 #ifdef __LITTLE_ENDIAN__
7818 #define vld1_f32(__p0) __extension__ ({ \
7819   float32x2_t __ret; \
7820   __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \
7821   __ret; \
7822 })
7823 #else
7824 #define vld1_f32(__p0) __extension__ ({ \
7825   float32x2_t __ret; \
7826   __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \
7827   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
7828   __ret; \
7829 })
7830 #endif
7831 
7832 #ifdef __LITTLE_ENDIAN__
7833 #define vld1_f16(__p0) __extension__ ({ \
7834   float16x4_t __ret; \
7835   __ret = (float16x4_t) __builtin_neon_vld1_v(__p0, 8); \
7836   __ret; \
7837 })
7838 #else
7839 #define vld1_f16(__p0) __extension__ ({ \
7840   float16x4_t __ret; \
7841   __ret = (float16x4_t) __builtin_neon_vld1_v(__p0, 8); \
7842   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
7843   __ret; \
7844 })
7845 #endif
7846 
7847 #ifdef __LITTLE_ENDIAN__
7848 #define vld1_s32(__p0) __extension__ ({ \
7849   int32x2_t __ret; \
7850   __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \
7851   __ret; \
7852 })
7853 #else
7854 #define vld1_s32(__p0) __extension__ ({ \
7855   int32x2_t __ret; \
7856   __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \
7857   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
7858   __ret; \
7859 })
7860 #endif
7861 
7862 #ifdef __LITTLE_ENDIAN__
7863 #define vld1_s64(__p0) __extension__ ({ \
7864   int64x1_t __ret; \
7865   __ret = (int64x1_t) __builtin_neon_vld1_v(__p0, 3); \
7866   __ret; \
7867 })
7868 #else
7869 #define vld1_s64(__p0) __extension__ ({ \
7870   int64x1_t __ret; \
7871   __ret = (int64x1_t) __builtin_neon_vld1_v(__p0, 3); \
7872   __ret; \
7873 })
7874 #endif
7875 
7876 #ifdef __LITTLE_ENDIAN__
7877 #define vld1_s16(__p0) __extension__ ({ \
7878   int16x4_t __ret; \
7879   __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \
7880   __ret; \
7881 })
7882 #else
7883 #define vld1_s16(__p0) __extension__ ({ \
7884   int16x4_t __ret; \
7885   __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \
7886   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
7887   __ret; \
7888 })
7889 #endif
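/* The vld1_* / vld1q_* macros above load one 64-bit or 128-bit vector of
 * consecutive elements from memory.  Illustrative sketch (the array name is
 * hypothetical):
 *
 *   const uint8_t buf[16] = {0};
 *   uint8x16_t v = vld1q_u8(buf);       // lane i holds buf[i]
 */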
7890 
7891 #ifdef __LITTLE_ENDIAN__
7892 #define vld1_dup_p8(__p0) __extension__ ({ \
7893   poly8x8_t __ret; \
7894   __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \
7895   __ret; \
7896 })
7897 #else
7898 #define vld1_dup_p8(__p0) __extension__ ({ \
7899   poly8x8_t __ret; \
7900   __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \
7901   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
7902   __ret; \
7903 })
7904 #endif
7905 
7906 #ifdef __LITTLE_ENDIAN__
7907 #define vld1_dup_p16(__p0) __extension__ ({ \
7908   poly16x4_t __ret; \
7909   __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \
7910   __ret; \
7911 })
7912 #else
7913 #define vld1_dup_p16(__p0) __extension__ ({ \
7914   poly16x4_t __ret; \
7915   __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \
7916   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
7917   __ret; \
7918 })
7919 #endif
7920 
7921 #ifdef __LITTLE_ENDIAN__
7922 #define vld1q_dup_p8(__p0) __extension__ ({ \
7923   poly8x16_t __ret; \
7924   __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \
7925   __ret; \
7926 })
7927 #else
7928 #define vld1q_dup_p8(__p0) __extension__ ({ \
7929   poly8x16_t __ret; \
7930   __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \
7931   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
7932   __ret; \
7933 })
7934 #endif
7935 
7936 #ifdef __LITTLE_ENDIAN__
7937 #define vld1q_dup_p16(__p0) __extension__ ({ \
7938   poly16x8_t __ret; \
7939   __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \
7940   __ret; \
7941 })
7942 #else
7943 #define vld1q_dup_p16(__p0) __extension__ ({ \
7944   poly16x8_t __ret; \
7945   __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \
7946   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
7947   __ret; \
7948 })
7949 #endif
7950 
7951 #ifdef __LITTLE_ENDIAN__
7952 #define vld1q_dup_u8(__p0) __extension__ ({ \
7953   uint8x16_t __ret; \
7954   __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \
7955   __ret; \
7956 })
7957 #else
7958 #define vld1q_dup_u8(__p0) __extension__ ({ \
7959   uint8x16_t __ret; \
7960   __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \
7961   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
7962   __ret; \
7963 })
7964 #endif
7965 
7966 #ifdef __LITTLE_ENDIAN__
7967 #define vld1q_dup_u32(__p0) __extension__ ({ \
7968   uint32x4_t __ret; \
7969   __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \
7970   __ret; \
7971 })
7972 #else
7973 #define vld1q_dup_u32(__p0) __extension__ ({ \
7974   uint32x4_t __ret; \
7975   __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \
7976   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
7977   __ret; \
7978 })
7979 #endif
7980 
7981 #ifdef __LITTLE_ENDIAN__
7982 #define vld1q_dup_u64(__p0) __extension__ ({ \
7983   uint64x2_t __ret; \
7984   __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \
7985   __ret; \
7986 })
7987 #else
7988 #define vld1q_dup_u64(__p0) __extension__ ({ \
7989   uint64x2_t __ret; \
7990   __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \
7991   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
7992   __ret; \
7993 })
7994 #endif
7995 
7996 #ifdef __LITTLE_ENDIAN__
7997 #define vld1q_dup_u16(__p0) __extension__ ({ \
7998   uint16x8_t __ret; \
7999   __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \
8000   __ret; \
8001 })
8002 #else
8003 #define vld1q_dup_u16(__p0) __extension__ ({ \
8004   uint16x8_t __ret; \
8005   __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \
8006   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
8007   __ret; \
8008 })
8009 #endif
8010 
8011 #ifdef __LITTLE_ENDIAN__
8012 #define vld1q_dup_s8(__p0) __extension__ ({ \
8013   int8x16_t __ret; \
8014   __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \
8015   __ret; \
8016 })
8017 #else
8018 #define vld1q_dup_s8(__p0) __extension__ ({ \
8019   int8x16_t __ret; \
8020   __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \
8021   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
8022   __ret; \
8023 })
8024 #endif
8025 
8026 #ifdef __LITTLE_ENDIAN__
8027 #define vld1q_dup_f32(__p0) __extension__ ({ \
8028   float32x4_t __ret; \
8029   __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \
8030   __ret; \
8031 })
8032 #else
8033 #define vld1q_dup_f32(__p0) __extension__ ({ \
8034   float32x4_t __ret; \
8035   __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \
8036   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
8037   __ret; \
8038 })
8039 #endif
8040 
8041 #ifdef __LITTLE_ENDIAN__
8042 #define vld1q_dup_f16(__p0) __extension__ ({ \
8043   float16x8_t __ret; \
8044   __ret = (float16x8_t) __builtin_neon_vld1q_dup_v(__p0, 40); \
8045   __ret; \
8046 })
8047 #else
8048 #define vld1q_dup_f16(__p0) __extension__ ({ \
8049   float16x8_t __ret; \
8050   __ret = (float16x8_t) __builtin_neon_vld1q_dup_v(__p0, 40); \
8051   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
8052   __ret; \
8053 })
8054 #endif
8055 
8056 #ifdef __LITTLE_ENDIAN__
8057 #define vld1q_dup_s32(__p0) __extension__ ({ \
8058   int32x4_t __ret; \
8059   __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \
8060   __ret; \
8061 })
8062 #else
8063 #define vld1q_dup_s32(__p0) __extension__ ({ \
8064   int32x4_t __ret; \
8065   __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \
8066   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
8067   __ret; \
8068 })
8069 #endif
8070 
8071 #ifdef __LITTLE_ENDIAN__
8072 #define vld1q_dup_s64(__p0) __extension__ ({ \
8073   int64x2_t __ret; \
8074   __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \
8075   __ret; \
8076 })
8077 #else
8078 #define vld1q_dup_s64(__p0) __extension__ ({ \
8079   int64x2_t __ret; \
8080   __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \
8081   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
8082   __ret; \
8083 })
8084 #endif
8085 
8086 #ifdef __LITTLE_ENDIAN__
8087 #define vld1q_dup_s16(__p0) __extension__ ({ \
8088   int16x8_t __ret; \
8089   __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \
8090   __ret; \
8091 })
8092 #else
8093 #define vld1q_dup_s16(__p0) __extension__ ({ \
8094   int16x8_t __ret; \
8095   __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \
8096   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
8097   __ret; \
8098 })
8099 #endif
8100 
8101 #ifdef __LITTLE_ENDIAN__
8102 #define vld1_dup_u8(__p0) __extension__ ({ \
8103   uint8x8_t __ret; \
8104   __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \
8105   __ret; \
8106 })
8107 #else
8108 #define vld1_dup_u8(__p0) __extension__ ({ \
8109   uint8x8_t __ret; \
8110   __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \
8111   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
8112   __ret; \
8113 })
8114 #endif
8115 
8116 #ifdef __LITTLE_ENDIAN__
8117 #define vld1_dup_u32(__p0) __extension__ ({ \
8118   uint32x2_t __ret; \
8119   __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \
8120   __ret; \
8121 })
8122 #else
8123 #define vld1_dup_u32(__p0) __extension__ ({ \
8124   uint32x2_t __ret; \
8125   __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \
8126   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
8127   __ret; \
8128 })
8129 #endif
8130 
8131 #ifdef __LITTLE_ENDIAN__
8132 #define vld1_dup_u64(__p0) __extension__ ({ \
8133   uint64x1_t __ret; \
8134   __ret = (uint64x1_t) __builtin_neon_vld1_dup_v(__p0, 19); \
8135   __ret; \
8136 })
8137 #else
8138 #define vld1_dup_u64(__p0) __extension__ ({ \
8139   uint64x1_t __ret; \
8140   __ret = (uint64x1_t) __builtin_neon_vld1_dup_v(__p0, 19); \
8141   __ret; \
8142 })
8143 #endif
8144 
8145 #ifdef __LITTLE_ENDIAN__
8146 #define vld1_dup_u16(__p0) __extension__ ({ \
8147   uint16x4_t __ret; \
8148   __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \
8149   __ret; \
8150 })
8151 #else
8152 #define vld1_dup_u16(__p0) __extension__ ({ \
8153   uint16x4_t __ret; \
8154   __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \
8155   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
8156   __ret; \
8157 })
8158 #endif
8159 
8160 #ifdef __LITTLE_ENDIAN__
8161 #define vld1_dup_s8(__p0) __extension__ ({ \
8162   int8x8_t __ret; \
8163   __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \
8164   __ret; \
8165 })
8166 #else
8167 #define vld1_dup_s8(__p0) __extension__ ({ \
8168   int8x8_t __ret; \
8169   __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \
8170   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
8171   __ret; \
8172 })
8173 #endif
8174 
8175 #ifdef __LITTLE_ENDIAN__
8176 #define vld1_dup_f32(__p0) __extension__ ({ \
8177   float32x2_t __ret; \
8178   __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \
8179   __ret; \
8180 })
8181 #else
8182 #define vld1_dup_f32(__p0) __extension__ ({ \
8183   float32x2_t __ret; \
8184   __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \
8185   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
8186   __ret; \
8187 })
8188 #endif
8189 
8190 #ifdef __LITTLE_ENDIAN__
8191 #define vld1_dup_f16(__p0) __extension__ ({ \
8192   float16x4_t __ret; \
8193   __ret = (float16x4_t) __builtin_neon_vld1_dup_v(__p0, 8); \
8194   __ret; \
8195 })
8196 #else
8197 #define vld1_dup_f16(__p0) __extension__ ({ \
8198   float16x4_t __ret; \
8199   __ret = (float16x4_t) __builtin_neon_vld1_dup_v(__p0, 8); \
8200   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
8201   __ret; \
8202 })
8203 #endif
8204 
8205 #ifdef __LITTLE_ENDIAN__
8206 #define vld1_dup_s32(__p0) __extension__ ({ \
8207   int32x2_t __ret; \
8208   __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \
8209   __ret; \
8210 })
8211 #else
8212 #define vld1_dup_s32(__p0) __extension__ ({ \
8213   int32x2_t __ret; \
8214   __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \
8215   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
8216   __ret; \
8217 })
8218 #endif
8219 
8220 #ifdef __LITTLE_ENDIAN__
8221 #define vld1_dup_s64(__p0) __extension__ ({ \
8222   int64x1_t __ret; \
8223   __ret = (int64x1_t) __builtin_neon_vld1_dup_v(__p0, 3); \
8224   __ret; \
8225 })
8226 #else
8227 #define vld1_dup_s64(__p0) __extension__ ({ \
8228   int64x1_t __ret; \
8229   __ret = (int64x1_t) __builtin_neon_vld1_dup_v(__p0, 3); \
8230   __ret; \
8231 })
8232 #endif
8233 
8234 #ifdef __LITTLE_ENDIAN__
8235 #define vld1_dup_s16(__p0) __extension__ ({ \
8236   int16x4_t __ret; \
8237   __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \
8238   __ret; \
8239 })
8240 #else
8241 #define vld1_dup_s16(__p0) __extension__ ({ \
8242   int16x4_t __ret; \
8243   __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \
8244   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
8245   __ret; \
8246 })
8247 #endif
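/* The vld1_dup_* / vld1q_dup_* macros above load a single element and
 * replicate it into every lane of the result.  Sketch (the pointer name is
 * hypothetical):
 *
 *   const float x = 1.5f;
 *   float32x4_t v = vld1q_dup_f32(&x);  // all four lanes == 1.5f
 */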
8248 
8249 #ifdef __LITTLE_ENDIAN__
8250 #define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
8251   poly8x8_t __s1 = __p1; \
8252   poly8x8_t __ret; \
8253   __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \
8254   __ret; \
8255 })
8256 #else
8257 #define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
8258   poly8x8_t __s1 = __p1; \
8259   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
8260   poly8x8_t __ret; \
8261   __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \
8262   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
8263   __ret; \
8264 })
8265 #endif
8266 
8267 #ifdef __LITTLE_ENDIAN__
8268 #define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
8269   poly16x4_t __s1 = __p1; \
8270   poly16x4_t __ret; \
8271   __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \
8272   __ret; \
8273 })
8274 #else
8275 #define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
8276   poly16x4_t __s1 = __p1; \
8277   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
8278   poly16x4_t __ret; \
8279   __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \
8280   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
8281   __ret; \
8282 })
8283 #endif
8284 
8285 #ifdef __LITTLE_ENDIAN__
8286 #define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
8287   poly8x16_t __s1 = __p1; \
8288   poly8x16_t __ret; \
8289   __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \
8290   __ret; \
8291 })
8292 #else
8293 #define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
8294   poly8x16_t __s1 = __p1; \
8295   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
8296   poly8x16_t __ret; \
8297   __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \
8298   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
8299   __ret; \
8300 })
8301 #endif
8302 
8303 #ifdef __LITTLE_ENDIAN__
8304 #define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
8305   poly16x8_t __s1 = __p1; \
8306   poly16x8_t __ret; \
8307   __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \
8308   __ret; \
8309 })
8310 #else
8311 #define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
8312   poly16x8_t __s1 = __p1; \
8313   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
8314   poly16x8_t __ret; \
8315   __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \
8316   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
8317   __ret; \
8318 })
8319 #endif
8320 
8321 #ifdef __LITTLE_ENDIAN__
8322 #define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
8323   uint8x16_t __s1 = __p1; \
8324   uint8x16_t __ret; \
8325   __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \
8326   __ret; \
8327 })
8328 #else
8329 #define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
8330   uint8x16_t __s1 = __p1; \
8331   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
8332   uint8x16_t __ret; \
8333   __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \
8334   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
8335   __ret; \
8336 })
8337 #endif
8338 
8339 #ifdef __LITTLE_ENDIAN__
8340 #define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
8341   uint32x4_t __s1 = __p1; \
8342   uint32x4_t __ret; \
8343   __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \
8344   __ret; \
8345 })
8346 #else
8347 #define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
8348   uint32x4_t __s1 = __p1; \
8349   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
8350   uint32x4_t __ret; \
8351   __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \
8352   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
8353   __ret; \
8354 })
8355 #endif
8356 
8357 #ifdef __LITTLE_ENDIAN__
8358 #define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
8359   uint64x2_t __s1 = __p1; \
8360   uint64x2_t __ret; \
8361   __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \
8362   __ret; \
8363 })
8364 #else
8365 #define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
8366   uint64x2_t __s1 = __p1; \
8367   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
8368   uint64x2_t __ret; \
8369   __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \
8370   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
8371   __ret; \
8372 })
8373 #endif
8374 
8375 #ifdef __LITTLE_ENDIAN__
8376 #define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
8377   uint16x8_t __s1 = __p1; \
8378   uint16x8_t __ret; \
8379   __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \
8380   __ret; \
8381 })
8382 #else
8383 #define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
8384   uint16x8_t __s1 = __p1; \
8385   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
8386   uint16x8_t __ret; \
8387   __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \
8388   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
8389   __ret; \
8390 })
8391 #endif
8392 
8393 #ifdef __LITTLE_ENDIAN__
8394 #define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
8395   int8x16_t __s1 = __p1; \
8396   int8x16_t __ret; \
8397   __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \
8398   __ret; \
8399 })
8400 #else
8401 #define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
8402   int8x16_t __s1 = __p1; \
8403   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
8404   int8x16_t __ret; \
8405   __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \
8406   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
8407   __ret; \
8408 })
8409 #endif
8410 
8411 #ifdef __LITTLE_ENDIAN__
8412 #define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
8413   float32x4_t __s1 = __p1; \
8414   float32x4_t __ret; \
8415   __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \
8416   __ret; \
8417 })
8418 #else
8419 #define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
8420   float32x4_t __s1 = __p1; \
8421   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
8422   float32x4_t __ret; \
8423   __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \
8424   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
8425   __ret; \
8426 })
8427 #endif
8428 
8429 #ifdef __LITTLE_ENDIAN__
8430 #define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
8431   float16x8_t __s1 = __p1; \
8432   float16x8_t __ret; \
8433   __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 40); \
8434   __ret; \
8435 })
8436 #else
8437 #define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
8438   float16x8_t __s1 = __p1; \
8439   float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
8440   float16x8_t __ret; \
8441   __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \
8442   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
8443   __ret; \
8444 })
8445 #endif
8446 
8447 #ifdef __LITTLE_ENDIAN__
8448 #define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
8449   int32x4_t __s1 = __p1; \
8450   int32x4_t __ret; \
8451   __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \
8452   __ret; \
8453 })
8454 #else
8455 #define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
8456   int32x4_t __s1 = __p1; \
8457   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
8458   int32x4_t __ret; \
8459   __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \
8460   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
8461   __ret; \
8462 })
8463 #endif
8464 
8465 #ifdef __LITTLE_ENDIAN__
8466 #define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
8467   int64x2_t __s1 = __p1; \
8468   int64x2_t __ret; \
8469   __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \
8470   __ret; \
8471 })
8472 #else
8473 #define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
8474   int64x2_t __s1 = __p1; \
8475   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
8476   int64x2_t __ret; \
8477   __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \
8478   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
8479   __ret; \
8480 })
8481 #endif
8482 
8483 #ifdef __LITTLE_ENDIAN__
8484 #define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
8485   int16x8_t __s1 = __p1; \
8486   int16x8_t __ret; \
8487   __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \
8488   __ret; \
8489 })
8490 #else
8491 #define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
8492   int16x8_t __s1 = __p1; \
8493   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
8494   int16x8_t __ret; \
8495   __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \
8496   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
8497   __ret; \
8498 })
8499 #endif
8500 
8501 #ifdef __LITTLE_ENDIAN__
8502 #define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
8503   uint8x8_t __s1 = __p1; \
8504   uint8x8_t __ret; \
8505   __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \
8506   __ret; \
8507 })
8508 #else
8509 #define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
8510   uint8x8_t __s1 = __p1; \
8511   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
8512   uint8x8_t __ret; \
8513   __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \
8514   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
8515   __ret; \
8516 })
8517 #endif
8518 
8519 #ifdef __LITTLE_ENDIAN__
8520 #define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
8521   uint32x2_t __s1 = __p1; \
8522   uint32x2_t __ret; \
8523   __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \
8524   __ret; \
8525 })
8526 #else
8527 #define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
8528   uint32x2_t __s1 = __p1; \
8529   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
8530   uint32x2_t __ret; \
8531   __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \
8532   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
8533   __ret; \
8534 })
8535 #endif
8536 
8537 #ifdef __LITTLE_ENDIAN__
8538 #define vld1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
8539   uint64x1_t __s1 = __p1; \
8540   uint64x1_t __ret; \
8541   __ret = (uint64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
8542   __ret; \
8543 })
8544 #else
8545 #define vld1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
8546   uint64x1_t __s1 = __p1; \
8547   uint64x1_t __ret; \
8548   __ret = (uint64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
8549   __ret; \
8550 })
8551 #endif
8552 
8553 #ifdef __LITTLE_ENDIAN__
8554 #define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
8555   uint16x4_t __s1 = __p1; \
8556   uint16x4_t __ret; \
8557   __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \
8558   __ret; \
8559 })
8560 #else
8561 #define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
8562   uint16x4_t __s1 = __p1; \
8563   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
8564   uint16x4_t __ret; \
8565   __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \
8566   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
8567   __ret; \
8568 })
8569 #endif
8570 
8571 #ifdef __LITTLE_ENDIAN__
8572 #define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
8573   int8x8_t __s1 = __p1; \
8574   int8x8_t __ret; \
8575   __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \
8576   __ret; \
8577 })
8578 #else
8579 #define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
8580   int8x8_t __s1 = __p1; \
8581   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
8582   int8x8_t __ret; \
8583   __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \
8584   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
8585   __ret; \
8586 })
8587 #endif
8588 
8589 #ifdef __LITTLE_ENDIAN__
8590 #define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
8591   float32x2_t __s1 = __p1; \
8592   float32x2_t __ret; \
8593   __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \
8594   __ret; \
8595 })
8596 #else
8597 #define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
8598   float32x2_t __s1 = __p1; \
8599   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
8600   float32x2_t __ret; \
8601   __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \
8602   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
8603   __ret; \
8604 })
8605 #endif
8606 
8607 #ifdef __LITTLE_ENDIAN__
8608 #define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
8609   float16x4_t __s1 = __p1; \
8610   float16x4_t __ret; \
8611   __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 8); \
8612   __ret; \
8613 })
8614 #else
8615 #define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
8616   float16x4_t __s1 = __p1; \
8617   float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
8618   float16x4_t __ret; \
8619   __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \
8620   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
8621   __ret; \
8622 })
8623 #endif
8624 
8625 #ifdef __LITTLE_ENDIAN__
8626 #define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
8627   int32x2_t __s1 = __p1; \
8628   int32x2_t __ret; \
8629   __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \
8630   __ret; \
8631 })
8632 #else
8633 #define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
8634   int32x2_t __s1 = __p1; \
8635   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
8636   int32x2_t __ret; \
8637   __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \
8638   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
8639   __ret; \
8640 })
8641 #endif
8642 
8643 #ifdef __LITTLE_ENDIAN__
8644 #define vld1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
8645   int64x1_t __s1 = __p1; \
8646   int64x1_t __ret; \
8647   __ret = (int64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
8648   __ret; \
8649 })
8650 #else
8651 #define vld1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
8652   int64x1_t __s1 = __p1; \
8653   int64x1_t __ret; \
8654   __ret = (int64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
8655   __ret; \
8656 })
8657 #endif
8658 
8659 #ifdef __LITTLE_ENDIAN__
8660 #define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
8661   int16x4_t __s1 = __p1; \
8662   int16x4_t __ret; \
8663   __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \
8664   __ret; \
8665 })
8666 #else
8667 #define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
8668   int16x4_t __s1 = __p1; \
8669   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
8670   int16x4_t __ret; \
8671   __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \
8672   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
8673   __ret; \
8674 })
8675 #endif
8676 
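/*
 * Usage sketch (editorial addition, not part of the upstream header):
 * vld1_lane_* reads one element from memory into lane __p2 of the vector
 * passed as the second argument and leaves the remaining lanes unchanged.
 * The helper name below is hypothetical and exists only for illustration.
 */
static __inline__ uint16x4_t __example_insert_lane_u16(const uint16_t *__p,
                                                       uint16x4_t __v) {
  /* Overwrite lane 2 of __v with *__p; lanes 0, 1 and 3 are preserved. */
  return vld1_lane_u16(__p, __v, 2);
}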
8677 #ifdef __LITTLE_ENDIAN__
8678 #define vld2_p8(__p0) __extension__ ({ \
8679   poly8x8x2_t __ret; \
8680   __builtin_neon_vld2_v(&__ret, __p0, 4); \
8681   __ret; \
8682 })
8683 #else
8684 #define vld2_p8(__p0) __extension__ ({ \
8685   poly8x8x2_t __ret; \
8686   __builtin_neon_vld2_v(&__ret, __p0, 4); \
8687  \
8688   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
8689   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
8690   __ret; \
8691 })
8692 #endif
8693 
8694 #ifdef __LITTLE_ENDIAN__
8695 #define vld2_p16(__p0) __extension__ ({ \
8696   poly16x4x2_t __ret; \
8697   __builtin_neon_vld2_v(&__ret, __p0, 5); \
8698   __ret; \
8699 })
8700 #else
8701 #define vld2_p16(__p0) __extension__ ({ \
8702   poly16x4x2_t __ret; \
8703   __builtin_neon_vld2_v(&__ret, __p0, 5); \
8704  \
8705   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
8706   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
8707   __ret; \
8708 })
8709 #endif
8710 
8711 #ifdef __LITTLE_ENDIAN__
8712 #define vld2q_p8(__p0) __extension__ ({ \
8713   poly8x16x2_t __ret; \
8714   __builtin_neon_vld2q_v(&__ret, __p0, 36); \
8715   __ret; \
8716 })
8717 #else
8718 #define vld2q_p8(__p0) __extension__ ({ \
8719   poly8x16x2_t __ret; \
8720   __builtin_neon_vld2q_v(&__ret, __p0, 36); \
8721  \
8722   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
8723   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
8724   __ret; \
8725 })
8726 #endif
8727 
8728 #ifdef __LITTLE_ENDIAN__
8729 #define vld2q_p16(__p0) __extension__ ({ \
8730   poly16x8x2_t __ret; \
8731   __builtin_neon_vld2q_v(&__ret, __p0, 37); \
8732   __ret; \
8733 })
8734 #else
8735 #define vld2q_p16(__p0) __extension__ ({ \
8736   poly16x8x2_t __ret; \
8737   __builtin_neon_vld2q_v(&__ret, __p0, 37); \
8738  \
8739   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
8740   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
8741   __ret; \
8742 })
8743 #endif
8744 
8745 #ifdef __LITTLE_ENDIAN__
8746 #define vld2q_u8(__p0) __extension__ ({ \
8747   uint8x16x2_t __ret; \
8748   __builtin_neon_vld2q_v(&__ret, __p0, 48); \
8749   __ret; \
8750 })
8751 #else
8752 #define vld2q_u8(__p0) __extension__ ({ \
8753   uint8x16x2_t __ret; \
8754   __builtin_neon_vld2q_v(&__ret, __p0, 48); \
8755  \
8756   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
8757   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
8758   __ret; \
8759 })
8760 #endif
8761 
8762 #ifdef __LITTLE_ENDIAN__
8763 #define vld2q_u32(__p0) __extension__ ({ \
8764   uint32x4x2_t __ret; \
8765   __builtin_neon_vld2q_v(&__ret, __p0, 50); \
8766   __ret; \
8767 })
8768 #else
8769 #define vld2q_u32(__p0) __extension__ ({ \
8770   uint32x4x2_t __ret; \
8771   __builtin_neon_vld2q_v(&__ret, __p0, 50); \
8772  \
8773   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
8774   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
8775   __ret; \
8776 })
8777 #endif
8778 
8779 #ifdef __LITTLE_ENDIAN__
8780 #define vld2q_u16(__p0) __extension__ ({ \
8781   uint16x8x2_t __ret; \
8782   __builtin_neon_vld2q_v(&__ret, __p0, 49); \
8783   __ret; \
8784 })
8785 #else
8786 #define vld2q_u16(__p0) __extension__ ({ \
8787   uint16x8x2_t __ret; \
8788   __builtin_neon_vld2q_v(&__ret, __p0, 49); \
8789  \
8790   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
8791   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
8792   __ret; \
8793 })
8794 #endif
8795 
8796 #ifdef __LITTLE_ENDIAN__
8797 #define vld2q_s8(__p0) __extension__ ({ \
8798   int8x16x2_t __ret; \
8799   __builtin_neon_vld2q_v(&__ret, __p0, 32); \
8800   __ret; \
8801 })
8802 #else
8803 #define vld2q_s8(__p0) __extension__ ({ \
8804   int8x16x2_t __ret; \
8805   __builtin_neon_vld2q_v(&__ret, __p0, 32); \
8806  \
8807   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
8808   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
8809   __ret; \
8810 })
8811 #endif
8812 
8813 #ifdef __LITTLE_ENDIAN__
8814 #define vld2q_f32(__p0) __extension__ ({ \
8815   float32x4x2_t __ret; \
8816   __builtin_neon_vld2q_v(&__ret, __p0, 41); \
8817   __ret; \
8818 })
8819 #else
8820 #define vld2q_f32(__p0) __extension__ ({ \
8821   float32x4x2_t __ret; \
8822   __builtin_neon_vld2q_v(&__ret, __p0, 41); \
8823  \
8824   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
8825   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
8826   __ret; \
8827 })
8828 #endif
8829 
8830 #ifdef __LITTLE_ENDIAN__
8831 #define vld2q_f16(__p0) __extension__ ({ \
8832   float16x8x2_t __ret; \
8833   __builtin_neon_vld2q_v(&__ret, __p0, 40); \
8834   __ret; \
8835 })
8836 #else
8837 #define vld2q_f16(__p0) __extension__ ({ \
8838   float16x8x2_t __ret; \
8839   __builtin_neon_vld2q_v(&__ret, __p0, 40); \
8840  \
8841   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
8842   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
8843   __ret; \
8844 })
8845 #endif
8846 
8847 #ifdef __LITTLE_ENDIAN__
8848 #define vld2q_s32(__p0) __extension__ ({ \
8849   int32x4x2_t __ret; \
8850   __builtin_neon_vld2q_v(&__ret, __p0, 34); \
8851   __ret; \
8852 })
8853 #else
8854 #define vld2q_s32(__p0) __extension__ ({ \
8855   int32x4x2_t __ret; \
8856   __builtin_neon_vld2q_v(&__ret, __p0, 34); \
8857  \
8858   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
8859   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
8860   __ret; \
8861 })
8862 #endif
8863 
8864 #ifdef __LITTLE_ENDIAN__
8865 #define vld2q_s16(__p0) __extension__ ({ \
8866   int16x8x2_t __ret; \
8867   __builtin_neon_vld2q_v(&__ret, __p0, 33); \
8868   __ret; \
8869 })
8870 #else
8871 #define vld2q_s16(__p0) __extension__ ({ \
8872   int16x8x2_t __ret; \
8873   __builtin_neon_vld2q_v(&__ret, __p0, 33); \
8874  \
8875   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
8876   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
8877   __ret; \
8878 })
8879 #endif
8880 
8881 #ifdef __LITTLE_ENDIAN__
8882 #define vld2_u8(__p0) __extension__ ({ \
8883   uint8x8x2_t __ret; \
8884   __builtin_neon_vld2_v(&__ret, __p0, 16); \
8885   __ret; \
8886 })
8887 #else
8888 #define vld2_u8(__p0) __extension__ ({ \
8889   uint8x8x2_t __ret; \
8890   __builtin_neon_vld2_v(&__ret, __p0, 16); \
8891  \
8892   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
8893   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
8894   __ret; \
8895 })
8896 #endif
8897 
8898 #ifdef __LITTLE_ENDIAN__
8899 #define vld2_u32(__p0) __extension__ ({ \
8900   uint32x2x2_t __ret; \
8901   __builtin_neon_vld2_v(&__ret, __p0, 18); \
8902   __ret; \
8903 })
8904 #else
8905 #define vld2_u32(__p0) __extension__ ({ \
8906   uint32x2x2_t __ret; \
8907   __builtin_neon_vld2_v(&__ret, __p0, 18); \
8908  \
8909   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
8910   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
8911   __ret; \
8912 })
8913 #endif
8914 
8915 #ifdef __LITTLE_ENDIAN__
8916 #define vld2_u64(__p0) __extension__ ({ \
8917   uint64x1x2_t __ret; \
8918   __builtin_neon_vld2_v(&__ret, __p0, 19); \
8919   __ret; \
8920 })
8921 #else
8922 #define vld2_u64(__p0) __extension__ ({ \
8923   uint64x1x2_t __ret; \
8924   __builtin_neon_vld2_v(&__ret, __p0, 19); \
8925   __ret; \
8926 })
8927 #endif
8928 
8929 #ifdef __LITTLE_ENDIAN__
8930 #define vld2_u16(__p0) __extension__ ({ \
8931   uint16x4x2_t __ret; \
8932   __builtin_neon_vld2_v(&__ret, __p0, 17); \
8933   __ret; \
8934 })
8935 #else
8936 #define vld2_u16(__p0) __extension__ ({ \
8937   uint16x4x2_t __ret; \
8938   __builtin_neon_vld2_v(&__ret, __p0, 17); \
8939  \
8940   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
8941   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
8942   __ret; \
8943 })
8944 #endif
8945 
8946 #ifdef __LITTLE_ENDIAN__
8947 #define vld2_s8(__p0) __extension__ ({ \
8948   int8x8x2_t __ret; \
8949   __builtin_neon_vld2_v(&__ret, __p0, 0); \
8950   __ret; \
8951 })
8952 #else
8953 #define vld2_s8(__p0) __extension__ ({ \
8954   int8x8x2_t __ret; \
8955   __builtin_neon_vld2_v(&__ret, __p0, 0); \
8956  \
8957   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
8958   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
8959   __ret; \
8960 })
8961 #endif
8962 
8963 #ifdef __LITTLE_ENDIAN__
8964 #define vld2_f32(__p0) __extension__ ({ \
8965   float32x2x2_t __ret; \
8966   __builtin_neon_vld2_v(&__ret, __p0, 9); \
8967   __ret; \
8968 })
8969 #else
8970 #define vld2_f32(__p0) __extension__ ({ \
8971   float32x2x2_t __ret; \
8972   __builtin_neon_vld2_v(&__ret, __p0, 9); \
8973  \
8974   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
8975   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
8976   __ret; \
8977 })
8978 #endif
8979 
8980 #ifdef __LITTLE_ENDIAN__
8981 #define vld2_f16(__p0) __extension__ ({ \
8982   float16x4x2_t __ret; \
8983   __builtin_neon_vld2_v(&__ret, __p0, 8); \
8984   __ret; \
8985 })
8986 #else
8987 #define vld2_f16(__p0) __extension__ ({ \
8988   float16x4x2_t __ret; \
8989   __builtin_neon_vld2_v(&__ret, __p0, 8); \
8990  \
8991   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
8992   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
8993   __ret; \
8994 })
8995 #endif
8996 
8997 #ifdef __LITTLE_ENDIAN__
8998 #define vld2_s32(__p0) __extension__ ({ \
8999   int32x2x2_t __ret; \
9000   __builtin_neon_vld2_v(&__ret, __p0, 2); \
9001   __ret; \
9002 })
9003 #else
9004 #define vld2_s32(__p0) __extension__ ({ \
9005   int32x2x2_t __ret; \
9006   __builtin_neon_vld2_v(&__ret, __p0, 2); \
9007  \
9008   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
9009   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
9010   __ret; \
9011 })
9012 #endif
9013 
9014 #ifdef __LITTLE_ENDIAN__
9015 #define vld2_s64(__p0) __extension__ ({ \
9016   int64x1x2_t __ret; \
9017   __builtin_neon_vld2_v(&__ret, __p0, 3); \
9018   __ret; \
9019 })
9020 #else
9021 #define vld2_s64(__p0) __extension__ ({ \
9022   int64x1x2_t __ret; \
9023   __builtin_neon_vld2_v(&__ret, __p0, 3); \
9024   __ret; \
9025 })
9026 #endif
9027 
9028 #ifdef __LITTLE_ENDIAN__
9029 #define vld2_s16(__p0) __extension__ ({ \
9030   int16x4x2_t __ret; \
9031   __builtin_neon_vld2_v(&__ret, __p0, 1); \
9032   __ret; \
9033 })
9034 #else
9035 #define vld2_s16(__p0) __extension__ ({ \
9036   int16x4x2_t __ret; \
9037   __builtin_neon_vld2_v(&__ret, __p0, 1); \
9038  \
9039   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9040   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9041   __ret; \
9042 })
9043 #endif
9044 
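/*
 * Usage sketch (editorial addition, not part of the upstream header):
 * vld2_* / vld2q_* perform a de-interleaving load: 2*N consecutive elements
 * are split so that val[0] receives the even-indexed elements and val[1] the
 * odd-indexed ones.  The helper name below is hypothetical.
 */
static __inline__ uint8x8x2_t __example_deinterleave_u8(const uint8_t *__p) {
  /* Loads 16 bytes from __p and splits them into two 8-lane vectors. */
  return vld2_u8(__p);
}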
9045 #ifdef __LITTLE_ENDIAN__
9046 #define vld2_dup_p8(__p0) __extension__ ({ \
9047   poly8x8x2_t __ret; \
9048   __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \
9049   __ret; \
9050 })
9051 #else
9052 #define vld2_dup_p8(__p0) __extension__ ({ \
9053   poly8x8x2_t __ret; \
9054   __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \
9055  \
9056   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9057   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9058   __ret; \
9059 })
9060 #endif
9061 
9062 #ifdef __LITTLE_ENDIAN__
9063 #define vld2_dup_p16(__p0) __extension__ ({ \
9064   poly16x4x2_t __ret; \
9065   __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \
9066   __ret; \
9067 })
9068 #else
9069 #define vld2_dup_p16(__p0) __extension__ ({ \
9070   poly16x4x2_t __ret; \
9071   __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \
9072  \
9073   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9074   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9075   __ret; \
9076 })
9077 #endif
9078 
9079 #ifdef __LITTLE_ENDIAN__
9080 #define vld2_dup_u8(__p0) __extension__ ({ \
9081   uint8x8x2_t __ret; \
9082   __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \
9083   __ret; \
9084 })
9085 #else
9086 #define vld2_dup_u8(__p0) __extension__ ({ \
9087   uint8x8x2_t __ret; \
9088   __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \
9089  \
9090   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9091   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9092   __ret; \
9093 })
9094 #endif
9095 
9096 #ifdef __LITTLE_ENDIAN__
9097 #define vld2_dup_u32(__p0) __extension__ ({ \
9098   uint32x2x2_t __ret; \
9099   __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \
9100   __ret; \
9101 })
9102 #else
9103 #define vld2_dup_u32(__p0) __extension__ ({ \
9104   uint32x2x2_t __ret; \
9105   __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \
9106  \
9107   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
9108   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
9109   __ret; \
9110 })
9111 #endif
9112 
9113 #ifdef __LITTLE_ENDIAN__
9114 #define vld2_dup_u64(__p0) __extension__ ({ \
9115   uint64x1x2_t __ret; \
9116   __builtin_neon_vld2_dup_v(&__ret, __p0, 19); \
9117   __ret; \
9118 })
9119 #else
9120 #define vld2_dup_u64(__p0) __extension__ ({ \
9121   uint64x1x2_t __ret; \
9122   __builtin_neon_vld2_dup_v(&__ret, __p0, 19); \
9123   __ret; \
9124 })
9125 #endif
9126 
9127 #ifdef __LITTLE_ENDIAN__
9128 #define vld2_dup_u16(__p0) __extension__ ({ \
9129   uint16x4x2_t __ret; \
9130   __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \
9131   __ret; \
9132 })
9133 #else
9134 #define vld2_dup_u16(__p0) __extension__ ({ \
9135   uint16x4x2_t __ret; \
9136   __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \
9137  \
9138   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9139   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9140   __ret; \
9141 })
9142 #endif
9143 
9144 #ifdef __LITTLE_ENDIAN__
9145 #define vld2_dup_s8(__p0) __extension__ ({ \
9146   int8x8x2_t __ret; \
9147   __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \
9148   __ret; \
9149 })
9150 #else
9151 #define vld2_dup_s8(__p0) __extension__ ({ \
9152   int8x8x2_t __ret; \
9153   __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \
9154  \
9155   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9156   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9157   __ret; \
9158 })
9159 #endif
9160 
9161 #ifdef __LITTLE_ENDIAN__
9162 #define vld2_dup_f32(__p0) __extension__ ({ \
9163   float32x2x2_t __ret; \
9164   __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \
9165   __ret; \
9166 })
9167 #else
9168 #define vld2_dup_f32(__p0) __extension__ ({ \
9169   float32x2x2_t __ret; \
9170   __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \
9171  \
9172   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
9173   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
9174   __ret; \
9175 })
9176 #endif
9177 
9178 #ifdef __LITTLE_ENDIAN__
9179 #define vld2_dup_f16(__p0) __extension__ ({ \
9180   float16x4x2_t __ret; \
9181   __builtin_neon_vld2_dup_v(&__ret, __p0, 8); \
9182   __ret; \
9183 })
9184 #else
9185 #define vld2_dup_f16(__p0) __extension__ ({ \
9186   float16x4x2_t __ret; \
9187   __builtin_neon_vld2_dup_v(&__ret, __p0, 8); \
9188  \
9189   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9190   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9191   __ret; \
9192 })
9193 #endif
9194 
9195 #ifdef __LITTLE_ENDIAN__
9196 #define vld2_dup_s32(__p0) __extension__ ({ \
9197   int32x2x2_t __ret; \
9198   __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \
9199   __ret; \
9200 })
9201 #else
9202 #define vld2_dup_s32(__p0) __extension__ ({ \
9203   int32x2x2_t __ret; \
9204   __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \
9205  \
9206   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
9207   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
9208   __ret; \
9209 })
9210 #endif
9211 
9212 #ifdef __LITTLE_ENDIAN__
9213 #define vld2_dup_s64(__p0) __extension__ ({ \
9214   int64x1x2_t __ret; \
9215   __builtin_neon_vld2_dup_v(&__ret, __p0, 3); \
9216   __ret; \
9217 })
9218 #else
9219 #define vld2_dup_s64(__p0) __extension__ ({ \
9220   int64x1x2_t __ret; \
9221   __builtin_neon_vld2_dup_v(&__ret, __p0, 3); \
9222   __ret; \
9223 })
9224 #endif
9225 
9226 #ifdef __LITTLE_ENDIAN__
9227 #define vld2_dup_s16(__p0) __extension__ ({ \
9228   int16x4x2_t __ret; \
9229   __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \
9230   __ret; \
9231 })
9232 #else
9233 #define vld2_dup_s16(__p0) __extension__ ({ \
9234   int16x4x2_t __ret; \
9235   __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \
9236  \
9237   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9238   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9239   __ret; \
9240 })
9241 #endif
9242 
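/*
 * Usage sketch (editorial addition, not part of the upstream header):
 * vld2_dup_* loads one element pair from memory and broadcasts it, so every
 * lane of val[0] holds the first element and every lane of val[1] the second.
 * The helper name below is hypothetical.
 */
static __inline__ float32x2x2_t __example_dup_pair_f32(const float32_t *__p) {
  /* __p[0] is replicated across val[0], __p[1] across val[1]. */
  return vld2_dup_f32(__p);
}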
9243 #ifdef __LITTLE_ENDIAN__
9244 #define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
9245   poly8x8x2_t __s1 = __p1; \
9246   poly8x8x2_t __ret; \
9247   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \
9248   __ret; \
9249 })
9250 #else
9251 #define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
9252   poly8x8x2_t __s1 = __p1; \
9253   poly8x8x2_t __rev1; \
9254   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9255   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9256   poly8x8x2_t __ret; \
9257   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \
9258  \
9259   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9260   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9261   __ret; \
9262 })
9263 #endif
9264 
9265 #ifdef __LITTLE_ENDIAN__
9266 #define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
9267   poly16x4x2_t __s1 = __p1; \
9268   poly16x4x2_t __ret; \
9269   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \
9270   __ret; \
9271 })
9272 #else
9273 #define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
9274   poly16x4x2_t __s1 = __p1; \
9275   poly16x4x2_t __rev1; \
9276   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
9277   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
9278   poly16x4x2_t __ret; \
9279   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \
9280  \
9281   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9282   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9283   __ret; \
9284 })
9285 #endif
9286 
9287 #ifdef __LITTLE_ENDIAN__
9288 #define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
9289   poly16x8x2_t __s1 = __p1; \
9290   poly16x8x2_t __ret; \
9291   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \
9292   __ret; \
9293 })
9294 #else
9295 #define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
9296   poly16x8x2_t __s1 = __p1; \
9297   poly16x8x2_t __rev1; \
9298   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9299   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9300   poly16x8x2_t __ret; \
9301   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \
9302  \
9303   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9304   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9305   __ret; \
9306 })
9307 #endif
9308 
9309 #ifdef __LITTLE_ENDIAN__
9310 #define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
9311   uint32x4x2_t __s1 = __p1; \
9312   uint32x4x2_t __ret; \
9313   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \
9314   __ret; \
9315 })
9316 #else
9317 #define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
9318   uint32x4x2_t __s1 = __p1; \
9319   uint32x4x2_t __rev1; \
9320   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
9321   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
9322   uint32x4x2_t __ret; \
9323   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \
9324  \
9325   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9326   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9327   __ret; \
9328 })
9329 #endif
9330 
9331 #ifdef __LITTLE_ENDIAN__
9332 #define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
9333   uint16x8x2_t __s1 = __p1; \
9334   uint16x8x2_t __ret; \
9335   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \
9336   __ret; \
9337 })
9338 #else
9339 #define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
9340   uint16x8x2_t __s1 = __p1; \
9341   uint16x8x2_t __rev1; \
9342   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9343   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9344   uint16x8x2_t __ret; \
9345   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \
9346  \
9347   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9348   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9349   __ret; \
9350 })
9351 #endif
9352 
9353 #ifdef __LITTLE_ENDIAN__
9354 #define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
9355   float32x4x2_t __s1 = __p1; \
9356   float32x4x2_t __ret; \
9357   __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 41); \
9358   __ret; \
9359 })
9360 #else
9361 #define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
9362   float32x4x2_t __s1 = __p1; \
9363   float32x4x2_t __rev1; \
9364   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
9365   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
9366   float32x4x2_t __ret; \
9367   __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 41); \
9368  \
9369   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9370   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9371   __ret; \
9372 })
9373 #endif
9374 
9375 #ifdef __LITTLE_ENDIAN__
9376 #define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
9377   float16x8x2_t __s1 = __p1; \
9378   float16x8x2_t __ret; \
9379   __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 40); \
9380   __ret; \
9381 })
9382 #else
9383 #define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
9384   float16x8x2_t __s1 = __p1; \
9385   float16x8x2_t __rev1; \
9386   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9387   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9388   float16x8x2_t __ret; \
9389   __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 40); \
9390  \
9391   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9392   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9393   __ret; \
9394 })
9395 #endif
9396 
9397 #ifdef __LITTLE_ENDIAN__
9398 #define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
9399   int32x4x2_t __s1 = __p1; \
9400   int32x4x2_t __ret; \
9401   __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 34); \
9402   __ret; \
9403 })
9404 #else
9405 #define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
9406   int32x4x2_t __s1 = __p1; \
9407   int32x4x2_t __rev1; \
9408   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
9409   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
9410   int32x4x2_t __ret; \
9411   __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 34); \
9412  \
9413   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9414   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9415   __ret; \
9416 })
9417 #endif
9418 
9419 #ifdef __LITTLE_ENDIAN__
9420 #define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
9421   int16x8x2_t __s1 = __p1; \
9422   int16x8x2_t __ret; \
9423   __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 33); \
9424   __ret; \
9425 })
9426 #else
9427 #define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
9428   int16x8x2_t __s1 = __p1; \
9429   int16x8x2_t __rev1; \
9430   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9431   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9432   int16x8x2_t __ret; \
9433   __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 33); \
9434  \
9435   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9436   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9437   __ret; \
9438 })
9439 #endif
9440 
9441 #ifdef __LITTLE_ENDIAN__
9442 #define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
9443   uint8x8x2_t __s1 = __p1; \
9444   uint8x8x2_t __ret; \
9445   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \
9446   __ret; \
9447 })
9448 #else
9449 #define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
9450   uint8x8x2_t __s1 = __p1; \
9451   uint8x8x2_t __rev1; \
9452   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9453   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9454   uint8x8x2_t __ret; \
9455   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \
9456  \
9457   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9458   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9459   __ret; \
9460 })
9461 #endif
9462 
9463 #ifdef __LITTLE_ENDIAN__
9464 #define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
9465   uint32x2x2_t __s1 = __p1; \
9466   uint32x2x2_t __ret; \
9467   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \
9468   __ret; \
9469 })
9470 #else
9471 #define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
9472   uint32x2x2_t __s1 = __p1; \
9473   uint32x2x2_t __rev1; \
9474   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
9475   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
9476   uint32x2x2_t __ret; \
9477   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \
9478  \
9479   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
9480   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
9481   __ret; \
9482 })
9483 #endif
9484 
9485 #ifdef __LITTLE_ENDIAN__
9486 #define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
9487   uint16x4x2_t __s1 = __p1; \
9488   uint16x4x2_t __ret; \
9489   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \
9490   __ret; \
9491 })
9492 #else
9493 #define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
9494   uint16x4x2_t __s1 = __p1; \
9495   uint16x4x2_t __rev1; \
9496   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
9497   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
9498   uint16x4x2_t __ret; \
9499   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \
9500  \
9501   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9502   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9503   __ret; \
9504 })
9505 #endif
9506 
9507 #ifdef __LITTLE_ENDIAN__
9508 #define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
9509   int8x8x2_t __s1 = __p1; \
9510   int8x8x2_t __ret; \
9511   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \
9512   __ret; \
9513 })
9514 #else
9515 #define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
9516   int8x8x2_t __s1 = __p1; \
9517   int8x8x2_t __rev1; \
9518   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9519   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9520   int8x8x2_t __ret; \
9521   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \
9522  \
9523   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9524   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9525   __ret; \
9526 })
9527 #endif
9528 
9529 #ifdef __LITTLE_ENDIAN__
9530 #define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
9531   float32x2x2_t __s1 = __p1; \
9532   float32x2x2_t __ret; \
9533   __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 9); \
9534   __ret; \
9535 })
9536 #else
9537 #define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
9538   float32x2x2_t __s1 = __p1; \
9539   float32x2x2_t __rev1; \
9540   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
9541   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
9542   float32x2x2_t __ret; \
9543   __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 9); \
9544  \
9545   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
9546   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
9547   __ret; \
9548 })
9549 #endif
9550 
9551 #ifdef __LITTLE_ENDIAN__
9552 #define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
9553   float16x4x2_t __s1 = __p1; \
9554   float16x4x2_t __ret; \
9555   __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 8); \
9556   __ret; \
9557 })
9558 #else
9559 #define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
9560   float16x4x2_t __s1 = __p1; \
9561   float16x4x2_t __rev1; \
9562   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
9563   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
9564   float16x4x2_t __ret; \
9565   __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 8); \
9566  \
9567   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9568   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9569   __ret; \
9570 })
9571 #endif
9572 
9573 #ifdef __LITTLE_ENDIAN__
9574 #define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
9575   int32x2x2_t __s1 = __p1; \
9576   int32x2x2_t __ret; \
9577   __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 2); \
9578   __ret; \
9579 })
9580 #else
9581 #define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
9582   int32x2x2_t __s1 = __p1; \
9583   int32x2x2_t __rev1; \
9584   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
9585   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
9586   int32x2x2_t __ret; \
9587   __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 2); \
9588  \
9589   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
9590   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
9591   __ret; \
9592 })
9593 #endif
9594 
9595 #ifdef __LITTLE_ENDIAN__
9596 #define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
9597   int16x4x2_t __s1 = __p1; \
9598   int16x4x2_t __ret; \
9599   __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 1); \
9600   __ret; \
9601 })
9602 #else
9603 #define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
9604   int16x4x2_t __s1 = __p1; \
9605   int16x4x2_t __rev1; \
9606   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
9607   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
9608   int16x4x2_t __ret; \
9609   __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 1); \
9610  \
9611   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9612   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9613   __ret; \
9614 })
9615 #endif
9616 
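/*
 * Usage sketch (editorial addition, not part of the upstream header):
 * vld2_lane_* takes an existing pair of vectors, loads two elements from
 * memory, and writes them into lane __p2 of val[0] and val[1] respectively;
 * all other lanes are passed through.  The helper name below is hypothetical.
 */
static __inline__ int16x4x2_t __example_load_pair_lane_s16(const int16_t *__p,
                                                           int16x4x2_t __v) {
  /* Replace lane 1 of both vectors in __v with __p[0] and __p[1]. */
  return vld2_lane_s16(__p, __v, 1);
}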
9617 #ifdef __LITTLE_ENDIAN__
9618 #define vld3_p8(__p0) __extension__ ({ \
9619   poly8x8x3_t __ret; \
9620   __builtin_neon_vld3_v(&__ret, __p0, 4); \
9621   __ret; \
9622 })
9623 #else
9624 #define vld3_p8(__p0) __extension__ ({ \
9625   poly8x8x3_t __ret; \
9626   __builtin_neon_vld3_v(&__ret, __p0, 4); \
9627  \
9628   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9629   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9630   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
9631   __ret; \
9632 })
9633 #endif
9634 
9635 #ifdef __LITTLE_ENDIAN__
9636 #define vld3_p16(__p0) __extension__ ({ \
9637   poly16x4x3_t __ret; \
9638   __builtin_neon_vld3_v(&__ret, __p0, 5); \
9639   __ret; \
9640 })
9641 #else
9642 #define vld3_p16(__p0) __extension__ ({ \
9643   poly16x4x3_t __ret; \
9644   __builtin_neon_vld3_v(&__ret, __p0, 5); \
9645  \
9646   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9647   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9648   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
9649   __ret; \
9650 })
9651 #endif
9652 
9653 #ifdef __LITTLE_ENDIAN__
9654 #define vld3q_p8(__p0) __extension__ ({ \
9655   poly8x16x3_t __ret; \
9656   __builtin_neon_vld3q_v(&__ret, __p0, 36); \
9657   __ret; \
9658 })
9659 #else
9660 #define vld3q_p8(__p0) __extension__ ({ \
9661   poly8x16x3_t __ret; \
9662   __builtin_neon_vld3q_v(&__ret, __p0, 36); \
9663  \
9664   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
9665   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
9666   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
9667   __ret; \
9668 })
9669 #endif
9670 
9671 #ifdef __LITTLE_ENDIAN__
9672 #define vld3q_p16(__p0) __extension__ ({ \
9673   poly16x8x3_t __ret; \
9674   __builtin_neon_vld3q_v(&__ret, __p0, 37); \
9675   __ret; \
9676 })
9677 #else
9678 #define vld3q_p16(__p0) __extension__ ({ \
9679   poly16x8x3_t __ret; \
9680   __builtin_neon_vld3q_v(&__ret, __p0, 37); \
9681  \
9682   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9683   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9684   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
9685   __ret; \
9686 })
9687 #endif
9688 
9689 #ifdef __LITTLE_ENDIAN__
9690 #define vld3q_u8(__p0) __extension__ ({ \
9691   uint8x16x3_t __ret; \
9692   __builtin_neon_vld3q_v(&__ret, __p0, 48); \
9693   __ret; \
9694 })
9695 #else
9696 #define vld3q_u8(__p0) __extension__ ({ \
9697   uint8x16x3_t __ret; \
9698   __builtin_neon_vld3q_v(&__ret, __p0, 48); \
9699  \
9700   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
9701   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
9702   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
9703   __ret; \
9704 })
9705 #endif
9706 
9707 #ifdef __LITTLE_ENDIAN__
9708 #define vld3q_u32(__p0) __extension__ ({ \
9709   uint32x4x3_t __ret; \
9710   __builtin_neon_vld3q_v(&__ret, __p0, 50); \
9711   __ret; \
9712 })
9713 #else
9714 #define vld3q_u32(__p0) __extension__ ({ \
9715   uint32x4x3_t __ret; \
9716   __builtin_neon_vld3q_v(&__ret, __p0, 50); \
9717  \
9718   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9719   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9720   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
9721   __ret; \
9722 })
9723 #endif
9724 
9725 #ifdef __LITTLE_ENDIAN__
9726 #define vld3q_u16(__p0) __extension__ ({ \
9727   uint16x8x3_t __ret; \
9728   __builtin_neon_vld3q_v(&__ret, __p0, 49); \
9729   __ret; \
9730 })
9731 #else
9732 #define vld3q_u16(__p0) __extension__ ({ \
9733   uint16x8x3_t __ret; \
9734   __builtin_neon_vld3q_v(&__ret, __p0, 49); \
9735  \
9736   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9737   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9738   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
9739   __ret; \
9740 })
9741 #endif
9742 
9743 #ifdef __LITTLE_ENDIAN__
9744 #define vld3q_s8(__p0) __extension__ ({ \
9745   int8x16x3_t __ret; \
9746   __builtin_neon_vld3q_v(&__ret, __p0, 32); \
9747   __ret; \
9748 })
9749 #else
9750 #define vld3q_s8(__p0) __extension__ ({ \
9751   int8x16x3_t __ret; \
9752   __builtin_neon_vld3q_v(&__ret, __p0, 32); \
9753  \
9754   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
9755   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
9756   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
9757   __ret; \
9758 })
9759 #endif
9760 
9761 #ifdef __LITTLE_ENDIAN__
9762 #define vld3q_f32(__p0) __extension__ ({ \
9763   float32x4x3_t __ret; \
9764   __builtin_neon_vld3q_v(&__ret, __p0, 41); \
9765   __ret; \
9766 })
9767 #else
9768 #define vld3q_f32(__p0) __extension__ ({ \
9769   float32x4x3_t __ret; \
9770   __builtin_neon_vld3q_v(&__ret, __p0, 41); \
9771  \
9772   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9773   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9774   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
9775   __ret; \
9776 })
9777 #endif
9778 
9779 #ifdef __LITTLE_ENDIAN__
9780 #define vld3q_f16(__p0) __extension__ ({ \
9781   float16x8x3_t __ret; \
9782   __builtin_neon_vld3q_v(&__ret, __p0, 40); \
9783   __ret; \
9784 })
9785 #else
9786 #define vld3q_f16(__p0) __extension__ ({ \
9787   float16x8x3_t __ret; \
9788   __builtin_neon_vld3q_v(&__ret, __p0, 40); \
9789  \
9790   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9791   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9792   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
9793   __ret; \
9794 })
9795 #endif
9796 
9797 #ifdef __LITTLE_ENDIAN__
9798 #define vld3q_s32(__p0) __extension__ ({ \
9799   int32x4x3_t __ret; \
9800   __builtin_neon_vld3q_v(&__ret, __p0, 34); \
9801   __ret; \
9802 })
9803 #else
9804 #define vld3q_s32(__p0) __extension__ ({ \
9805   int32x4x3_t __ret; \
9806   __builtin_neon_vld3q_v(&__ret, __p0, 34); \
9807  \
9808   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9809   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9810   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
9811   __ret; \
9812 })
9813 #endif
9814 
9815 #ifdef __LITTLE_ENDIAN__
9816 #define vld3q_s16(__p0) __extension__ ({ \
9817   int16x8x3_t __ret; \
9818   __builtin_neon_vld3q_v(&__ret, __p0, 33); \
9819   __ret; \
9820 })
9821 #else
9822 #define vld3q_s16(__p0) __extension__ ({ \
9823   int16x8x3_t __ret; \
9824   __builtin_neon_vld3q_v(&__ret, __p0, 33); \
9825  \
9826   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9827   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9828   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
9829   __ret; \
9830 })
9831 #endif
9832 
9833 #ifdef __LITTLE_ENDIAN__
9834 #define vld3_u8(__p0) __extension__ ({ \
9835   uint8x8x3_t __ret; \
9836   __builtin_neon_vld3_v(&__ret, __p0, 16); \
9837   __ret; \
9838 })
9839 #else
9840 #define vld3_u8(__p0) __extension__ ({ \
9841   uint8x8x3_t __ret; \
9842   __builtin_neon_vld3_v(&__ret, __p0, 16); \
9843  \
9844   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9845   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9846   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
9847   __ret; \
9848 })
9849 #endif
9850 
9851 #ifdef __LITTLE_ENDIAN__
9852 #define vld3_u32(__p0) __extension__ ({ \
9853   uint32x2x3_t __ret; \
9854   __builtin_neon_vld3_v(&__ret, __p0, 18); \
9855   __ret; \
9856 })
9857 #else
9858 #define vld3_u32(__p0) __extension__ ({ \
9859   uint32x2x3_t __ret; \
9860   __builtin_neon_vld3_v(&__ret, __p0, 18); \
9861  \
9862   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
9863   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
9864   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
9865   __ret; \
9866 })
9867 #endif
9868 
9869 #ifdef __LITTLE_ENDIAN__
9870 #define vld3_u64(__p0) __extension__ ({ \
9871   uint64x1x3_t __ret; \
9872   __builtin_neon_vld3_v(&__ret, __p0, 19); \
9873   __ret; \
9874 })
9875 #else
9876 #define vld3_u64(__p0) __extension__ ({ \
9877   uint64x1x3_t __ret; \
9878   __builtin_neon_vld3_v(&__ret, __p0, 19); \
9879   __ret; \
9880 })
9881 #endif
9882 
9883 #ifdef __LITTLE_ENDIAN__
9884 #define vld3_u16(__p0) __extension__ ({ \
9885   uint16x4x3_t __ret; \
9886   __builtin_neon_vld3_v(&__ret, __p0, 17); \
9887   __ret; \
9888 })
9889 #else
9890 #define vld3_u16(__p0) __extension__ ({ \
9891   uint16x4x3_t __ret; \
9892   __builtin_neon_vld3_v(&__ret, __p0, 17); \
9893  \
9894   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9895   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9896   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
9897   __ret; \
9898 })
9899 #endif
9900 
9901 #ifdef __LITTLE_ENDIAN__
9902 #define vld3_s8(__p0) __extension__ ({ \
9903   int8x8x3_t __ret; \
9904   __builtin_neon_vld3_v(&__ret, __p0, 0); \
9905   __ret; \
9906 })
9907 #else
9908 #define vld3_s8(__p0) __extension__ ({ \
9909   int8x8x3_t __ret; \
9910   __builtin_neon_vld3_v(&__ret, __p0, 0); \
9911  \
9912   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9913   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9914   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
9915   __ret; \
9916 })
9917 #endif
9918 
9919 #ifdef __LITTLE_ENDIAN__
9920 #define vld3_f32(__p0) __extension__ ({ \
9921   float32x2x3_t __ret; \
9922   __builtin_neon_vld3_v(&__ret, __p0, 9); \
9923   __ret; \
9924 })
9925 #else
9926 #define vld3_f32(__p0) __extension__ ({ \
9927   float32x2x3_t __ret; \
9928   __builtin_neon_vld3_v(&__ret, __p0, 9); \
9929  \
9930   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
9931   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
9932   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
9933   __ret; \
9934 })
9935 #endif
9936 
9937 #ifdef __LITTLE_ENDIAN__
9938 #define vld3_f16(__p0) __extension__ ({ \
9939   float16x4x3_t __ret; \
9940   __builtin_neon_vld3_v(&__ret, __p0, 8); \
9941   __ret; \
9942 })
9943 #else
9944 #define vld3_f16(__p0) __extension__ ({ \
9945   float16x4x3_t __ret; \
9946   __builtin_neon_vld3_v(&__ret, __p0, 8); \
9947  \
9948   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9949   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9950   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
9951   __ret; \
9952 })
9953 #endif
9954 
9955 #ifdef __LITTLE_ENDIAN__
9956 #define vld3_s32(__p0) __extension__ ({ \
9957   int32x2x3_t __ret; \
9958   __builtin_neon_vld3_v(&__ret, __p0, 2); \
9959   __ret; \
9960 })
9961 #else
9962 #define vld3_s32(__p0) __extension__ ({ \
9963   int32x2x3_t __ret; \
9964   __builtin_neon_vld3_v(&__ret, __p0, 2); \
9965  \
9966   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
9967   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
9968   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
9969   __ret; \
9970 })
9971 #endif
9972 
9973 #ifdef __LITTLE_ENDIAN__
9974 #define vld3_s64(__p0) __extension__ ({ \
9975   int64x1x3_t __ret; \
9976   __builtin_neon_vld3_v(&__ret, __p0, 3); \
9977   __ret; \
9978 })
9979 #else
9980 #define vld3_s64(__p0) __extension__ ({ \
9981   int64x1x3_t __ret; \
9982   __builtin_neon_vld3_v(&__ret, __p0, 3); \
9983   __ret; \
9984 })
9985 #endif
9986 
9987 #ifdef __LITTLE_ENDIAN__
9988 #define vld3_s16(__p0) __extension__ ({ \
9989   int16x4x3_t __ret; \
9990   __builtin_neon_vld3_v(&__ret, __p0, 1); \
9991   __ret; \
9992 })
9993 #else
9994 #define vld3_s16(__p0) __extension__ ({ \
9995   int16x4x3_t __ret; \
9996   __builtin_neon_vld3_v(&__ret, __p0, 1); \
9997  \
9998   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9999   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
10000   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
10001   __ret; \
10002 })
10003 #endif
10004 
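/*
 * Usage sketch (editorial addition, not part of the upstream header):
 * vld3_* / vld3q_* de-interleave groups of three: element 3*i lands in
 * val[0], 3*i+1 in val[1] and 3*i+2 in val[2], which is the usual way to
 * split packed RGB data into per-channel vectors.  The helper name below is
 * hypothetical.
 */
static __inline__ uint8x8x3_t __example_split_rgb_u8(const uint8_t *__rgb) {
  /* Loads 24 interleaved bytes and returns R, G and B planes of 8 lanes. */
  return vld3_u8(__rgb);
}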
10005 #ifdef __LITTLE_ENDIAN__
10006 #define vld3_dup_p8(__p0) __extension__ ({ \
10007   poly8x8x3_t __ret; \
10008   __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \
10009   __ret; \
10010 })
10011 #else
10012 #define vld3_dup_p8(__p0) __extension__ ({ \
10013   poly8x8x3_t __ret; \
10014   __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \
10015  \
10016   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10017   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10018   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10019   __ret; \
10020 })
10021 #endif
10022 
10023 #ifdef __LITTLE_ENDIAN__
10024 #define vld3_dup_p16(__p0) __extension__ ({ \
10025   poly16x4x3_t __ret; \
10026   __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \
10027   __ret; \
10028 })
10029 #else
10030 #define vld3_dup_p16(__p0) __extension__ ({ \
10031   poly16x4x3_t __ret; \
10032   __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \
10033  \
10034   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
10035   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
10036   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
10037   __ret; \
10038 })
10039 #endif
10040 
10041 #ifdef __LITTLE_ENDIAN__
10042 #define vld3_dup_u8(__p0) __extension__ ({ \
10043   uint8x8x3_t __ret; \
10044   __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \
10045   __ret; \
10046 })
10047 #else
10048 #define vld3_dup_u8(__p0) __extension__ ({ \
10049   uint8x8x3_t __ret; \
10050   __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \
10051  \
10052   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10053   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10054   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10055   __ret; \
10056 })
10057 #endif
10058 
10059 #ifdef __LITTLE_ENDIAN__
10060 #define vld3_dup_u32(__p0) __extension__ ({ \
10061   uint32x2x3_t __ret; \
10062   __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \
10063   __ret; \
10064 })
10065 #else
10066 #define vld3_dup_u32(__p0) __extension__ ({ \
10067   uint32x2x3_t __ret; \
10068   __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \
10069  \
10070   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
10071   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
10072   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
10073   __ret; \
10074 })
10075 #endif
10076 
10077 #ifdef __LITTLE_ENDIAN__
10078 #define vld3_dup_u64(__p0) __extension__ ({ \
10079   uint64x1x3_t __ret; \
10080   __builtin_neon_vld3_dup_v(&__ret, __p0, 19); \
10081   __ret; \
10082 })
10083 #else
10084 #define vld3_dup_u64(__p0) __extension__ ({ \
10085   uint64x1x3_t __ret; \
10086   __builtin_neon_vld3_dup_v(&__ret, __p0, 19); \
10087   __ret; \
10088 })
10089 #endif
10090 
10091 #ifdef __LITTLE_ENDIAN__
10092 #define vld3_dup_u16(__p0) __extension__ ({ \
10093   uint16x4x3_t __ret; \
10094   __builtin_neon_vld3_dup_v(&__ret, __p0, 17); \
10095   __ret; \
10096 })
10097 #else
10098 #define vld3_dup_u16(__p0) __extension__ ({ \
10099   uint16x4x3_t __ret; \
10100   __builtin_neon_vld3_dup_v(&__ret, __p0, 17); \
10101  \
10102   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
10103   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
10104   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
10105   __ret; \
10106 })
10107 #endif
10108 
10109 #ifdef __LITTLE_ENDIAN__
10110 #define vld3_dup_s8(__p0) __extension__ ({ \
10111   int8x8x3_t __ret; \
10112   __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \
10113   __ret; \
10114 })
10115 #else
10116 #define vld3_dup_s8(__p0) __extension__ ({ \
10117   int8x8x3_t __ret; \
10118   __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \
10119  \
10120   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10121   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10122   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10123   __ret; \
10124 })
10125 #endif
10126 
10127 #ifdef __LITTLE_ENDIAN__
10128 #define vld3_dup_f32(__p0) __extension__ ({ \
10129   float32x2x3_t __ret; \
10130   __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \
10131   __ret; \
10132 })
10133 #else
10134 #define vld3_dup_f32(__p0) __extension__ ({ \
10135   float32x2x3_t __ret; \
10136   __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \
10137  \
10138   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
10139   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
10140   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
10141   __ret; \
10142 })
10143 #endif
10144 
10145 #ifdef __LITTLE_ENDIAN__
10146 #define vld3_dup_f16(__p0) __extension__ ({ \
10147   float16x4x3_t __ret; \
10148   __builtin_neon_vld3_dup_v(&__ret, __p0, 8); \
10149   __ret; \
10150 })
10151 #else
10152 #define vld3_dup_f16(__p0) __extension__ ({ \
10153   float16x4x3_t __ret; \
10154   __builtin_neon_vld3_dup_v(&__ret, __p0, 8); \
10155  \
10156   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
10157   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
10158   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
10159   __ret; \
10160 })
10161 #endif
10162 
10163 #ifdef __LITTLE_ENDIAN__
10164 #define vld3_dup_s32(__p0) __extension__ ({ \
10165   int32x2x3_t __ret; \
10166   __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \
10167   __ret; \
10168 })
10169 #else
10170 #define vld3_dup_s32(__p0) __extension__ ({ \
10171   int32x2x3_t __ret; \
10172   __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \
10173  \
10174   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
10175   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
10176   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
10177   __ret; \
10178 })
10179 #endif
10180 
10181 #ifdef __LITTLE_ENDIAN__
10182 #define vld3_dup_s64(__p0) __extension__ ({ \
10183   int64x1x3_t __ret; \
10184   __builtin_neon_vld3_dup_v(&__ret, __p0, 3); \
10185   __ret; \
10186 })
10187 #else
10188 #define vld3_dup_s64(__p0) __extension__ ({ \
10189   int64x1x3_t __ret; \
10190   __builtin_neon_vld3_dup_v(&__ret, __p0, 3); \
10191   __ret; \
10192 })
10193 #endif
10194 
10195 #ifdef __LITTLE_ENDIAN__
10196 #define vld3_dup_s16(__p0) __extension__ ({ \
10197   int16x4x3_t __ret; \
10198   __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \
10199   __ret; \
10200 })
10201 #else
10202 #define vld3_dup_s16(__p0) __extension__ ({ \
10203   int16x4x3_t __ret; \
10204   __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \
10205  \
10206   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
10207   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
10208   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
10209   __ret; \
10210 })
10211 #endif
10212 
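/* The vld3_lane_* / vld3q_lane_* macros below load three interleaved elements
 * from memory into lane __p2 of an existing x3 structure, leaving all other
 * lanes untouched.  A minimal usage sketch, assuming some previously loaded
 * data (buffer and variable names are illustrative only):
 *
 *   int16_t buf[3] = {1, 2, 3};
 *   int16x4x3_t acc = vld3_s16(some_int16_ptr);   // earlier de-interleaved load
 *   acc = vld3_lane_s16(buf, acc, 2);             // overwrite lane 2 only
 */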
10213 #ifdef __LITTLE_ENDIAN__
10214 #define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
10215   poly8x8x3_t __s1 = __p1; \
10216   poly8x8x3_t __ret; \
10217   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \
10218   __ret; \
10219 })
10220 #else
10221 #define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
10222   poly8x8x3_t __s1 = __p1; \
10223   poly8x8x3_t __rev1; \
10224   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10225   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10226   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10227   poly8x8x3_t __ret; \
10228   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \
10229  \
10230   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10231   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10232   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10233   __ret; \
10234 })
10235 #endif
10236 
10237 #ifdef __LITTLE_ENDIAN__
10238 #define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
10239   poly16x4x3_t __s1 = __p1; \
10240   poly16x4x3_t __ret; \
10241   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \
10242   __ret; \
10243 })
10244 #else
10245 #define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
10246   poly16x4x3_t __s1 = __p1; \
10247   poly16x4x3_t __rev1; \
10248   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
10249   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
10250   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
10251   poly16x4x3_t __ret; \
10252   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \
10253  \
10254   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
10255   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
10256   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
10257   __ret; \
10258 })
10259 #endif
10260 
10261 #ifdef __LITTLE_ENDIAN__
10262 #define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
10263   poly16x8x3_t __s1 = __p1; \
10264   poly16x8x3_t __ret; \
10265   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \
10266   __ret; \
10267 })
10268 #else
10269 #define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
10270   poly16x8x3_t __s1 = __p1; \
10271   poly16x8x3_t __rev1; \
10272   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10273   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10274   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10275   poly16x8x3_t __ret; \
10276   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \
10277  \
10278   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10279   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10280   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10281   __ret; \
10282 })
10283 #endif
10284 
10285 #ifdef __LITTLE_ENDIAN__
10286 #define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
10287   uint32x4x3_t __s1 = __p1; \
10288   uint32x4x3_t __ret; \
10289   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \
10290   __ret; \
10291 })
10292 #else
10293 #define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
10294   uint32x4x3_t __s1 = __p1; \
10295   uint32x4x3_t __rev1; \
10296   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
10297   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
10298   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
10299   uint32x4x3_t __ret; \
10300   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \
10301  \
10302   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
10303   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
10304   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
10305   __ret; \
10306 })
10307 #endif
10308 
10309 #ifdef __LITTLE_ENDIAN__
10310 #define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
10311   uint16x8x3_t __s1 = __p1; \
10312   uint16x8x3_t __ret; \
10313   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \
10314   __ret; \
10315 })
10316 #else
10317 #define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
10318   uint16x8x3_t __s1 = __p1; \
10319   uint16x8x3_t __rev1; \
10320   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10321   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10322   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10323   uint16x8x3_t __ret; \
10324   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \
10325  \
10326   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10327   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10328   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10329   __ret; \
10330 })
10331 #endif
10332 
10333 #ifdef __LITTLE_ENDIAN__
10334 #define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
10335   float32x4x3_t __s1 = __p1; \
10336   float32x4x3_t __ret; \
10337   __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 41); \
10338   __ret; \
10339 })
10340 #else
10341 #define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
10342   float32x4x3_t __s1 = __p1; \
10343   float32x4x3_t __rev1; \
10344   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
10345   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
10346   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
10347   float32x4x3_t __ret; \
10348   __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 41); \
10349  \
10350   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
10351   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
10352   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
10353   __ret; \
10354 })
10355 #endif
10356 
10357 #ifdef __LITTLE_ENDIAN__
10358 #define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
10359   float16x8x3_t __s1 = __p1; \
10360   float16x8x3_t __ret; \
10361   __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 40); \
10362   __ret; \
10363 })
10364 #else
10365 #define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
10366   float16x8x3_t __s1 = __p1; \
10367   float16x8x3_t __rev1; \
10368   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10369   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10370   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10371   float16x8x3_t __ret; \
10372   __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 40); \
10373  \
10374   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10375   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10376   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10377   __ret; \
10378 })
10379 #endif
10380 
10381 #ifdef __LITTLE_ENDIAN__
10382 #define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
10383   int32x4x3_t __s1 = __p1; \
10384   int32x4x3_t __ret; \
10385   __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 34); \
10386   __ret; \
10387 })
10388 #else
10389 #define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
10390   int32x4x3_t __s1 = __p1; \
10391   int32x4x3_t __rev1; \
10392   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
10393   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
10394   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
10395   int32x4x3_t __ret; \
10396   __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 34); \
10397  \
10398   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
10399   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
10400   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
10401   __ret; \
10402 })
10403 #endif
10404 
10405 #ifdef __LITTLE_ENDIAN__
10406 #define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
10407   int16x8x3_t __s1 = __p1; \
10408   int16x8x3_t __ret; \
10409   __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 33); \
10410   __ret; \
10411 })
10412 #else
10413 #define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
10414   int16x8x3_t __s1 = __p1; \
10415   int16x8x3_t __rev1; \
10416   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10417   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10418   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10419   int16x8x3_t __ret; \
10420   __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 33); \
10421  \
10422   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10423   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10424   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10425   __ret; \
10426 })
10427 #endif
10428 
10429 #ifdef __LITTLE_ENDIAN__
10430 #define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
10431   uint8x8x3_t __s1 = __p1; \
10432   uint8x8x3_t __ret; \
10433   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \
10434   __ret; \
10435 })
10436 #else
10437 #define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
10438   uint8x8x3_t __s1 = __p1; \
10439   uint8x8x3_t __rev1; \
10440   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10441   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10442   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10443   uint8x8x3_t __ret; \
10444   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \
10445  \
10446   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10447   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10448   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10449   __ret; \
10450 })
10451 #endif
10452 
10453 #ifdef __LITTLE_ENDIAN__
10454 #define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
10455   uint32x2x3_t __s1 = __p1; \
10456   uint32x2x3_t __ret; \
10457   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \
10458   __ret; \
10459 })
10460 #else
10461 #define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
10462   uint32x2x3_t __s1 = __p1; \
10463   uint32x2x3_t __rev1; \
10464   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
10465   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
10466   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
10467   uint32x2x3_t __ret; \
10468   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \
10469  \
10470   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
10471   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
10472   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
10473   __ret; \
10474 })
10475 #endif
10476 
10477 #ifdef __LITTLE_ENDIAN__
10478 #define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
10479   uint16x4x3_t __s1 = __p1; \
10480   uint16x4x3_t __ret; \
10481   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \
10482   __ret; \
10483 })
10484 #else
10485 #define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
10486   uint16x4x3_t __s1 = __p1; \
10487   uint16x4x3_t __rev1; \
10488   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
10489   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
10490   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
10491   uint16x4x3_t __ret; \
10492   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \
10493  \
10494   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
10495   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
10496   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
10497   __ret; \
10498 })
10499 #endif
10500 
10501 #ifdef __LITTLE_ENDIAN__
10502 #define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
10503   int8x8x3_t __s1 = __p1; \
10504   int8x8x3_t __ret; \
10505   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \
10506   __ret; \
10507 })
10508 #else
10509 #define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
10510   int8x8x3_t __s1 = __p1; \
10511   int8x8x3_t __rev1; \
10512   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10513   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10514   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10515   int8x8x3_t __ret; \
10516   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \
10517  \
10518   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10519   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10520   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10521   __ret; \
10522 })
10523 #endif
10524 
10525 #ifdef __LITTLE_ENDIAN__
10526 #define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
10527   float32x2x3_t __s1 = __p1; \
10528   float32x2x3_t __ret; \
10529   __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 9); \
10530   __ret; \
10531 })
10532 #else
10533 #define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
10534   float32x2x3_t __s1 = __p1; \
10535   float32x2x3_t __rev1; \
10536   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
10537   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
10538   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
10539   float32x2x3_t __ret; \
10540   __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 9); \
10541  \
10542   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
10543   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
10544   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
10545   __ret; \
10546 })
10547 #endif
10548 
10549 #ifdef __LITTLE_ENDIAN__
10550 #define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
10551   float16x4x3_t __s1 = __p1; \
10552   float16x4x3_t __ret; \
10553   __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 8); \
10554   __ret; \
10555 })
10556 #else
10557 #define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
10558   float16x4x3_t __s1 = __p1; \
10559   float16x4x3_t __rev1; \
10560   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
10561   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
10562   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
10563   float16x4x3_t __ret; \
10564   __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 8); \
10565  \
10566   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
10567   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
10568   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
10569   __ret; \
10570 })
10571 #endif
10572 
10573 #ifdef __LITTLE_ENDIAN__
10574 #define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
10575   int32x2x3_t __s1 = __p1; \
10576   int32x2x3_t __ret; \
10577   __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 2); \
10578   __ret; \
10579 })
10580 #else
10581 #define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
10582   int32x2x3_t __s1 = __p1; \
10583   int32x2x3_t __rev1; \
10584   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
10585   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
10586   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
10587   int32x2x3_t __ret; \
10588   __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 2); \
10589  \
10590   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
10591   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
10592   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
10593   __ret; \
10594 })
10595 #endif
10596 
10597 #ifdef __LITTLE_ENDIAN__
10598 #define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
10599   int16x4x3_t __s1 = __p1; \
10600   int16x4x3_t __ret; \
10601   __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 1); \
10602   __ret; \
10603 })
10604 #else
10605 #define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
10606   int16x4x3_t __s1 = __p1; \
10607   int16x4x3_t __rev1; \
10608   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
10609   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
10610   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
10611   int16x4x3_t __ret; \
10612   __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 1); \
10613  \
10614   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
10615   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
10616   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
10617   __ret; \
10618 })
10619 #endif
10620 
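/* The vld4_* / vld4q_* macros below perform a de-interleaving load of four
 * vectors from consecutive memory (VLD4): element 0 of each group goes to
 * val[0], element 1 to val[1], and so on.  A minimal usage sketch (array and
 * variable names are illustrative only):
 *
 *   uint8_t rgba[8 * 4];                  // 8 interleaved RGBA pixels
 *   uint8x8x4_t px = vld4_u8(rgba);       // px.val[0]=R, [1]=G, [2]=B, [3]=A
 */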
10621 #ifdef __LITTLE_ENDIAN__
10622 #define vld4_p8(__p0) __extension__ ({ \
10623   poly8x8x4_t __ret; \
10624   __builtin_neon_vld4_v(&__ret, __p0, 4); \
10625   __ret; \
10626 })
10627 #else
10628 #define vld4_p8(__p0) __extension__ ({ \
10629   poly8x8x4_t __ret; \
10630   __builtin_neon_vld4_v(&__ret, __p0, 4); \
10631  \
10632   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10633   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10634   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10635   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
10636   __ret; \
10637 })
10638 #endif
10639 
10640 #ifdef __LITTLE_ENDIAN__
10641 #define vld4_p16(__p0) __extension__ ({ \
10642   poly16x4x4_t __ret; \
10643   __builtin_neon_vld4_v(&__ret, __p0, 5); \
10644   __ret; \
10645 })
10646 #else
10647 #define vld4_p16(__p0) __extension__ ({ \
10648   poly16x4x4_t __ret; \
10649   __builtin_neon_vld4_v(&__ret, __p0, 5); \
10650  \
10651   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
10652   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
10653   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
10654   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
10655   __ret; \
10656 })
10657 #endif
10658 
10659 #ifdef __LITTLE_ENDIAN__
10660 #define vld4q_p8(__p0) __extension__ ({ \
10661   poly8x16x4_t __ret; \
10662   __builtin_neon_vld4q_v(&__ret, __p0, 36); \
10663   __ret; \
10664 })
10665 #else
10666 #define vld4q_p8(__p0) __extension__ ({ \
10667   poly8x16x4_t __ret; \
10668   __builtin_neon_vld4q_v(&__ret, __p0, 36); \
10669  \
10670   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
10671   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
10672   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
10673   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
10674   __ret; \
10675 })
10676 #endif
10677 
10678 #ifdef __LITTLE_ENDIAN__
10679 #define vld4q_p16(__p0) __extension__ ({ \
10680   poly16x8x4_t __ret; \
10681   __builtin_neon_vld4q_v(&__ret, __p0, 37); \
10682   __ret; \
10683 })
10684 #else
10685 #define vld4q_p16(__p0) __extension__ ({ \
10686   poly16x8x4_t __ret; \
10687   __builtin_neon_vld4q_v(&__ret, __p0, 37); \
10688  \
10689   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10690   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10691   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10692   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
10693   __ret; \
10694 })
10695 #endif
10696 
10697 #ifdef __LITTLE_ENDIAN__
10698 #define vld4q_u8(__p0) __extension__ ({ \
10699   uint8x16x4_t __ret; \
10700   __builtin_neon_vld4q_v(&__ret, __p0, 48); \
10701   __ret; \
10702 })
10703 #else
10704 #define vld4q_u8(__p0) __extension__ ({ \
10705   uint8x16x4_t __ret; \
10706   __builtin_neon_vld4q_v(&__ret, __p0, 48); \
10707  \
10708   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
10709   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
10710   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
10711   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
10712   __ret; \
10713 })
10714 #endif
10715 
10716 #ifdef __LITTLE_ENDIAN__
10717 #define vld4q_u32(__p0) __extension__ ({ \
10718   uint32x4x4_t __ret; \
10719   __builtin_neon_vld4q_v(&__ret, __p0, 50); \
10720   __ret; \
10721 })
10722 #else
10723 #define vld4q_u32(__p0) __extension__ ({ \
10724   uint32x4x4_t __ret; \
10725   __builtin_neon_vld4q_v(&__ret, __p0, 50); \
10726  \
10727   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
10728   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
10729   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
10730   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
10731   __ret; \
10732 })
10733 #endif
10734 
10735 #ifdef __LITTLE_ENDIAN__
10736 #define vld4q_u16(__p0) __extension__ ({ \
10737   uint16x8x4_t __ret; \
10738   __builtin_neon_vld4q_v(&__ret, __p0, 49); \
10739   __ret; \
10740 })
10741 #else
10742 #define vld4q_u16(__p0) __extension__ ({ \
10743   uint16x8x4_t __ret; \
10744   __builtin_neon_vld4q_v(&__ret, __p0, 49); \
10745  \
10746   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10747   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10748   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10749   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
10750   __ret; \
10751 })
10752 #endif
10753 
10754 #ifdef __LITTLE_ENDIAN__
10755 #define vld4q_s8(__p0) __extension__ ({ \
10756   int8x16x4_t __ret; \
10757   __builtin_neon_vld4q_v(&__ret, __p0, 32); \
10758   __ret; \
10759 })
10760 #else
10761 #define vld4q_s8(__p0) __extension__ ({ \
10762   int8x16x4_t __ret; \
10763   __builtin_neon_vld4q_v(&__ret, __p0, 32); \
10764  \
10765   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
10766   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
10767   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
10768   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
10769   __ret; \
10770 })
10771 #endif
10772 
10773 #ifdef __LITTLE_ENDIAN__
10774 #define vld4q_f32(__p0) __extension__ ({ \
10775   float32x4x4_t __ret; \
10776   __builtin_neon_vld4q_v(&__ret, __p0, 41); \
10777   __ret; \
10778 })
10779 #else
10780 #define vld4q_f32(__p0) __extension__ ({ \
10781   float32x4x4_t __ret; \
10782   __builtin_neon_vld4q_v(&__ret, __p0, 41); \
10783  \
10784   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
10785   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
10786   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
10787   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
10788   __ret; \
10789 })
10790 #endif
10791 
10792 #ifdef __LITTLE_ENDIAN__
10793 #define vld4q_f16(__p0) __extension__ ({ \
10794   float16x8x4_t __ret; \
10795   __builtin_neon_vld4q_v(&__ret, __p0, 40); \
10796   __ret; \
10797 })
10798 #else
10799 #define vld4q_f16(__p0) __extension__ ({ \
10800   float16x8x4_t __ret; \
10801   __builtin_neon_vld4q_v(&__ret, __p0, 40); \
10802  \
10803   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10804   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10805   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10806   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
10807   __ret; \
10808 })
10809 #endif
10810 
10811 #ifdef __LITTLE_ENDIAN__
10812 #define vld4q_s32(__p0) __extension__ ({ \
10813   int32x4x4_t __ret; \
10814   __builtin_neon_vld4q_v(&__ret, __p0, 34); \
10815   __ret; \
10816 })
10817 #else
10818 #define vld4q_s32(__p0) __extension__ ({ \
10819   int32x4x4_t __ret; \
10820   __builtin_neon_vld4q_v(&__ret, __p0, 34); \
10821  \
10822   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
10823   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
10824   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
10825   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
10826   __ret; \
10827 })
10828 #endif
10829 
10830 #ifdef __LITTLE_ENDIAN__
10831 #define vld4q_s16(__p0) __extension__ ({ \
10832   int16x8x4_t __ret; \
10833   __builtin_neon_vld4q_v(&__ret, __p0, 33); \
10834   __ret; \
10835 })
10836 #else
10837 #define vld4q_s16(__p0) __extension__ ({ \
10838   int16x8x4_t __ret; \
10839   __builtin_neon_vld4q_v(&__ret, __p0, 33); \
10840  \
10841   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10842   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10843   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10844   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
10845   __ret; \
10846 })
10847 #endif
10848 
10849 #ifdef __LITTLE_ENDIAN__
10850 #define vld4_u8(__p0) __extension__ ({ \
10851   uint8x8x4_t __ret; \
10852   __builtin_neon_vld4_v(&__ret, __p0, 16); \
10853   __ret; \
10854 })
10855 #else
10856 #define vld4_u8(__p0) __extension__ ({ \
10857   uint8x8x4_t __ret; \
10858   __builtin_neon_vld4_v(&__ret, __p0, 16); \
10859  \
10860   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10861   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10862   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10863   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
10864   __ret; \
10865 })
10866 #endif
10867 
10868 #ifdef __LITTLE_ENDIAN__
10869 #define vld4_u32(__p0) __extension__ ({ \
10870   uint32x2x4_t __ret; \
10871   __builtin_neon_vld4_v(&__ret, __p0, 18); \
10872   __ret; \
10873 })
10874 #else
10875 #define vld4_u32(__p0) __extension__ ({ \
10876   uint32x2x4_t __ret; \
10877   __builtin_neon_vld4_v(&__ret, __p0, 18); \
10878  \
10879   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
10880   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
10881   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
10882   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
10883   __ret; \
10884 })
10885 #endif
10886 
10887 #ifdef __LITTLE_ENDIAN__
10888 #define vld4_u64(__p0) __extension__ ({ \
10889   uint64x1x4_t __ret; \
10890   __builtin_neon_vld4_v(&__ret, __p0, 19); \
10891   __ret; \
10892 })
10893 #else
10894 #define vld4_u64(__p0) __extension__ ({ \
10895   uint64x1x4_t __ret; \
10896   __builtin_neon_vld4_v(&__ret, __p0, 19); \
10897   __ret; \
10898 })
10899 #endif
10900 
10901 #ifdef __LITTLE_ENDIAN__
10902 #define vld4_u16(__p0) __extension__ ({ \
10903   uint16x4x4_t __ret; \
10904   __builtin_neon_vld4_v(&__ret, __p0, 17); \
10905   __ret; \
10906 })
10907 #else
10908 #define vld4_u16(__p0) __extension__ ({ \
10909   uint16x4x4_t __ret; \
10910   __builtin_neon_vld4_v(&__ret, __p0, 17); \
10911  \
10912   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
10913   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
10914   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
10915   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
10916   __ret; \
10917 })
10918 #endif
10919 
10920 #ifdef __LITTLE_ENDIAN__
10921 #define vld4_s8(__p0) __extension__ ({ \
10922   int8x8x4_t __ret; \
10923   __builtin_neon_vld4_v(&__ret, __p0, 0); \
10924   __ret; \
10925 })
10926 #else
10927 #define vld4_s8(__p0) __extension__ ({ \
10928   int8x8x4_t __ret; \
10929   __builtin_neon_vld4_v(&__ret, __p0, 0); \
10930  \
10931   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10932   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10933   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10934   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
10935   __ret; \
10936 })
10937 #endif
10938 
10939 #ifdef __LITTLE_ENDIAN__
10940 #define vld4_f32(__p0) __extension__ ({ \
10941   float32x2x4_t __ret; \
10942   __builtin_neon_vld4_v(&__ret, __p0, 9); \
10943   __ret; \
10944 })
10945 #else
10946 #define vld4_f32(__p0) __extension__ ({ \
10947   float32x2x4_t __ret; \
10948   __builtin_neon_vld4_v(&__ret, __p0, 9); \
10949  \
10950   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
10951   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
10952   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
10953   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
10954   __ret; \
10955 })
10956 #endif
10957 
10958 #ifdef __LITTLE_ENDIAN__
10959 #define vld4_f16(__p0) __extension__ ({ \
10960   float16x4x4_t __ret; \
10961   __builtin_neon_vld4_v(&__ret, __p0, 8); \
10962   __ret; \
10963 })
10964 #else
10965 #define vld4_f16(__p0) __extension__ ({ \
10966   float16x4x4_t __ret; \
10967   __builtin_neon_vld4_v(&__ret, __p0, 8); \
10968  \
10969   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
10970   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
10971   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
10972   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
10973   __ret; \
10974 })
10975 #endif
10976 
10977 #ifdef __LITTLE_ENDIAN__
10978 #define vld4_s32(__p0) __extension__ ({ \
10979   int32x2x4_t __ret; \
10980   __builtin_neon_vld4_v(&__ret, __p0, 2); \
10981   __ret; \
10982 })
10983 #else
10984 #define vld4_s32(__p0) __extension__ ({ \
10985   int32x2x4_t __ret; \
10986   __builtin_neon_vld4_v(&__ret, __p0, 2); \
10987  \
10988   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
10989   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
10990   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
10991   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
10992   __ret; \
10993 })
10994 #endif
10995 
10996 #ifdef __LITTLE_ENDIAN__
10997 #define vld4_s64(__p0) __extension__ ({ \
10998   int64x1x4_t __ret; \
10999   __builtin_neon_vld4_v(&__ret, __p0, 3); \
11000   __ret; \
11001 })
11002 #else
11003 #define vld4_s64(__p0) __extension__ ({ \
11004   int64x1x4_t __ret; \
11005   __builtin_neon_vld4_v(&__ret, __p0, 3); \
11006   __ret; \
11007 })
11008 #endif
11009 
11010 #ifdef __LITTLE_ENDIAN__
11011 #define vld4_s16(__p0) __extension__ ({ \
11012   int16x4x4_t __ret; \
11013   __builtin_neon_vld4_v(&__ret, __p0, 1); \
11014   __ret; \
11015 })
11016 #else
11017 #define vld4_s16(__p0) __extension__ ({ \
11018   int16x4x4_t __ret; \
11019   __builtin_neon_vld4_v(&__ret, __p0, 1); \
11020  \
11021   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
11022   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
11023   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
11024   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
11025   __ret; \
11026 })
11027 #endif
11028 
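/* The vld4_dup_* macros below load four consecutive elements and broadcast
 * each one across every lane of the corresponding val[i] vector (the
 * "all lanes" form of VLD4).  A minimal usage sketch (names are illustrative
 * only):
 *
 *   float coeffs[4] = {0.25f, 0.5f, 0.75f, 1.0f};
 *   float32x2x4_t c = vld4_dup_f32(coeffs);   // c.val[i] = {coeffs[i], coeffs[i]}
 */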
11029 #ifdef __LITTLE_ENDIAN__
11030 #define vld4_dup_p8(__p0) __extension__ ({ \
11031   poly8x8x4_t __ret; \
11032   __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \
11033   __ret; \
11034 })
11035 #else
11036 #define vld4_dup_p8(__p0) __extension__ ({ \
11037   poly8x8x4_t __ret; \
11038   __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \
11039  \
11040   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
11041   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
11042   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
11043   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
11044   __ret; \
11045 })
11046 #endif
11047 
11048 #ifdef __LITTLE_ENDIAN__
11049 #define vld4_dup_p16(__p0) __extension__ ({ \
11050   poly16x4x4_t __ret; \
11051   __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \
11052   __ret; \
11053 })
11054 #else
11055 #define vld4_dup_p16(__p0) __extension__ ({ \
11056   poly16x4x4_t __ret; \
11057   __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \
11058  \
11059   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
11060   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
11061   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
11062   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
11063   __ret; \
11064 })
11065 #endif
11066 
11067 #ifdef __LITTLE_ENDIAN__
11068 #define vld4_dup_u8(__p0) __extension__ ({ \
11069   uint8x8x4_t __ret; \
11070   __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \
11071   __ret; \
11072 })
11073 #else
11074 #define vld4_dup_u8(__p0) __extension__ ({ \
11075   uint8x8x4_t __ret; \
11076   __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \
11077  \
11078   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
11079   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
11080   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
11081   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
11082   __ret; \
11083 })
11084 #endif
11085 
11086 #ifdef __LITTLE_ENDIAN__
11087 #define vld4_dup_u32(__p0) __extension__ ({ \
11088   uint32x2x4_t __ret; \
11089   __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \
11090   __ret; \
11091 })
11092 #else
11093 #define vld4_dup_u32(__p0) __extension__ ({ \
11094   uint32x2x4_t __ret; \
11095   __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \
11096  \
11097   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
11098   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
11099   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
11100   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
11101   __ret; \
11102 })
11103 #endif
11104 
11105 #ifdef __LITTLE_ENDIAN__
11106 #define vld4_dup_u64(__p0) __extension__ ({ \
11107   uint64x1x4_t __ret; \
11108   __builtin_neon_vld4_dup_v(&__ret, __p0, 19); \
11109   __ret; \
11110 })
11111 #else
11112 #define vld4_dup_u64(__p0) __extension__ ({ \
11113   uint64x1x4_t __ret; \
11114   __builtin_neon_vld4_dup_v(&__ret, __p0, 19); \
11115   __ret; \
11116 })
11117 #endif
11118 
11119 #ifdef __LITTLE_ENDIAN__
11120 #define vld4_dup_u16(__p0) __extension__ ({ \
11121   uint16x4x4_t __ret; \
11122   __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \
11123   __ret; \
11124 })
11125 #else
11126 #define vld4_dup_u16(__p0) __extension__ ({ \
11127   uint16x4x4_t __ret; \
11128   __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \
11129  \
11130   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
11131   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
11132   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
11133   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
11134   __ret; \
11135 })
11136 #endif
11137 
11138 #ifdef __LITTLE_ENDIAN__
11139 #define vld4_dup_s8(__p0) __extension__ ({ \
11140   int8x8x4_t __ret; \
11141   __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \
11142   __ret; \
11143 })
11144 #else
11145 #define vld4_dup_s8(__p0) __extension__ ({ \
11146   int8x8x4_t __ret; \
11147   __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \
11148  \
11149   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
11150   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
11151   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
11152   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
11153   __ret; \
11154 })
11155 #endif
11156 
11157 #ifdef __LITTLE_ENDIAN__
11158 #define vld4_dup_f32(__p0) __extension__ ({ \
11159   float32x2x4_t __ret; \
11160   __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \
11161   __ret; \
11162 })
11163 #else
11164 #define vld4_dup_f32(__p0) __extension__ ({ \
11165   float32x2x4_t __ret; \
11166   __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \
11167  \
11168   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
11169   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
11170   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
11171   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
11172   __ret; \
11173 })
11174 #endif
11175 
11176 #ifdef __LITTLE_ENDIAN__
11177 #define vld4_dup_f16(__p0) __extension__ ({ \
11178   float16x4x4_t __ret; \
11179   __builtin_neon_vld4_dup_v(&__ret, __p0, 8); \
11180   __ret; \
11181 })
11182 #else
11183 #define vld4_dup_f16(__p0) __extension__ ({ \
11184   float16x4x4_t __ret; \
11185   __builtin_neon_vld4_dup_v(&__ret, __p0, 8); \
11186  \
11187   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
11188   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
11189   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
11190   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
11191   __ret; \
11192 })
11193 #endif
11194 
11195 #ifdef __LITTLE_ENDIAN__
11196 #define vld4_dup_s32(__p0) __extension__ ({ \
11197   int32x2x4_t __ret; \
11198   __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \
11199   __ret; \
11200 })
11201 #else
11202 #define vld4_dup_s32(__p0) __extension__ ({ \
11203   int32x2x4_t __ret; \
11204   __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \
11205  \
11206   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
11207   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
11208   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
11209   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
11210   __ret; \
11211 })
11212 #endif
11213 
11214 #ifdef __LITTLE_ENDIAN__
11215 #define vld4_dup_s64(__p0) __extension__ ({ \
11216   int64x1x4_t __ret; \
11217   __builtin_neon_vld4_dup_v(&__ret, __p0, 3); \
11218   __ret; \
11219 })
11220 #else
11221 #define vld4_dup_s64(__p0) __extension__ ({ \
11222   int64x1x4_t __ret; \
11223   __builtin_neon_vld4_dup_v(&__ret, __p0, 3); \
11224   __ret; \
11225 })
11226 #endif
11227 
11228 #ifdef __LITTLE_ENDIAN__
11229 #define vld4_dup_s16(__p0) __extension__ ({ \
11230   int16x4x4_t __ret; \
11231   __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \
11232   __ret; \
11233 })
11234 #else
11235 #define vld4_dup_s16(__p0) __extension__ ({ \
11236   int16x4x4_t __ret; \
11237   __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \
11238  \
11239   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
11240   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
11241   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
11242   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
11243   __ret; \
11244 })
11245 #endif
11246 
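/* The vld4_lane_* / vld4q_lane_* macros below load four interleaved elements
 * from memory into a single lane __p2 of an existing x4 structure, preserving
 * every other lane.  A minimal usage sketch, assuming previously loaded data
 * (names are illustrative only):
 *
 *   uint16_t src[4] = {10, 20, 30, 40};
 *   uint16x4x4_t v = vld4_u16(some_u16_ptr);   // earlier de-interleaved load
 *   v = vld4_lane_u16(src, v, 0);              // refresh lane 0 only
 */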
11247 #ifdef __LITTLE_ENDIAN__
11248 #define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
11249   poly8x8x4_t __s1 = __p1; \
11250   poly8x8x4_t __ret; \
11251   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \
11252   __ret; \
11253 })
11254 #else
11255 #define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
11256   poly8x8x4_t __s1 = __p1; \
11257   poly8x8x4_t __rev1; \
11258   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
11259   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
11260   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
11261   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
11262   poly8x8x4_t __ret; \
11263   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \
11264  \
11265   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
11266   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
11267   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
11268   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
11269   __ret; \
11270 })
11271 #endif
11272 
11273 #ifdef __LITTLE_ENDIAN__
11274 #define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
11275   poly16x4x4_t __s1 = __p1; \
11276   poly16x4x4_t __ret; \
11277   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \
11278   __ret; \
11279 })
11280 #else
11281 #define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
11282   poly16x4x4_t __s1 = __p1; \
11283   poly16x4x4_t __rev1; \
11284   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
11285   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
11286   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
11287   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
11288   poly16x4x4_t __ret; \
11289   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \
11290  \
11291   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
11292   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
11293   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
11294   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
11295   __ret; \
11296 })
11297 #endif
11298 
11299 #ifdef __LITTLE_ENDIAN__
11300 #define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
11301   poly16x8x4_t __s1 = __p1; \
11302   poly16x8x4_t __ret; \
11303   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \
11304   __ret; \
11305 })
11306 #else
11307 #define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
11308   poly16x8x4_t __s1 = __p1; \
11309   poly16x8x4_t __rev1; \
11310   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
11311   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
11312   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
11313   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
11314   poly16x8x4_t __ret; \
11315   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \
11316  \
11317   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
11318   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
11319   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
11320   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
11321   __ret; \
11322 })
11323 #endif
11324 
11325 #ifdef __LITTLE_ENDIAN__
11326 #define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
11327   uint32x4x4_t __s1 = __p1; \
11328   uint32x4x4_t __ret; \
11329   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \
11330   __ret; \
11331 })
11332 #else
11333 #define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
11334   uint32x4x4_t __s1 = __p1; \
11335   uint32x4x4_t __rev1; \
11336   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
11337   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
11338   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
11339   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
11340   uint32x4x4_t __ret; \
11341   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \
11342  \
11343   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
11344   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
11345   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
11346   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
11347   __ret; \
11348 })
11349 #endif
11350 
11351 #ifdef __LITTLE_ENDIAN__
11352 #define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
11353   uint16x8x4_t __s1 = __p1; \
11354   uint16x8x4_t __ret; \
11355   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \
11356   __ret; \
11357 })
11358 #else
11359 #define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
11360   uint16x8x4_t __s1 = __p1; \
11361   uint16x8x4_t __rev1; \
11362   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
11363   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
11364   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
11365   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
11366   uint16x8x4_t __ret; \
11367   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \
11368  \
11369   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
11370   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
11371   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
11372   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
11373   __ret; \
11374 })
11375 #endif
11376 
11377 #ifdef __LITTLE_ENDIAN__
11378 #define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
11379   float32x4x4_t __s1 = __p1; \
11380   float32x4x4_t __ret; \
11381   __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 41); \
11382   __ret; \
11383 })
11384 #else
11385 #define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
11386   float32x4x4_t __s1 = __p1; \
11387   float32x4x4_t __rev1; \
11388   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
11389   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
11390   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
11391   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
11392   float32x4x4_t __ret; \
11393   __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 41); \
11394  \
11395   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
11396   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
11397   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
11398   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
11399   __ret; \
11400 })
11401 #endif
11402 
11403 #ifdef __LITTLE_ENDIAN__
11404 #define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
11405   float16x8x4_t __s1 = __p1; \
11406   float16x8x4_t __ret; \
11407   __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 40); \
11408   __ret; \
11409 })
11410 #else
11411 #define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
11412   float16x8x4_t __s1 = __p1; \
11413   float16x8x4_t __rev1; \
11414   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
11415   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
11416   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
11417   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
11418   float16x8x4_t __ret; \
11419   __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 40); \
11420  \
11421   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
11422   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
11423   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
11424   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
11425   __ret; \
11426 })
11427 #endif
11428 
11429 #ifdef __LITTLE_ENDIAN__
11430 #define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
11431   int32x4x4_t __s1 = __p1; \
11432   int32x4x4_t __ret; \
11433   __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 34); \
11434   __ret; \
11435 })
11436 #else
11437 #define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
11438   int32x4x4_t __s1 = __p1; \
11439   int32x4x4_t __rev1; \
11440   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
11441   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
11442   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
11443   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
11444   int32x4x4_t __ret; \
11445   __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 34); \
11446  \
11447   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
11448   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
11449   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
11450   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
11451   __ret; \
11452 })
11453 #endif
11454 
11455 #ifdef __LITTLE_ENDIAN__
11456 #define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
11457   int16x8x4_t __s1 = __p1; \
11458   int16x8x4_t __ret; \
11459   __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 33); \
11460   __ret; \
11461 })
11462 #else
11463 #define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
11464   int16x8x4_t __s1 = __p1; \
11465   int16x8x4_t __rev1; \
11466   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
11467   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
11468   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
11469   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
11470   int16x8x4_t __ret; \
11471   __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 33); \
11472  \
11473   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
11474   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
11475   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
11476   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
11477   __ret; \
11478 })
11479 #endif
11480 
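/*
 * The vld4q_lane_* macros above take a pointer __p0, a four-vector aggregate
 * __p1 and a constant lane index __p2; they load four de-interleaved elements
 * from __p0 into lane __p2 of each 128-bit vector and yield the updated
 * aggregate. The vld4_lane_* macros that follow are the same operation on the
 * 64-bit "d" register types (uint8x8x4_t, uint32x2x4_t, and so on).
 */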
11481 #ifdef __LITTLE_ENDIAN__
11482 #define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
11483   uint8x8x4_t __s1 = __p1; \
11484   uint8x8x4_t __ret; \
11485   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \
11486   __ret; \
11487 })
11488 #else
11489 #define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
11490   uint8x8x4_t __s1 = __p1; \
11491   uint8x8x4_t __rev1; \
11492   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
11493   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
11494   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
11495   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
11496   uint8x8x4_t __ret; \
11497   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \
11498  \
11499   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
11500   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
11501   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
11502   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
11503   __ret; \
11504 })
11505 #endif
11506 
11507 #ifdef __LITTLE_ENDIAN__
11508 #define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
11509   uint32x2x4_t __s1 = __p1; \
11510   uint32x2x4_t __ret; \
11511   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \
11512   __ret; \
11513 })
11514 #else
11515 #define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
11516   uint32x2x4_t __s1 = __p1; \
11517   uint32x2x4_t __rev1; \
11518   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
11519   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
11520   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
11521   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
11522   uint32x2x4_t __ret; \
11523   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \
11524  \
11525   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
11526   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
11527   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
11528   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
11529   __ret; \
11530 })
11531 #endif
11532 
11533 #ifdef __LITTLE_ENDIAN__
11534 #define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
11535   uint16x4x4_t __s1 = __p1; \
11536   uint16x4x4_t __ret; \
11537   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \
11538   __ret; \
11539 })
11540 #else
11541 #define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
11542   uint16x4x4_t __s1 = __p1; \
11543   uint16x4x4_t __rev1; \
11544   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
11545   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
11546   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
11547   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
11548   uint16x4x4_t __ret; \
11549   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \
11550  \
11551   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
11552   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
11553   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
11554   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
11555   __ret; \
11556 })
11557 #endif
11558 
11559 #ifdef __LITTLE_ENDIAN__
11560 #define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
11561   int8x8x4_t __s1 = __p1; \
11562   int8x8x4_t __ret; \
11563   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \
11564   __ret; \
11565 })
11566 #else
11567 #define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
11568   int8x8x4_t __s1 = __p1; \
11569   int8x8x4_t __rev1; \
11570   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
11571   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
11572   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
11573   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
11574   int8x8x4_t __ret; \
11575   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \
11576  \
11577   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
11578   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
11579   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
11580   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
11581   __ret; \
11582 })
11583 #endif
11584 
11585 #ifdef __LITTLE_ENDIAN__
11586 #define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
11587   float32x2x4_t __s1 = __p1; \
11588   float32x2x4_t __ret; \
11589   __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 9); \
11590   __ret; \
11591 })
11592 #else
11593 #define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
11594   float32x2x4_t __s1 = __p1; \
11595   float32x2x4_t __rev1; \
11596   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
11597   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
11598   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
11599   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
11600   float32x2x4_t __ret; \
11601   __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 9); \
11602  \
11603   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
11604   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
11605   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
11606   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
11607   __ret; \
11608 })
11609 #endif
11610 
11611 #ifdef __LITTLE_ENDIAN__
11612 #define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
11613   float16x4x4_t __s1 = __p1; \
11614   float16x4x4_t __ret; \
11615   __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 8); \
11616   __ret; \
11617 })
11618 #else
11619 #define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
11620   float16x4x4_t __s1 = __p1; \
11621   float16x4x4_t __rev1; \
11622   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
11623   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
11624   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
11625   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
11626   float16x4x4_t __ret; \
11627   __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 8); \
11628  \
11629   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
11630   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
11631   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
11632   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
11633   __ret; \
11634 })
11635 #endif
11636 
11637 #ifdef __LITTLE_ENDIAN__
11638 #define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
11639   int32x2x4_t __s1 = __p1; \
11640   int32x2x4_t __ret; \
11641   __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 2); \
11642   __ret; \
11643 })
11644 #else
11645 #define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
11646   int32x2x4_t __s1 = __p1; \
11647   int32x2x4_t __rev1; \
11648   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
11649   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
11650   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
11651   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
11652   int32x2x4_t __ret; \
11653   __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 2); \
11654  \
11655   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
11656   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
11657   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
11658   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
11659   __ret; \
11660 })
11661 #endif
11662 
11663 #ifdef __LITTLE_ENDIAN__
11664 #define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
11665   int16x4x4_t __s1 = __p1; \
11666   int16x4x4_t __ret; \
11667   __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 1); \
11668   __ret; \
11669 })
11670 #else
11671 #define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
11672   int16x4x4_t __s1 = __p1; \
11673   int16x4x4_t __rev1; \
11674   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
11675   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
11676   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
11677   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
11678   int16x4x4_t __ret; \
11679   __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 1); \
11680  \
11681   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
11682   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
11683   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
11684   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
11685   __ret; \
11686 })
11687 #endif
11688 
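/*
 * The intrinsics in this header are defined twice: the __LITTLE_ENDIAN__
 * branch passes its operands straight to the builtin, while the big-endian
 * branch first reverses the lane order with __builtin_shufflevector, calls
 * the builtin, and reverses the result back, so callers see the same lane
 * numbering on either byte order.
 *
 * A minimal usage sketch for the vmax family that follows (illustrative
 * only; the array and variable names are assumptions, not part of this
 * header):
 *
 *   float a[4] = {1.0f, 5.0f, 2.0f, 8.0f};
 *   float b[4] = {4.0f, 3.0f, 7.0f, 6.0f};
 *   float32x4_t hi = vmaxq_f32(vld1q_f32(a), vld1q_f32(b));
 *   // hi holds {4.0f, 5.0f, 7.0f, 8.0f}, the per-lane maxima.
 */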
11689 #ifdef __LITTLE_ENDIAN__
11690 __ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
11691   uint8x16_t __ret;
11692   __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
11693   return __ret;
11694 }
11695 #else
11696 __ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
11697   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
11698   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
11699   uint8x16_t __ret;
11700   __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
11701   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
11702   return __ret;
11703 }
11704 #endif
11705 
11706 #ifdef __LITTLE_ENDIAN__
11707 __ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
11708   uint32x4_t __ret;
11709   __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
11710   return __ret;
11711 }
11712 #else
11713 __ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
11714   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
11715   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
11716   uint32x4_t __ret;
11717   __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
11718   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
11719   return __ret;
11720 }
11721 #endif
11722 
11723 #ifdef __LITTLE_ENDIAN__
11724 __ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
11725   uint16x8_t __ret;
11726   __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
11727   return __ret;
11728 }
11729 #else
11730 __ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
11731   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
11732   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
11733   uint16x8_t __ret;
11734   __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
11735   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
11736   return __ret;
11737 }
11738 #endif
11739 
11740 #ifdef __LITTLE_ENDIAN__
11741 __ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
11742   int8x16_t __ret;
11743   __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
11744   return __ret;
11745 }
11746 #else
11747 __ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
11748   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
11749   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
11750   int8x16_t __ret;
11751   __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
11752   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
11753   return __ret;
11754 }
11755 #endif
11756 
11757 #ifdef __LITTLE_ENDIAN__
11758 __ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
11759   float32x4_t __ret;
11760   __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
11761   return __ret;
11762 }
11763 #else
11764 __ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
11765   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
11766   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
11767   float32x4_t __ret;
11768   __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
11769   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
11770   return __ret;
11771 }
11772 #endif
11773 
11774 #ifdef __LITTLE_ENDIAN__
11775 __ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
11776   int32x4_t __ret;
11777   __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
11778   return __ret;
11779 }
11780 #else
11781 __ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
11782   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
11783   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
11784   int32x4_t __ret;
11785   __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
11786   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
11787   return __ret;
11788 }
11789 #endif
11790 
11791 #ifdef __LITTLE_ENDIAN__
11792 __ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
11793   int16x8_t __ret;
11794   __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
11795   return __ret;
11796 }
11797 #else
11798 __ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
11799   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
11800   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
11801   int16x8_t __ret;
11802   __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
11803   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
11804   return __ret;
11805 }
11806 #endif
11807 
11808 #ifdef __LITTLE_ENDIAN__
11809 __ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
11810   uint8x8_t __ret;
11811   __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
11812   return __ret;
11813 }
11814 #else
11815 __ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
11816   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
11817   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
11818   uint8x8_t __ret;
11819   __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
11820   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
11821   return __ret;
11822 }
11823 #endif
11824 
11825 #ifdef __LITTLE_ENDIAN__
11826 __ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
11827   uint32x2_t __ret;
11828   __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
11829   return __ret;
11830 }
11831 #else
11832 __ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
11833   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
11834   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
11835   uint32x2_t __ret;
11836   __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
11837   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
11838   return __ret;
11839 }
11840 #endif
11841 
11842 #ifdef __LITTLE_ENDIAN__
11843 __ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
11844   uint16x4_t __ret;
11845   __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
11846   return __ret;
11847 }
11848 #else
11849 __ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
11850   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
11851   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
11852   uint16x4_t __ret;
11853   __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
11854   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
11855   return __ret;
11856 }
11857 #endif
11858 
11859 #ifdef __LITTLE_ENDIAN__
11860 __ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) {
11861   int8x8_t __ret;
11862   __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
11863   return __ret;
11864 }
11865 #else
11866 __ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) {
11867   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
11868   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
11869   int8x8_t __ret;
11870   __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
11871   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
11872   return __ret;
11873 }
11874 #endif
11875 
11876 #ifdef __LITTLE_ENDIAN__
11877 __ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) {
11878   float32x2_t __ret;
11879   __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
11880   return __ret;
11881 }
11882 #else
11883 __ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) {
11884   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
11885   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
11886   float32x2_t __ret;
11887   __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
11888   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
11889   return __ret;
11890 }
11891 #endif
11892 
11893 #ifdef __LITTLE_ENDIAN__
11894 __ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) {
11895   int32x2_t __ret;
11896   __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
11897   return __ret;
11898 }
11899 #else
11900 __ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) {
11901   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
11902   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
11903   int32x2_t __ret;
11904   __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
11905   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
11906   return __ret;
11907 }
11908 #endif
11909 
11910 #ifdef __LITTLE_ENDIAN__
11911 __ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) {
11912   int16x4_t __ret;
11913   __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
11914   return __ret;
11915 }
11916 #else
11917 __ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) {
11918   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
11919   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
11920   int16x4_t __ret;
11921   __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
11922   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
11923   return __ret;
11924 }
11925 #endif
11926 
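/*
 * The vminq_* and vmin_* definitions below mirror the vmax family above,
 * selecting the per-lane minimum instead of the maximum; the type code passed
 * as the builtin's last argument (48 for uint8x16_t, 41 for float32x4_t, and
 * so on) is unchanged.
 */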
11927 #ifdef __LITTLE_ENDIAN__
11928 __ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
11929   uint8x16_t __ret;
11930   __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
11931   return __ret;
11932 }
11933 #else
11934 __ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
11935   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
11936   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
11937   uint8x16_t __ret;
11938   __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
11939   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
11940   return __ret;
11941 }
11942 #endif
11943 
11944 #ifdef __LITTLE_ENDIAN__
11945 __ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
11946   uint32x4_t __ret;
11947   __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
11948   return __ret;
11949 }
11950 #else
11951 __ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
11952   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
11953   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
11954   uint32x4_t __ret;
11955   __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
11956   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
11957   return __ret;
11958 }
11959 #endif
11960 
11961 #ifdef __LITTLE_ENDIAN__
11962 __ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
11963   uint16x8_t __ret;
11964   __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
11965   return __ret;
11966 }
11967 #else
11968 __ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
11969   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
11970   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
11971   uint16x8_t __ret;
11972   __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
11973   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
11974   return __ret;
11975 }
11976 #endif
11977 
11978 #ifdef __LITTLE_ENDIAN__
11979 __ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) {
11980   int8x16_t __ret;
11981   __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
11982   return __ret;
11983 }
11984 #else
11985 __ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) {
11986   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
11987   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
11988   int8x16_t __ret;
11989   __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
11990   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
11991   return __ret;
11992 }
11993 #endif
11994 
11995 #ifdef __LITTLE_ENDIAN__
11996 __ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) {
11997   float32x4_t __ret;
11998   __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
11999   return __ret;
12000 }
12001 #else
12002 __ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) {
12003   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
12004   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
12005   float32x4_t __ret;
12006   __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
12007   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
12008   return __ret;
12009 }
12010 #endif
12011 
12012 #ifdef __LITTLE_ENDIAN__
12013 __ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) {
12014   int32x4_t __ret;
12015   __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
12016   return __ret;
12017 }
12018 #else
12019 __ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) {
12020   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
12021   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
12022   int32x4_t __ret;
12023   __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
12024   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
12025   return __ret;
12026 }
12027 #endif
12028 
12029 #ifdef __LITTLE_ENDIAN__
12030 __ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) {
12031   int16x8_t __ret;
12032   __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
12033   return __ret;
12034 }
12035 #else
12036 __ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) {
12037   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
12038   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
12039   int16x8_t __ret;
12040   __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
12041   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
12042   return __ret;
12043 }
12044 #endif
12045 
12046 #ifdef __LITTLE_ENDIAN__
12047 __ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
12048   uint8x8_t __ret;
12049   __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
12050   return __ret;
12051 }
12052 #else
12053 __ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
12054   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
12055   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
12056   uint8x8_t __ret;
12057   __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
12058   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
12059   return __ret;
12060 }
12061 #endif
12062 
12063 #ifdef __LITTLE_ENDIAN__
12064 __ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
12065   uint32x2_t __ret;
12066   __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
12067   return __ret;
12068 }
12069 #else
12070 __ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
12071   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
12072   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
12073   uint32x2_t __ret;
12074   __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
12075   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
12076   return __ret;
12077 }
12078 #endif
12079 
12080 #ifdef __LITTLE_ENDIAN__
12081 __ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
12082   uint16x4_t __ret;
12083   __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
12084   return __ret;
12085 }
12086 #else
12087 __ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
12088   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
12089   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
12090   uint16x4_t __ret;
12091   __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
12092   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
12093   return __ret;
12094 }
12095 #endif
12096 
12097 #ifdef __LITTLE_ENDIAN__
12098 __ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) {
12099   int8x8_t __ret;
12100   __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
12101   return __ret;
12102 }
12103 #else
12104 __ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) {
12105   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
12106   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
12107   int8x8_t __ret;
12108   __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
12109   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
12110   return __ret;
12111 }
12112 #endif
12113 
12114 #ifdef __LITTLE_ENDIAN__
12115 __ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) {
12116   float32x2_t __ret;
12117   __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
12118   return __ret;
12119 }
12120 #else
12121 __ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) {
12122   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
12123   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
12124   float32x2_t __ret;
12125   __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
12126   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
12127   return __ret;
12128 }
12129 #endif
12130 
12131 #ifdef __LITTLE_ENDIAN__
12132 __ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) {
12133   int32x2_t __ret;
12134   __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
12135   return __ret;
12136 }
12137 #else
12138 __ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) {
12139   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
12140   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
12141   int32x2_t __ret;
12142   __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
12143   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
12144   return __ret;
12145 }
12146 #endif
12147 
12148 #ifdef __LITTLE_ENDIAN__
12149 __ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) {
12150   int16x4_t __ret;
12151   __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
12152   return __ret;
12153 }
12154 #else
12155 __ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) {
12156   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
12157   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
12158   int16x4_t __ret;
12159   __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
12160   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
12161   return __ret;
12162 }
12163 #endif
12164 
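/*
 * The vmlaq_* and vmla_* multiply-accumulate intrinsics below are written
 * directly as __p0 + __p1 * __p2 on the vector types rather than through a
 * builtin, which typically lets the compiler fold the expression into a
 * single VMLA instruction.
 *
 * A minimal sketch of the scalar equivalent, for reference only (the names
 * acc, x and y are assumptions, not part of this header):
 *
 *   // for (int i = 0; i < 4; ++i) acc[i] += x[i] * y[i];
 *   float32x4_t acc2 = vmlaq_f32(acc, x, y);
 */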
12165 #ifdef __LITTLE_ENDIAN__
12166 __ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
12167   uint8x16_t __ret;
12168   __ret = __p0 + __p1 * __p2;
12169   return __ret;
12170 }
12171 #else
12172 __ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
12173   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
12174   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
12175   uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
12176   uint8x16_t __ret;
12177   __ret = __rev0 + __rev1 * __rev2;
12178   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
12179   return __ret;
12180 }
12181 #endif
12182 
12183 #ifdef __LITTLE_ENDIAN__
12184 __ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
12185   uint32x4_t __ret;
12186   __ret = __p0 + __p1 * __p2;
12187   return __ret;
12188 }
12189 #else
12190 __ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
12191   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
12192   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
12193   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
12194   uint32x4_t __ret;
12195   __ret = __rev0 + __rev1 * __rev2;
12196   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
12197   return __ret;
12198 }
12199 #endif
12200 
12201 #ifdef __LITTLE_ENDIAN__
12202 __ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
12203   uint16x8_t __ret;
12204   __ret = __p0 + __p1 * __p2;
12205   return __ret;
12206 }
12207 #else
12208 __ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
12209   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
12210   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
12211   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
12212   uint16x8_t __ret;
12213   __ret = __rev0 + __rev1 * __rev2;
12214   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
12215   return __ret;
12216 }
12217 #endif
12218 
12219 #ifdef __LITTLE_ENDIAN__
12220 __ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
12221   int8x16_t __ret;
12222   __ret = __p0 + __p1 * __p2;
12223   return __ret;
12224 }
12225 #else
12226 __ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
12227   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
12228   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
12229   int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
12230   int8x16_t __ret;
12231   __ret = __rev0 + __rev1 * __rev2;
12232   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
12233   return __ret;
12234 }
12235 #endif
12236 
12237 #ifdef __LITTLE_ENDIAN__
12238 __ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
12239   float32x4_t __ret;
12240   __ret = __p0 + __p1 * __p2;
12241   return __ret;
12242 }
12243 #else
12244 __ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
12245   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
12246   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
12247   float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
12248   float32x4_t __ret;
12249   __ret = __rev0 + __rev1 * __rev2;
12250   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
12251   return __ret;
12252 }
12253 #endif
12254 
12255 #ifdef __LITTLE_ENDIAN__
12256 __ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
12257   int32x4_t __ret;
12258   __ret = __p0 + __p1 * __p2;
12259   return __ret;
12260 }
12261 #else
12262 __ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
12263   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
12264   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
12265   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
12266   int32x4_t __ret;
12267   __ret = __rev0 + __rev1 * __rev2;
12268   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
12269   return __ret;
12270 }
12271 #endif
12272 
12273 #ifdef __LITTLE_ENDIAN__
12274 __ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
12275   int16x8_t __ret;
12276   __ret = __p0 + __p1 * __p2;
12277   return __ret;
12278 }
12279 #else
12280 __ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
12281   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
12282   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
12283   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
12284   int16x8_t __ret;
12285   __ret = __rev0 + __rev1 * __rev2;
12286   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
12287   return __ret;
12288 }
12289 #endif
12290 
12291 #ifdef __LITTLE_ENDIAN__
12292 __ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
12293   uint8x8_t __ret;
12294   __ret = __p0 + __p1 * __p2;
12295   return __ret;
12296 }
12297 #else
12298 __ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
12299   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
12300   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
12301   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
12302   uint8x8_t __ret;
12303   __ret = __rev0 + __rev1 * __rev2;
12304   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
12305   return __ret;
12306 }
12307 #endif
12308 
12309 #ifdef __LITTLE_ENDIAN__
12310 __ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
12311   uint32x2_t __ret;
12312   __ret = __p0 + __p1 * __p2;
12313   return __ret;
12314 }
12315 #else
12316 __ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
12317   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
12318   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
12319   uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
12320   uint32x2_t __ret;
12321   __ret = __rev0 + __rev1 * __rev2;
12322   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
12323   return __ret;
12324 }
12325 #endif
12326 
12327 #ifdef __LITTLE_ENDIAN__
12328 __ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
12329   uint16x4_t __ret;
12330   __ret = __p0 + __p1 * __p2;
12331   return __ret;
12332 }
12333 #else
12334 __ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
12335   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
12336   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
12337   uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
12338   uint16x4_t __ret;
12339   __ret = __rev0 + __rev1 * __rev2;
12340   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
12341   return __ret;
12342 }
12343 #endif
12344 
12345 #ifdef __LITTLE_ENDIAN__
12346 __ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
12347   int8x8_t __ret;
12348   __ret = __p0 + __p1 * __p2;
12349   return __ret;
12350 }
12351 #else
12352 __ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
12353   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
12354   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
12355   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
12356   int8x8_t __ret;
12357   __ret = __rev0 + __rev1 * __rev2;
12358   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
12359   return __ret;
12360 }
12361 #endif
12362 
12363 #ifdef __LITTLE_ENDIAN__
12364 __ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
12365   float32x2_t __ret;
12366   __ret = __p0 + __p1 * __p2;
12367   return __ret;
12368 }
12369 #else
12370 __ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
12371   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
12372   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
12373   float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
12374   float32x2_t __ret;
12375   __ret = __rev0 + __rev1 * __rev2;
12376   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
12377   return __ret;
12378 }
12379 #endif
12380 
12381 #ifdef __LITTLE_ENDIAN__
12382 __ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
12383   int32x2_t __ret;
12384   __ret = __p0 + __p1 * __p2;
12385   return __ret;
12386 }
12387 #else
12388 __ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
12389   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
12390   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
12391   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
12392   int32x2_t __ret;
12393   __ret = __rev0 + __rev1 * __rev2;
12394   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
12395   return __ret;
12396 }
12397 #endif
12398 
12399 #ifdef __LITTLE_ENDIAN__
12400 __ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
12401   int16x4_t __ret;
12402   __ret = __p0 + __p1 * __p2;
12403   return __ret;
12404 }
12405 #else
12406 __ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
12407   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
12408   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
12409   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
12410   int16x4_t __ret;
12411   __ret = __rev0 + __rev1 * __rev2;
12412   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
12413   return __ret;
12414 }
12415 #endif
12416 
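/*
 * The *_lane_* forms that follow are macros rather than inline functions
 * because the lane index __p3 feeds __builtin_shufflevector, which requires
 * compile-time-constant indices; the chosen lane of __p2 is broadcast across
 * the full vector width before the multiply-accumulate.
 */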
12417 #ifdef __LITTLE_ENDIAN__
12418 #define vmlaq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
12419   uint32x4_t __s0 = __p0; \
12420   uint32x4_t __s1 = __p1; \
12421   uint32x2_t __s2 = __p2; \
12422   uint32x4_t __ret; \
12423   __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
12424   __ret; \
12425 })
12426 #else
12427 #define vmlaq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
12428   uint32x4_t __s0 = __p0; \
12429   uint32x4_t __s1 = __p1; \
12430   uint32x2_t __s2 = __p2; \
12431   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
12432   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
12433   uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
12434   uint32x4_t __ret; \
12435   __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
12436   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
12437   __ret; \
12438 })
12439 #endif
12440 
12441 #ifdef __LITTLE_ENDIAN__
12442 #define vmlaq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
12443   uint16x8_t __s0 = __p0; \
12444   uint16x8_t __s1 = __p1; \
12445   uint16x4_t __s2 = __p2; \
12446   uint16x8_t __ret; \
12447   __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
12448   __ret; \
12449 })
12450 #else
12451 #define vmlaq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
12452   uint16x8_t __s0 = __p0; \
12453   uint16x8_t __s1 = __p1; \
12454   uint16x4_t __s2 = __p2; \
12455   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
12456   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
12457   uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
12458   uint16x8_t __ret; \
12459   __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
12460   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
12461   __ret; \
12462 })
12463 #endif
12464 
12465 #ifdef __LITTLE_ENDIAN__
12466 #define vmlaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
12467   float32x4_t __s0 = __p0; \
12468   float32x4_t __s1 = __p1; \
12469   float32x2_t __s2 = __p2; \
12470   float32x4_t __ret; \
12471   __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
12472   __ret; \
12473 })
12474 #else
12475 #define vmlaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
12476   float32x4_t __s0 = __p0; \
12477   float32x4_t __s1 = __p1; \
12478   float32x2_t __s2 = __p2; \
12479   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
12480   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
12481   float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
12482   float32x4_t __ret; \
12483   __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
12484   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
12485   __ret; \
12486 })
12487 #endif
12488 
12489 #ifdef __LITTLE_ENDIAN__
12490 #define vmlaq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
12491   int32x4_t __s0 = __p0; \
12492   int32x4_t __s1 = __p1; \
12493   int32x2_t __s2 = __p2; \
12494   int32x4_t __ret; \
12495   __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
12496   __ret; \
12497 })
12498 #else
12499 #define vmlaq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
12500   int32x4_t __s0 = __p0; \
12501   int32x4_t __s1 = __p1; \
12502   int32x2_t __s2 = __p2; \
12503   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
12504   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
12505   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
12506   int32x4_t __ret; \
12507   __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
12508   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
12509   __ret; \
12510 })
12511 #endif
12512 
12513 #ifdef __LITTLE_ENDIAN__
12514 #define vmlaq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
12515   int16x8_t __s0 = __p0; \
12516   int16x8_t __s1 = __p1; \
12517   int16x4_t __s2 = __p2; \
12518   int16x8_t __ret; \
12519   __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
12520   __ret; \
12521 })
12522 #else
12523 #define vmlaq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
12524   int16x8_t __s0 = __p0; \
12525   int16x8_t __s1 = __p1; \
12526   int16x4_t __s2 = __p2; \
12527   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
12528   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
12529   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
12530   int16x8_t __ret; \
12531   __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
12532   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
12533   __ret; \
12534 })
12535 #endif
12536 
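/*
 * vmla_lane_* below are the 64-bit counterparts of the vmlaq_lane_* macros
 * above. A usage sketch (illustrative only; the names acc, x and coeff are
 * assumptions, not part of this header):
 *
 *   // acc[i] += x[i] * coeff[1] for both lanes of a float32x2_t
 *   float32x2_t r = vmla_lane_f32(acc, x, coeff, 1);
 */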
12537 #ifdef __LITTLE_ENDIAN__
12538 #define vmla_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
12539   uint32x2_t __s0 = __p0; \
12540   uint32x2_t __s1 = __p1; \
12541   uint32x2_t __s2 = __p2; \
12542   uint32x2_t __ret; \
12543   __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
12544   __ret; \
12545 })
12546 #else
12547 #define vmla_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
12548   uint32x2_t __s0 = __p0; \
12549   uint32x2_t __s1 = __p1; \
12550   uint32x2_t __s2 = __p2; \
12551   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
12552   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
12553   uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
12554   uint32x2_t __ret; \
12555   __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
12556   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
12557   __ret; \
12558 })
12559 #endif
12560 
12561 #ifdef __LITTLE_ENDIAN__
12562 #define vmla_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
12563   uint16x4_t __s0 = __p0; \
12564   uint16x4_t __s1 = __p1; \
12565   uint16x4_t __s2 = __p2; \
12566   uint16x4_t __ret; \
12567   __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
12568   __ret; \
12569 })
12570 #else
12571 #define vmla_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
12572   uint16x4_t __s0 = __p0; \
12573   uint16x4_t __s1 = __p1; \
12574   uint16x4_t __s2 = __p2; \
12575   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
12576   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
12577   uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
12578   uint16x4_t __ret; \
12579   __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
12580   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
12581   __ret; \
12582 })
12583 #endif
12584 
12585 #ifdef __LITTLE_ENDIAN__
12586 #define vmla_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
12587   float32x2_t __s0 = __p0; \
12588   float32x2_t __s1 = __p1; \
12589   float32x2_t __s2 = __p2; \
12590   float32x2_t __ret; \
12591   __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
12592   __ret; \
12593 })
12594 #else
12595 #define vmla_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
12596   float32x2_t __s0 = __p0; \
12597   float32x2_t __s1 = __p1; \
12598   float32x2_t __s2 = __p2; \
12599   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
12600   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
12601   float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
12602   float32x2_t __ret; \
12603   __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
12604   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
12605   __ret; \
12606 })
12607 #endif
12608 
12609 #ifdef __LITTLE_ENDIAN__
12610 #define vmla_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
12611   int32x2_t __s0 = __p0; \
12612   int32x2_t __s1 = __p1; \
12613   int32x2_t __s2 = __p2; \
12614   int32x2_t __ret; \
12615   __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
12616   __ret; \
12617 })
12618 #else
12619 #define vmla_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
12620   int32x2_t __s0 = __p0; \
12621   int32x2_t __s1 = __p1; \
12622   int32x2_t __s2 = __p2; \
12623   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
12624   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
12625   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
12626   int32x2_t __ret; \
12627   __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
12628   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
12629   __ret; \
12630 })
12631 #endif
12632 
12633 #ifdef __LITTLE_ENDIAN__
12634 #define vmla_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
12635   int16x4_t __s0 = __p0; \
12636   int16x4_t __s1 = __p1; \
12637   int16x4_t __s2 = __p2; \
12638   int16x4_t __ret; \
12639   __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
12640   __ret; \
12641 })
12642 #else
12643 #define vmla_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
12644   int16x4_t __s0 = __p0; \
12645   int16x4_t __s1 = __p1; \
12646   int16x4_t __s2 = __p2; \
12647   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
12648   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
12649   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
12650   int16x4_t __ret; \
12651   __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
12652   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
12653   __ret; \
12654 })
12655 #endif
12656 
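/* vmlaq_n_* / vmla_n_*: multiply-accumulate with a scalar.  __p2 is broadcast
 * to every lane and __ret[i] = __p0[i] + __p1[i] * __p2.
 *
 * Illustrative sketch only (not part of this header); the helper name axpy4 is
 * hypothetical.  It accumulates a * x[i] into acc[i] four floats at a time,
 * leaving any tail elements to the caller:
 *
 *   static void axpy4(float *acc, const float *x, float a, int n) {
 *     for (int i = 0; i + 4 <= n; i += 4) {
 *       float32x4_t v = vmlaq_n_f32(vld1q_f32(acc + i), vld1q_f32(x + i), a);
 *       vst1q_f32(acc + i, v);
 *     }
 *   }
 */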
12657 #ifdef __LITTLE_ENDIAN__
12658 __ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
12659   uint32x4_t __ret;
12660   __ret = __p0 + __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
12661   return __ret;
12662 }
12663 #else
12664 __ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
12665   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
12666   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
12667   uint32x4_t __ret;
12668   __ret = __rev0 + __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
12669   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
12670   return __ret;
12671 }
12672 #endif
12673 
12674 #ifdef __LITTLE_ENDIAN__
12675 __ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
12676   uint16x8_t __ret;
12677   __ret = __p0 + __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
12678   return __ret;
12679 }
12680 #else
12681 __ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
12682   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
12683   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
12684   uint16x8_t __ret;
12685   __ret = __rev0 + __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
12686   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
12687   return __ret;
12688 }
12689 #endif
12690 
12691 #ifdef __LITTLE_ENDIAN__
12692 __ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
12693   float32x4_t __ret;
12694   __ret = __p0 + __p1 * (float32x4_t) {__p2, __p2, __p2, __p2};
12695   return __ret;
12696 }
12697 #else
12698 __ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
12699   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
12700   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
12701   float32x4_t __ret;
12702   __ret = __rev0 + __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2};
12703   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
12704   return __ret;
12705 }
12706 #endif
12707 
12708 #ifdef __LITTLE_ENDIAN__
12709 __ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
12710   int32x4_t __ret;
12711   __ret = __p0 + __p1 * (int32x4_t) {__p2, __p2, __p2, __p2};
12712   return __ret;
12713 }
12714 #else
12715 __ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
12716   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
12717   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
12718   int32x4_t __ret;
12719   __ret = __rev0 + __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2};
12720   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
12721   return __ret;
12722 }
12723 #endif
12724 
12725 #ifdef __LITTLE_ENDIAN__
12726 __ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
12727   int16x8_t __ret;
12728   __ret = __p0 + __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
12729   return __ret;
12730 }
12731 #else
12732 __ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
12733   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
12734   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
12735   int16x8_t __ret;
12736   __ret = __rev0 + __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
12737   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
12738   return __ret;
12739 }
12740 #endif
12741 
12742 #ifdef __LITTLE_ENDIAN__
12743 __ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
12744   uint32x2_t __ret;
12745   __ret = __p0 + __p1 * (uint32x2_t) {__p2, __p2};
12746   return __ret;
12747 }
12748 #else
12749 __ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
12750   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
12751   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
12752   uint32x2_t __ret;
12753   __ret = __rev0 + __rev1 * (uint32x2_t) {__p2, __p2};
12754   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
12755   return __ret;
12756 }
12757 #endif
12758 
12759 #ifdef __LITTLE_ENDIAN__
12760 __ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
12761   uint16x4_t __ret;
12762   __ret = __p0 + __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
12763   return __ret;
12764 }
12765 #else
12766 __ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
12767   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
12768   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
12769   uint16x4_t __ret;
12770   __ret = __rev0 + __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
12771   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
12772   return __ret;
12773 }
12774 #endif
12775 
12776 #ifdef __LITTLE_ENDIAN__
12777 __ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
12778   float32x2_t __ret;
12779   __ret = __p0 + __p1 * (float32x2_t) {__p2, __p2};
12780   return __ret;
12781 }
12782 #else
12783 __ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
12784   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
12785   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
12786   float32x2_t __ret;
12787   __ret = __rev0 + __rev1 * (float32x2_t) {__p2, __p2};
12788   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
12789   return __ret;
12790 }
12791 #endif
12792 
12793 #ifdef __LITTLE_ENDIAN__
12794 __ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
12795   int32x2_t __ret;
12796   __ret = __p0 + __p1 * (int32x2_t) {__p2, __p2};
12797   return __ret;
12798 }
12799 #else
12800 __ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
12801   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
12802   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
12803   int32x2_t __ret;
12804   __ret = __rev0 + __rev1 * (int32x2_t) {__p2, __p2};
12805   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
12806   return __ret;
12807 }
12808 #endif
12809 
12810 #ifdef __LITTLE_ENDIAN__
12811 __ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
12812   int16x4_t __ret;
12813   __ret = __p0 + __p1 * (int16x4_t) {__p2, __p2, __p2, __p2};
12814   return __ret;
12815 }
12816 #else
12817 __ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
12818   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
12819   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
12820   int16x4_t __ret;
12821   __ret = __rev0 + __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2};
12822   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
12823   return __ret;
12824 }
12825 #endif
12826 
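/* vmlsq_* / vmls_*: element-wise multiply-subtract,
 * __ret[i] = __p0[i] - __p1[i] * __p2[i]. */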
12827 #ifdef __LITTLE_ENDIAN__
12828 __ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
12829   uint8x16_t __ret;
12830   __ret = __p0 - __p1 * __p2;
12831   return __ret;
12832 }
12833 #else
12834 __ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
12835   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
12836   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
12837   uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
12838   uint8x16_t __ret;
12839   __ret = __rev0 - __rev1 * __rev2;
12840   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
12841   return __ret;
12842 }
12843 #endif
12844 
12845 #ifdef __LITTLE_ENDIAN__
12846 __ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
12847   uint32x4_t __ret;
12848   __ret = __p0 - __p1 * __p2;
12849   return __ret;
12850 }
12851 #else
12852 __ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
12853   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
12854   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
12855   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
12856   uint32x4_t __ret;
12857   __ret = __rev0 - __rev1 * __rev2;
12858   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
12859   return __ret;
12860 }
12861 #endif
12862 
12863 #ifdef __LITTLE_ENDIAN__
12864 __ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
12865   uint16x8_t __ret;
12866   __ret = __p0 - __p1 * __p2;
12867   return __ret;
12868 }
12869 #else
12870 __ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
12871   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
12872   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
12873   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
12874   uint16x8_t __ret;
12875   __ret = __rev0 - __rev1 * __rev2;
12876   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
12877   return __ret;
12878 }
12879 #endif
12880 
12881 #ifdef __LITTLE_ENDIAN__
12882 __ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
12883   int8x16_t __ret;
12884   __ret = __p0 - __p1 * __p2;
12885   return __ret;
12886 }
12887 #else
12888 __ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
12889   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
12890   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
12891   int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
12892   int8x16_t __ret;
12893   __ret = __rev0 - __rev1 * __rev2;
12894   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
12895   return __ret;
12896 }
12897 #endif
12898 
12899 #ifdef __LITTLE_ENDIAN__
12900 __ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
12901   float32x4_t __ret;
12902   __ret = __p0 - __p1 * __p2;
12903   return __ret;
12904 }
12905 #else
12906 __ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
12907   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
12908   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
12909   float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
12910   float32x4_t __ret;
12911   __ret = __rev0 - __rev1 * __rev2;
12912   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
12913   return __ret;
12914 }
12915 #endif
12916 
12917 #ifdef __LITTLE_ENDIAN__
12918 __ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
12919   int32x4_t __ret;
12920   __ret = __p0 - __p1 * __p2;
12921   return __ret;
12922 }
12923 #else
12924 __ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
12925   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
12926   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
12927   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
12928   int32x4_t __ret;
12929   __ret = __rev0 - __rev1 * __rev2;
12930   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
12931   return __ret;
12932 }
12933 #endif
12934 
12935 #ifdef __LITTLE_ENDIAN__
12936 __ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
12937   int16x8_t __ret;
12938   __ret = __p0 - __p1 * __p2;
12939   return __ret;
12940 }
12941 #else
12942 __ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
12943   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
12944   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
12945   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
12946   int16x8_t __ret;
12947   __ret = __rev0 - __rev1 * __rev2;
12948   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
12949   return __ret;
12950 }
12951 #endif
12952 
12953 #ifdef __LITTLE_ENDIAN__
12954 __ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
12955   uint8x8_t __ret;
12956   __ret = __p0 - __p1 * __p2;
12957   return __ret;
12958 }
12959 #else
12960 __ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
12961   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
12962   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
12963   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
12964   uint8x8_t __ret;
12965   __ret = __rev0 - __rev1 * __rev2;
12966   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
12967   return __ret;
12968 }
12969 #endif
12970 
12971 #ifdef __LITTLE_ENDIAN__
12972 __ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
12973   uint32x2_t __ret;
12974   __ret = __p0 - __p1 * __p2;
12975   return __ret;
12976 }
12977 #else
12978 __ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
12979   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
12980   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
12981   uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
12982   uint32x2_t __ret;
12983   __ret = __rev0 - __rev1 * __rev2;
12984   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
12985   return __ret;
12986 }
12987 #endif
12988 
12989 #ifdef __LITTLE_ENDIAN__
12990 __ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
12991   uint16x4_t __ret;
12992   __ret = __p0 - __p1 * __p2;
12993   return __ret;
12994 }
12995 #else
12996 __ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
12997   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
12998   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
12999   uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
13000   uint16x4_t __ret;
13001   __ret = __rev0 - __rev1 * __rev2;
13002   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
13003   return __ret;
13004 }
13005 #endif
13006 
13007 #ifdef __LITTLE_ENDIAN__
13008 __ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
13009   int8x8_t __ret;
13010   __ret = __p0 - __p1 * __p2;
13011   return __ret;
13012 }
13013 #else
13014 __ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
13015   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
13016   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
13017   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
13018   int8x8_t __ret;
13019   __ret = __rev0 - __rev1 * __rev2;
13020   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
13021   return __ret;
13022 }
13023 #endif
13024 
13025 #ifdef __LITTLE_ENDIAN__
13026 __ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
13027   float32x2_t __ret;
13028   __ret = __p0 - __p1 * __p2;
13029   return __ret;
13030 }
13031 #else
13032 __ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
13033   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
13034   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
13035   float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
13036   float32x2_t __ret;
13037   __ret = __rev0 - __rev1 * __rev2;
13038   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
13039   return __ret;
13040 }
13041 #endif
13042 
13043 #ifdef __LITTLE_ENDIAN__
13044 __ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
13045   int32x2_t __ret;
13046   __ret = __p0 - __p1 * __p2;
13047   return __ret;
13048 }
13049 #else
13050 __ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
13051   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
13052   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
13053   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
13054   int32x2_t __ret;
13055   __ret = __rev0 - __rev1 * __rev2;
13056   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
13057   return __ret;
13058 }
13059 #endif
13060 
13061 #ifdef __LITTLE_ENDIAN__
13062 __ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
13063   int16x4_t __ret;
13064   __ret = __p0 - __p1 * __p2;
13065   return __ret;
13066 }
13067 #else
13068 __ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
13069   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
13070   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
13071   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
13072   int16x4_t __ret;
13073   __ret = __rev0 - __rev1 * __rev2;
13074   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
13075   return __ret;
13076 }
13077 #endif
13078 
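/* vmlsq_lane_* / vmls_lane_*: multiply-subtract by one lane of __p2,
 * __ret[i] = __p0[i] - __p1[i] * __p2[__p3].  As with the vmla*_lane_* forms,
 * the lane index must be a compile-time constant. */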
13079 #ifdef __LITTLE_ENDIAN__
13080 #define vmlsq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
13081   uint32x4_t __s0 = __p0; \
13082   uint32x4_t __s1 = __p1; \
13083   uint32x2_t __s2 = __p2; \
13084   uint32x4_t __ret; \
13085   __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
13086   __ret; \
13087 })
13088 #else
13089 #define vmlsq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
13090   uint32x4_t __s0 = __p0; \
13091   uint32x4_t __s1 = __p1; \
13092   uint32x2_t __s2 = __p2; \
13093   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
13094   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
13095   uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
13096   uint32x4_t __ret; \
13097   __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
13098   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
13099   __ret; \
13100 })
13101 #endif
13102 
13103 #ifdef __LITTLE_ENDIAN__
13104 #define vmlsq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
13105   uint16x8_t __s0 = __p0; \
13106   uint16x8_t __s1 = __p1; \
13107   uint16x4_t __s2 = __p2; \
13108   uint16x8_t __ret; \
13109   __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
13110   __ret; \
13111 })
13112 #else
13113 #define vmlsq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
13114   uint16x8_t __s0 = __p0; \
13115   uint16x8_t __s1 = __p1; \
13116   uint16x4_t __s2 = __p2; \
13117   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
13118   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
13119   uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
13120   uint16x8_t __ret; \
13121   __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
13122   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
13123   __ret; \
13124 })
13125 #endif
13126 
13127 #ifdef __LITTLE_ENDIAN__
13128 #define vmlsq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
13129   float32x4_t __s0 = __p0; \
13130   float32x4_t __s1 = __p1; \
13131   float32x2_t __s2 = __p2; \
13132   float32x4_t __ret; \
13133   __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
13134   __ret; \
13135 })
13136 #else
13137 #define vmlsq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
13138   float32x4_t __s0 = __p0; \
13139   float32x4_t __s1 = __p1; \
13140   float32x2_t __s2 = __p2; \
13141   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
13142   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
13143   float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
13144   float32x4_t __ret; \
13145   __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
13146   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
13147   __ret; \
13148 })
13149 #endif
13150 
13151 #ifdef __LITTLE_ENDIAN__
13152 #define vmlsq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
13153   int32x4_t __s0 = __p0; \
13154   int32x4_t __s1 = __p1; \
13155   int32x2_t __s2 = __p2; \
13156   int32x4_t __ret; \
13157   __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
13158   __ret; \
13159 })
13160 #else
13161 #define vmlsq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
13162   int32x4_t __s0 = __p0; \
13163   int32x4_t __s1 = __p1; \
13164   int32x2_t __s2 = __p2; \
13165   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
13166   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
13167   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
13168   int32x4_t __ret; \
13169   __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
13170   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
13171   __ret; \
13172 })
13173 #endif
13174 
13175 #ifdef __LITTLE_ENDIAN__
13176 #define vmlsq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
13177   int16x8_t __s0 = __p0; \
13178   int16x8_t __s1 = __p1; \
13179   int16x4_t __s2 = __p2; \
13180   int16x8_t __ret; \
13181   __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
13182   __ret; \
13183 })
13184 #else
13185 #define vmlsq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
13186   int16x8_t __s0 = __p0; \
13187   int16x8_t __s1 = __p1; \
13188   int16x4_t __s2 = __p2; \
13189   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
13190   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
13191   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
13192   int16x8_t __ret; \
13193   __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
13194   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
13195   __ret; \
13196 })
13197 #endif
13198 
13199 #ifdef __LITTLE_ENDIAN__
13200 #define vmls_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
13201   uint32x2_t __s0 = __p0; \
13202   uint32x2_t __s1 = __p1; \
13203   uint32x2_t __s2 = __p2; \
13204   uint32x2_t __ret; \
13205   __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
13206   __ret; \
13207 })
13208 #else
13209 #define vmls_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
13210   uint32x2_t __s0 = __p0; \
13211   uint32x2_t __s1 = __p1; \
13212   uint32x2_t __s2 = __p2; \
13213   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
13214   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
13215   uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
13216   uint32x2_t __ret; \
13217   __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
13218   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
13219   __ret; \
13220 })
13221 #endif
13222 
13223 #ifdef __LITTLE_ENDIAN__
13224 #define vmls_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
13225   uint16x4_t __s0 = __p0; \
13226   uint16x4_t __s1 = __p1; \
13227   uint16x4_t __s2 = __p2; \
13228   uint16x4_t __ret; \
13229   __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
13230   __ret; \
13231 })
13232 #else
13233 #define vmls_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
13234   uint16x4_t __s0 = __p0; \
13235   uint16x4_t __s1 = __p1; \
13236   uint16x4_t __s2 = __p2; \
13237   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
13238   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
13239   uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
13240   uint16x4_t __ret; \
13241   __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
13242   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
13243   __ret; \
13244 })
13245 #endif
13246 
13247 #ifdef __LITTLE_ENDIAN__
13248 #define vmls_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
13249   float32x2_t __s0 = __p0; \
13250   float32x2_t __s1 = __p1; \
13251   float32x2_t __s2 = __p2; \
13252   float32x2_t __ret; \
13253   __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
13254   __ret; \
13255 })
13256 #else
13257 #define vmls_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
13258   float32x2_t __s0 = __p0; \
13259   float32x2_t __s1 = __p1; \
13260   float32x2_t __s2 = __p2; \
13261   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
13262   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
13263   float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
13264   float32x2_t __ret; \
13265   __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
13266   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
13267   __ret; \
13268 })
13269 #endif
13270 
13271 #ifdef __LITTLE_ENDIAN__
13272 #define vmls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
13273   int32x2_t __s0 = __p0; \
13274   int32x2_t __s1 = __p1; \
13275   int32x2_t __s2 = __p2; \
13276   int32x2_t __ret; \
13277   __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
13278   __ret; \
13279 })
13280 #else
13281 #define vmls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
13282   int32x2_t __s0 = __p0; \
13283   int32x2_t __s1 = __p1; \
13284   int32x2_t __s2 = __p2; \
13285   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
13286   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
13287   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
13288   int32x2_t __ret; \
13289   __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
13290   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
13291   __ret; \
13292 })
13293 #endif
13294 
13295 #ifdef __LITTLE_ENDIAN__
13296 #define vmls_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
13297   int16x4_t __s0 = __p0; \
13298   int16x4_t __s1 = __p1; \
13299   int16x4_t __s2 = __p2; \
13300   int16x4_t __ret; \
13301   __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
13302   __ret; \
13303 })
13304 #else
13305 #define vmls_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
13306   int16x4_t __s0 = __p0; \
13307   int16x4_t __s1 = __p1; \
13308   int16x4_t __s2 = __p2; \
13309   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
13310   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
13311   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
13312   int16x4_t __ret; \
13313   __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
13314   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
13315   __ret; \
13316 })
13317 #endif
13318 
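/* vmlsq_n_* / vmls_n_*: multiply-subtract with a broadcast scalar,
 * __ret[i] = __p0[i] - __p1[i] * __p2. */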
13319 #ifdef __LITTLE_ENDIAN__
13320 __ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
13321   uint32x4_t __ret;
13322   __ret = __p0 - __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
13323   return __ret;
13324 }
13325 #else
13326 __ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
13327   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
13328   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
13329   uint32x4_t __ret;
13330   __ret = __rev0 - __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
13331   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
13332   return __ret;
13333 }
13334 #endif
13335 
13336 #ifdef __LITTLE_ENDIAN__
13337 __ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
13338   uint16x8_t __ret;
13339   __ret = __p0 - __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
13340   return __ret;
13341 }
13342 #else
13343 __ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
13344   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
13345   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
13346   uint16x8_t __ret;
13347   __ret = __rev0 - __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
13348   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
13349   return __ret;
13350 }
13351 #endif
13352 
13353 #ifdef __LITTLE_ENDIAN__
13354 __ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
13355   float32x4_t __ret;
13356   __ret = __p0 - __p1 * (float32x4_t) {__p2, __p2, __p2, __p2};
13357   return __ret;
13358 }
13359 #else
13360 __ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
13361   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
13362   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
13363   float32x4_t __ret;
13364   __ret = __rev0 - __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2};
13365   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
13366   return __ret;
13367 }
13368 #endif
13369 
13370 #ifdef __LITTLE_ENDIAN__
13371 __ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
13372   int32x4_t __ret;
13373   __ret = __p0 - __p1 * (int32x4_t) {__p2, __p2, __p2, __p2};
13374   return __ret;
13375 }
13376 #else
13377 __ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
13378   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
13379   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
13380   int32x4_t __ret;
13381   __ret = __rev0 - __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2};
13382   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
13383   return __ret;
13384 }
13385 #endif
13386 
13387 #ifdef __LITTLE_ENDIAN__
13388 __ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
13389   int16x8_t __ret;
13390   __ret = __p0 - __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
13391   return __ret;
13392 }
13393 #else
13394 __ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
13395   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
13396   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
13397   int16x8_t __ret;
13398   __ret = __rev0 - __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
13399   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
13400   return __ret;
13401 }
13402 #endif
13403 
13404 #ifdef __LITTLE_ENDIAN__
13405 __ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
13406   uint32x2_t __ret;
13407   __ret = __p0 - __p1 * (uint32x2_t) {__p2, __p2};
13408   return __ret;
13409 }
13410 #else
13411 __ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
13412   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
13413   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
13414   uint32x2_t __ret;
13415   __ret = __rev0 - __rev1 * (uint32x2_t) {__p2, __p2};
13416   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
13417   return __ret;
13418 }
13419 #endif
13420 
13421 #ifdef __LITTLE_ENDIAN__
13422 __ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
13423   uint16x4_t __ret;
13424   __ret = __p0 - __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
13425   return __ret;
13426 }
13427 #else
13428 __ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
13429   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
13430   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
13431   uint16x4_t __ret;
13432   __ret = __rev0 - __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
13433   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
13434   return __ret;
13435 }
13436 #endif
13437 
13438 #ifdef __LITTLE_ENDIAN__
13439 __ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
13440   float32x2_t __ret;
13441   __ret = __p0 - __p1 * (float32x2_t) {__p2, __p2};
13442   return __ret;
13443 }
13444 #else
13445 __ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
13446   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
13447   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
13448   float32x2_t __ret;
13449   __ret = __rev0 - __rev1 * (float32x2_t) {__p2, __p2};
13450   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
13451   return __ret;
13452 }
13453 #endif
13454 
13455 #ifdef __LITTLE_ENDIAN__
13456 __ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
13457   int32x2_t __ret;
13458   __ret = __p0 - __p1 * (int32x2_t) {__p2, __p2};
13459   return __ret;
13460 }
13461 #else
13462 __ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
13463   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
13464   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
13465   int32x2_t __ret;
13466   __ret = __rev0 - __rev1 * (int32x2_t) {__p2, __p2};
13467   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
13468   return __ret;
13469 }
13470 #endif
13471 
13472 #ifdef __LITTLE_ENDIAN__
13473 __ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
13474   int16x4_t __ret;
13475   __ret = __p0 - __p1 * (int16x4_t) {__p2, __p2, __p2, __p2};
13476   return __ret;
13477 }
13478 #else
13479 __ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
13480   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
13481   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
13482   int16x4_t __ret;
13483   __ret = __rev0 - __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2};
13484   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
13485   return __ret;
13486 }
13487 #endif
13488 
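/* vmov_n_* / vmovq_n_*: duplicate a scalar into every lane of a 64-bit
 * (vmov_n) or 128-bit (vmovq_n) vector; the semantics match vdup_n_* / vdupq_n_*.
 * For example, vmovq_n_f32(0.5f) yields the vector {0.5f, 0.5f, 0.5f, 0.5f}. */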
13489 #ifdef __LITTLE_ENDIAN__
13490 __ai poly8x8_t vmov_n_p8(poly8_t __p0) {
13491   poly8x8_t __ret;
13492   __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
13493   return __ret;
13494 }
13495 #else
13496 __ai poly8x8_t vmov_n_p8(poly8_t __p0) {
13497   poly8x8_t __ret;
13498   __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
13499   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
13500   return __ret;
13501 }
13502 #endif
13503 
13504 #ifdef __LITTLE_ENDIAN__
13505 __ai poly16x4_t vmov_n_p16(poly16_t __p0) {
13506   poly16x4_t __ret;
13507   __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
13508   return __ret;
13509 }
13510 #else
13511 __ai poly16x4_t vmov_n_p16(poly16_t __p0) {
13512   poly16x4_t __ret;
13513   __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
13514   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
13515   return __ret;
13516 }
13517 #endif
13518 
13519 #ifdef __LITTLE_ENDIAN__
13520 __ai poly8x16_t vmovq_n_p8(poly8_t __p0) {
13521   poly8x16_t __ret;
13522   __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
13523   return __ret;
13524 }
13525 #else
13526 __ai poly8x16_t vmovq_n_p8(poly8_t __p0) {
13527   poly8x16_t __ret;
13528   __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
13529   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
13530   return __ret;
13531 }
13532 #endif
13533 
13534 #ifdef __LITTLE_ENDIAN__
13535 __ai poly16x8_t vmovq_n_p16(poly16_t __p0) {
13536   poly16x8_t __ret;
13537   __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
13538   return __ret;
13539 }
13540 #else
13541 __ai poly16x8_t vmovq_n_p16(poly16_t __p0) {
13542   poly16x8_t __ret;
13543   __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
13544   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
13545   return __ret;
13546 }
13547 #endif
13548 
13549 #ifdef __LITTLE_ENDIAN__
13550 __ai uint8x16_t vmovq_n_u8(uint8_t __p0) {
13551   uint8x16_t __ret;
13552   __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
13553   return __ret;
13554 }
13555 #else
13556 __ai uint8x16_t vmovq_n_u8(uint8_t __p0) {
13557   uint8x16_t __ret;
13558   __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
13559   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
13560   return __ret;
13561 }
13562 #endif
13563 
13564 #ifdef __LITTLE_ENDIAN__
13565 __ai uint32x4_t vmovq_n_u32(uint32_t __p0) {
13566   uint32x4_t __ret;
13567   __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
13568   return __ret;
13569 }
13570 #else
13571 __ai uint32x4_t vmovq_n_u32(uint32_t __p0) {
13572   uint32x4_t __ret;
13573   __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
13574   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
13575   return __ret;
13576 }
13577 #endif
13578 
13579 #ifdef __LITTLE_ENDIAN__
13580 __ai uint64x2_t vmovq_n_u64(uint64_t __p0) {
13581   uint64x2_t __ret;
13582   __ret = (uint64x2_t) {__p0, __p0};
13583   return __ret;
13584 }
13585 #else
13586 __ai uint64x2_t vmovq_n_u64(uint64_t __p0) {
13587   uint64x2_t __ret;
13588   __ret = (uint64x2_t) {__p0, __p0};
13589   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
13590   return __ret;
13591 }
13592 #endif
13593 
13594 #ifdef __LITTLE_ENDIAN__
13595 __ai uint16x8_t vmovq_n_u16(uint16_t __p0) {
13596   uint16x8_t __ret;
13597   __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
13598   return __ret;
13599 }
13600 #else
13601 __ai uint16x8_t vmovq_n_u16(uint16_t __p0) {
13602   uint16x8_t __ret;
13603   __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
13604   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
13605   return __ret;
13606 }
13607 #endif
13608 
13609 #ifdef __LITTLE_ENDIAN__
13610 __ai int8x16_t vmovq_n_s8(int8_t __p0) {
13611   int8x16_t __ret;
13612   __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
13613   return __ret;
13614 }
13615 #else
13616 __ai int8x16_t vmovq_n_s8(int8_t __p0) {
13617   int8x16_t __ret;
13618   __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
13619   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
13620   return __ret;
13621 }
13622 #endif
13623 
13624 #ifdef __LITTLE_ENDIAN__
13625 __ai float32x4_t vmovq_n_f32(float32_t __p0) {
13626   float32x4_t __ret;
13627   __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
13628   return __ret;
13629 }
13630 #else
13631 __ai float32x4_t vmovq_n_f32(float32_t __p0) {
13632   float32x4_t __ret;
13633   __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
13634   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
13635   return __ret;
13636 }
13637 #endif
13638 
13639 #ifdef __LITTLE_ENDIAN__
13640 #define vmovq_n_f16(__p0) __extension__ ({ \
13641   float16_t __s0 = __p0; \
13642   float16x8_t __ret; \
13643   __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
13644   __ret; \
13645 })
13646 #else
13647 #define vmovq_n_f16(__p0) __extension__ ({ \
13648   float16_t __s0 = __p0; \
13649   float16x8_t __ret; \
13650   __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
13651   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
13652   __ret; \
13653 })
13654 #endif
13655 
13656 #ifdef __LITTLE_ENDIAN__
13657 __ai int32x4_t vmovq_n_s32(int32_t __p0) {
13658   int32x4_t __ret;
13659   __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
13660   return __ret;
13661 }
13662 #else
13663 __ai int32x4_t vmovq_n_s32(int32_t __p0) {
13664   int32x4_t __ret;
13665   __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
13666   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
13667   return __ret;
13668 }
13669 #endif
13670 
13671 #ifdef __LITTLE_ENDIAN__
13672 __ai int64x2_t vmovq_n_s64(int64_t __p0) {
13673   int64x2_t __ret;
13674   __ret = (int64x2_t) {__p0, __p0};
13675   return __ret;
13676 }
13677 #else
13678 __ai int64x2_t vmovq_n_s64(int64_t __p0) {
13679   int64x2_t __ret;
13680   __ret = (int64x2_t) {__p0, __p0};
13681   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
13682   return __ret;
13683 }
13684 #endif
13685 
13686 #ifdef __LITTLE_ENDIAN__
13687 __ai int16x8_t vmovq_n_s16(int16_t __p0) {
13688   int16x8_t __ret;
13689   __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
13690   return __ret;
13691 }
13692 #else
13693 __ai int16x8_t vmovq_n_s16(int16_t __p0) {
13694   int16x8_t __ret;
13695   __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
13696   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
13697   return __ret;
13698 }
13699 #endif
13700 
13701 #ifdef __LITTLE_ENDIAN__
13702 __ai uint8x8_t vmov_n_u8(uint8_t __p0) {
13703   uint8x8_t __ret;
13704   __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
13705   return __ret;
13706 }
13707 #else
13708 __ai uint8x8_t vmov_n_u8(uint8_t __p0) {
13709   uint8x8_t __ret;
13710   __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
13711   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
13712   return __ret;
13713 }
13714 #endif
13715 
13716 #ifdef __LITTLE_ENDIAN__
13717 __ai uint32x2_t vmov_n_u32(uint32_t __p0) {
13718   uint32x2_t __ret;
13719   __ret = (uint32x2_t) {__p0, __p0};
13720   return __ret;
13721 }
13722 #else
13723 __ai uint32x2_t vmov_n_u32(uint32_t __p0) {
13724   uint32x2_t __ret;
13725   __ret = (uint32x2_t) {__p0, __p0};
13726   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
13727   return __ret;
13728 }
13729 #endif
13730 
13731 #ifdef __LITTLE_ENDIAN__
13732 __ai uint64x1_t vmov_n_u64(uint64_t __p0) {
13733   uint64x1_t __ret;
13734   __ret = (uint64x1_t) {__p0};
13735   return __ret;
13736 }
13737 #else
13738 __ai uint64x1_t vmov_n_u64(uint64_t __p0) {
13739   uint64x1_t __ret;
13740   __ret = (uint64x1_t) {__p0};
13741   return __ret;
13742 }
13743 #endif
13744 
13745 #ifdef __LITTLE_ENDIAN__
13746 __ai uint16x4_t vmov_n_u16(uint16_t __p0) {
13747   uint16x4_t __ret;
13748   __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
13749   return __ret;
13750 }
13751 #else
13752 __ai uint16x4_t vmov_n_u16(uint16_t __p0) {
13753   uint16x4_t __ret;
13754   __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
13755   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
13756   return __ret;
13757 }
13758 #endif
13759 
13760 #ifdef __LITTLE_ENDIAN__
13761 __ai int8x8_t vmov_n_s8(int8_t __p0) {
13762   int8x8_t __ret;
13763   __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
13764   return __ret;
13765 }
13766 #else
13767 __ai int8x8_t vmov_n_s8(int8_t __p0) {
13768   int8x8_t __ret;
13769   __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
13770   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
13771   return __ret;
13772 }
13773 #endif
13774 
13775 #ifdef __LITTLE_ENDIAN__
13776 __ai float32x2_t vmov_n_f32(float32_t __p0) {
13777   float32x2_t __ret;
13778   __ret = (float32x2_t) {__p0, __p0};
13779   return __ret;
13780 }
13781 #else
13782 __ai float32x2_t vmov_n_f32(float32_t __p0) {
13783   float32x2_t __ret;
13784   __ret = (float32x2_t) {__p0, __p0};
13785   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
13786   return __ret;
13787 }
13788 #endif
13789 
13790 #ifdef __LITTLE_ENDIAN__
13791 #define vmov_n_f16(__p0) __extension__ ({ \
13792   float16_t __s0 = __p0; \
13793   float16x4_t __ret; \
13794   __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
13795   __ret; \
13796 })
13797 #else
13798 #define vmov_n_f16(__p0) __extension__ ({ \
13799   float16_t __s0 = __p0; \
13800   float16x4_t __ret; \
13801   __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
13802   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
13803   __ret; \
13804 })
13805 #endif
13806 
13807 #ifdef __LITTLE_ENDIAN__
13808 __ai int32x2_t vmov_n_s32(int32_t __p0) {
13809   int32x2_t __ret;
13810   __ret = (int32x2_t) {__p0, __p0};
13811   return __ret;
13812 }
13813 #else
13814 __ai int32x2_t vmov_n_s32(int32_t __p0) {
13815   int32x2_t __ret;
13816   __ret = (int32x2_t) {__p0, __p0};
13817   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
13818   return __ret;
13819 }
13820 #endif
13821 
13822 #ifdef __LITTLE_ENDIAN__
13823 __ai int64x1_t vmov_n_s64(int64_t __p0) {
13824   int64x1_t __ret;
13825   __ret = (int64x1_t) {__p0};
13826   return __ret;
13827 }
13828 #else
13829 __ai int64x1_t vmov_n_s64(int64_t __p0) {
13830   int64x1_t __ret;
13831   __ret = (int64x1_t) {__p0};
13832   return __ret;
13833 }
13834 #endif
13835 
13836 #ifdef __LITTLE_ENDIAN__
13837 __ai int16x4_t vmov_n_s16(int16_t __p0) {
13838   int16x4_t __ret;
13839   __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
13840   return __ret;
13841 }
13842 #else
13843 __ai int16x4_t vmov_n_s16(int16_t __p0) {
13844   int16x4_t __ret;
13845   __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
13846   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
13847   return __ret;
13848 }
13849 #endif
13850 
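/* vmovl_*: widening moves. Each element of the doubleword input is sign-extended
 * (signed variants) or zero-extended (unsigned variants) to twice its width. */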
#ifdef __LITTLE_ENDIAN__
__ai uint16x8_t vmovl_u8(uint8x8_t __p0) {
  uint16x8_t __ret;
  __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49);
  return __ret;
}
#else
__ai uint16x8_t vmovl_u8(uint8x8_t __p0) {
  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  uint16x8_t __ret;
  __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 49);
  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  return __ret;
}
__ai uint16x8_t __noswap_vmovl_u8(uint8x8_t __p0) {
  uint16x8_t __ret;
  __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai uint64x2_t vmovl_u32(uint32x2_t __p0) {
  uint64x2_t __ret;
  __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51);
  return __ret;
}
#else
__ai uint64x2_t vmovl_u32(uint32x2_t __p0) {
  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  uint64x2_t __ret;
  __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 51);
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  return __ret;
}
__ai uint64x2_t __noswap_vmovl_u32(uint32x2_t __p0) {
  uint64x2_t __ret;
  __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai uint32x4_t vmovl_u16(uint16x4_t __p0) {
  uint32x4_t __ret;
  __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50);
  return __ret;
}
#else
__ai uint32x4_t vmovl_u16(uint16x4_t __p0) {
  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  uint32x4_t __ret;
  __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 50);
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
__ai uint32x4_t __noswap_vmovl_u16(uint16x4_t __p0) {
  uint32x4_t __ret;
  __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai int16x8_t vmovl_s8(int8x8_t __p0) {
  int16x8_t __ret;
  __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33);
  return __ret;
}
#else
__ai int16x8_t vmovl_s8(int8x8_t __p0) {
  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  int16x8_t __ret;
  __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 33);
  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  return __ret;
}
__ai int16x8_t __noswap_vmovl_s8(int8x8_t __p0) {
  int16x8_t __ret;
  __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai int64x2_t vmovl_s32(int32x2_t __p0) {
  int64x2_t __ret;
  __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35);
  return __ret;
}
#else
__ai int64x2_t vmovl_s32(int32x2_t __p0) {
  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  int64x2_t __ret;
  __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 35);
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  return __ret;
}
__ai int64x2_t __noswap_vmovl_s32(int32x2_t __p0) {
  int64x2_t __ret;
  __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vmovl_s16(int16x4_t __p0) {
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34);
  return __ret;
}
#else
__ai int32x4_t vmovl_s16(int16x4_t __p0) {
  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 34);
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
__ai int32x4_t __noswap_vmovl_s16(int16x4_t __p0) {
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34);
  return __ret;
}
#endif

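/* vmovn_*: narrowing moves. Each element of the quadword input is truncated to
 * half its width, keeping the least-significant half. */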
#ifdef __LITTLE_ENDIAN__
__ai uint16x4_t vmovn_u32(uint32x4_t __p0) {
  uint16x4_t __ret;
  __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17);
  return __ret;
}
#else
__ai uint16x4_t vmovn_u32(uint32x4_t __p0) {
  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  uint16x4_t __ret;
  __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 17);
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
__ai uint16x4_t __noswap_vmovn_u32(uint32x4_t __p0) {
  uint16x4_t __ret;
  __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai uint32x2_t vmovn_u64(uint64x2_t __p0) {
  uint32x2_t __ret;
  __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18);
  return __ret;
}
#else
__ai uint32x2_t vmovn_u64(uint64x2_t __p0) {
  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  uint32x2_t __ret;
  __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 18);
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  return __ret;
}
__ai uint32x2_t __noswap_vmovn_u64(uint64x2_t __p0) {
  uint32x2_t __ret;
  __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai uint8x8_t vmovn_u16(uint16x8_t __p0) {
  uint8x8_t __ret;
  __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16);
  return __ret;
}
#else
__ai uint8x8_t vmovn_u16(uint16x8_t __p0) {
  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  uint8x8_t __ret;
  __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 16);
  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  return __ret;
}
__ai uint8x8_t __noswap_vmovn_u16(uint16x8_t __p0) {
  uint8x8_t __ret;
  __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai int16x4_t vmovn_s32(int32x4_t __p0) {
  int16x4_t __ret;
  __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1);
  return __ret;
}
#else
__ai int16x4_t vmovn_s32(int32x4_t __p0) {
  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  int16x4_t __ret;
  __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 1);
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
__ai int16x4_t __noswap_vmovn_s32(int32x4_t __p0) {
  int16x4_t __ret;
  __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai int32x2_t vmovn_s64(int64x2_t __p0) {
  int32x2_t __ret;
  __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2);
  return __ret;
}
#else
__ai int32x2_t vmovn_s64(int64x2_t __p0) {
  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  int32x2_t __ret;
  __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 2);
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  return __ret;
}
__ai int32x2_t __noswap_vmovn_s64(int64x2_t __p0) {
  int32x2_t __ret;
  __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai int8x8_t vmovn_s16(int16x8_t __p0) {
  int8x8_t __ret;
  __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0);
  return __ret;
}
#else
__ai int8x8_t vmovn_s16(int16x8_t __p0) {
  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  int8x8_t __ret;
  __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 0);
  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  return __ret;
}
__ai int8x8_t __noswap_vmovn_s16(int16x8_t __p0) {
  int8x8_t __ret;
  __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0);
  return __ret;
}
#endif

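/* vmul_* and vmulq_*: element-wise vector multiplication. The p8 variants
 * perform polynomial (carry-less) multiplication via the NEON builtin. */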
#ifdef __LITTLE_ENDIAN__
__ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  uint8x16_t __ret;
  __ret = __p0 * __p1;
  return __ret;
}
#else
__ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  uint8x16_t __ret;
  __ret = __rev0 * __rev1;
  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  uint32x4_t __ret;
  __ret = __p0 * __p1;
  return __ret;
}
#else
__ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  uint32x4_t __ret;
  __ret = __rev0 * __rev1;
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  uint16x8_t __ret;
  __ret = __p0 * __p1;
  return __ret;
}
#else
__ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  uint16x8_t __ret;
  __ret = __rev0 * __rev1;
  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) {
  int8x16_t __ret;
  __ret = __p0 * __p1;
  return __ret;
}
#else
__ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) {
  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  int8x16_t __ret;
  __ret = __rev0 * __rev1;
  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) {
  float32x4_t __ret;
  __ret = __p0 * __p1;
  return __ret;
}
#else
__ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) {
  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  float32x4_t __ret;
  __ret = __rev0 * __rev1;
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) {
  int32x4_t __ret;
  __ret = __p0 * __p1;
  return __ret;
}
#else
__ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) {
  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  int32x4_t __ret;
  __ret = __rev0 * __rev1;
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) {
  int16x8_t __ret;
  __ret = __p0 * __p1;
  return __ret;
}
#else
__ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) {
  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  int16x8_t __ret;
  __ret = __rev0 * __rev1;
  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) {
  uint8x8_t __ret;
  __ret = __p0 * __p1;
  return __ret;
}
#else
__ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) {
  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  uint8x8_t __ret;
  __ret = __rev0 * __rev1;
  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) {
  uint32x2_t __ret;
  __ret = __p0 * __p1;
  return __ret;
}
#else
__ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) {
  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  uint32x2_t __ret;
  __ret = __rev0 * __rev1;
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) {
  uint16x4_t __ret;
  __ret = __p0 * __p1;
  return __ret;
}
#else
__ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) {
  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  uint16x4_t __ret;
  __ret = __rev0 * __rev1;
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) {
  int8x8_t __ret;
  __ret = __p0 * __p1;
  return __ret;
}
#else
__ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) {
  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  int8x8_t __ret;
  __ret = __rev0 * __rev1;
  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) {
  float32x2_t __ret;
  __ret = __p0 * __p1;
  return __ret;
}
#else
__ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) {
  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  float32x2_t __ret;
  __ret = __rev0 * __rev1;
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) {
  int32x2_t __ret;
  __ret = __p0 * __p1;
  return __ret;
}
#else
__ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) {
  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  int32x2_t __ret;
  __ret = __rev0 * __rev1;
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) {
  int16x4_t __ret;
  __ret = __p0 * __p1;
  return __ret;
}
#else
__ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) {
  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  int16x4_t __ret;
  __ret = __rev0 * __rev1;
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) {
  poly8x8_t __ret;
  __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__p0, (int8x8_t)__p1, 4);
  return __ret;
}
#else
__ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) {
  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  poly8x8_t __ret;
  __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4);
  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) {
  poly8x16_t __ret;
  __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__p0, (int8x16_t)__p1, 36);
  return __ret;
}
#else
__ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) {
  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  poly8x16_t __ret;
  __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36);
  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  return __ret;
}
#endif

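/* vmul_lane_* and vmulq_lane_*: multiply every element of the first vector by
 * the lane __p2 of the second vector; __p2 must be a compile-time constant. */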
#ifdef __LITTLE_ENDIAN__
#define vmulq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  uint32x4_t __s0 = __p0; \
  uint32x2_t __s1 = __p1; \
  uint32x4_t __ret; \
  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
  __ret; \
})
#else
#define vmulq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  uint32x4_t __s0 = __p0; \
  uint32x2_t __s1 = __p1; \
  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  uint32x4_t __ret; \
  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vmulq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  uint16x8_t __s0 = __p0; \
  uint16x4_t __s1 = __p1; \
  uint16x8_t __ret; \
  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
  __ret; \
})
#else
#define vmulq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  uint16x8_t __s0 = __p0; \
  uint16x4_t __s1 = __p1; \
  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  uint16x8_t __ret; \
  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vmulq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  float32x4_t __s0 = __p0; \
  float32x2_t __s1 = __p1; \
  float32x4_t __ret; \
  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
  __ret; \
})
#else
#define vmulq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  float32x4_t __s0 = __p0; \
  float32x2_t __s1 = __p1; \
  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  float32x4_t __ret; \
  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vmulq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  int32x4_t __s0 = __p0; \
  int32x2_t __s1 = __p1; \
  int32x4_t __ret; \
  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
  __ret; \
})
#else
#define vmulq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  int32x4_t __s0 = __p0; \
  int32x2_t __s1 = __p1; \
  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  int32x4_t __ret; \
  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vmulq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  int16x8_t __s0 = __p0; \
  int16x4_t __s1 = __p1; \
  int16x8_t __ret; \
  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
  __ret; \
})
#else
#define vmulq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  int16x8_t __s0 = __p0; \
  int16x4_t __s1 = __p1; \
  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  int16x8_t __ret; \
  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vmul_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  uint32x2_t __s0 = __p0; \
  uint32x2_t __s1 = __p1; \
  uint32x2_t __ret; \
  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
  __ret; \
})
#else
#define vmul_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  uint32x2_t __s0 = __p0; \
  uint32x2_t __s1 = __p1; \
  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  uint32x2_t __ret; \
  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vmul_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  uint16x4_t __s0 = __p0; \
  uint16x4_t __s1 = __p1; \
  uint16x4_t __ret; \
  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
  __ret; \
})
#else
#define vmul_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  uint16x4_t __s0 = __p0; \
  uint16x4_t __s1 = __p1; \
  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  uint16x4_t __ret; \
  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vmul_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  float32x2_t __s0 = __p0; \
  float32x2_t __s1 = __p1; \
  float32x2_t __ret; \
  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
  __ret; \
})
#else
#define vmul_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  float32x2_t __s0 = __p0; \
  float32x2_t __s1 = __p1; \
  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  float32x2_t __ret; \
  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vmul_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  int32x2_t __s0 = __p0; \
  int32x2_t __s1 = __p1; \
  int32x2_t __ret; \
  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
  __ret; \
})
#else
#define vmul_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  int32x2_t __s0 = __p0; \
  int32x2_t __s1 = __p1; \
  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  int32x2_t __ret; \
  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vmul_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  int16x4_t __s0 = __p0; \
  int16x4_t __s1 = __p1; \
  int16x4_t __ret; \
  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
  __ret; \
})
#else
#define vmul_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  int16x4_t __s0 = __p0; \
  int16x4_t __s1 = __p1; \
  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  int16x4_t __ret; \
  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  __ret; \
})
#endif

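/* vmul_n_* and vmulq_n_*: multiply every element of a vector by the scalar __p1. */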
#ifdef __LITTLE_ENDIAN__
__ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) {
  uint32x4_t __ret;
  __ret = __p0 * (uint32x4_t) {__p1, __p1, __p1, __p1};
  return __ret;
}
#else
__ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) {
  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  uint32x4_t __ret;
  __ret = __rev0 * (uint32x4_t) {__p1, __p1, __p1, __p1};
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) {
  uint16x8_t __ret;
  __ret = __p0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
  return __ret;
}
#else
__ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) {
  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  uint16x8_t __ret;
  __ret = __rev0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) {
  float32x4_t __ret;
  __ret = __p0 * (float32x4_t) {__p1, __p1, __p1, __p1};
  return __ret;
}
#else
__ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) {
  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  float32x4_t __ret;
  __ret = __rev0 * (float32x4_t) {__p1, __p1, __p1, __p1};
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) {
  int32x4_t __ret;
  __ret = __p0 * (int32x4_t) {__p1, __p1, __p1, __p1};
  return __ret;
}
#else
__ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) {
  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  int32x4_t __ret;
  __ret = __rev0 * (int32x4_t) {__p1, __p1, __p1, __p1};
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) {
  int16x8_t __ret;
  __ret = __p0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
  return __ret;
}
#else
__ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) {
  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  int16x8_t __ret;
  __ret = __rev0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) {
  uint32x2_t __ret;
  __ret = __p0 * (uint32x2_t) {__p1, __p1};
  return __ret;
}
#else
__ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) {
  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  uint32x2_t __ret;
  __ret = __rev0 * (uint32x2_t) {__p1, __p1};
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) {
  uint16x4_t __ret;
  __ret = __p0 * (uint16x4_t) {__p1, __p1, __p1, __p1};
  return __ret;
}
#else
__ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) {
  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  uint16x4_t __ret;
  __ret = __rev0 * (uint16x4_t) {__p1, __p1, __p1, __p1};
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) {
  float32x2_t __ret;
  __ret = __p0 * (float32x2_t) {__p1, __p1};
  return __ret;
}
#else
__ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) {
  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  float32x2_t __ret;
  __ret = __rev0 * (float32x2_t) {__p1, __p1};
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) {
  int32x2_t __ret;
  __ret = __p0 * (int32x2_t) {__p1, __p1};
  return __ret;
}
#else
__ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) {
  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  int32x2_t __ret;
  __ret = __rev0 * (int32x2_t) {__p1, __p1};
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) {
  int16x4_t __ret;
  __ret = __p0 * (int16x4_t) {__p1, __p1, __p1, __p1};
  return __ret;
}
#else
__ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) {
  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  int16x4_t __ret;
  __ret = __rev0 * (int16x4_t) {__p1, __p1, __p1, __p1};
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
#endif

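/* vmull_*: long multiplies. Corresponding elements of the two doubleword inputs
 * are multiplied and the full-width products are returned in a quadword vector
 * whose elements are twice as wide as the inputs. */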
#ifdef __LITTLE_ENDIAN__
__ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
  poly16x8_t __ret;
  __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37);
  return __ret;
}
#else
__ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  poly16x8_t __ret;
  __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 37);
  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  return __ret;
}
__ai poly16x8_t __noswap_vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
  poly16x8_t __ret;
  __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
  uint16x8_t __ret;
  __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49);
  return __ret;
}
#else
__ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  uint16x8_t __ret;
  __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 49);
  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  return __ret;
}
__ai uint16x8_t __noswap_vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
  uint16x8_t __ret;
  __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
  uint64x2_t __ret;
  __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51);
  return __ret;
}
#else
__ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  uint64x2_t __ret;
  __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 51);
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  return __ret;
}
__ai uint64x2_t __noswap_vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
  uint64x2_t __ret;
  __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
  uint32x4_t __ret;
  __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50);
  return __ret;
}
#else
__ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  uint32x4_t __ret;
  __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 50);
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
__ai uint32x4_t __noswap_vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
  uint32x4_t __ret;
  __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) {
  int16x8_t __ret;
  __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33);
  return __ret;
}
#else
__ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) {
  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  int16x8_t __ret;
  __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 33);
  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  return __ret;
}
__ai int16x8_t __noswap_vmull_s8(int8x8_t __p0, int8x8_t __p1) {
  int16x8_t __ret;
  __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) {
  int64x2_t __ret;
  __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
  return __ret;
}
#else
__ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) {
  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  int64x2_t __ret;
  __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35);
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  return __ret;
}
__ai int64x2_t __noswap_vmull_s32(int32x2_t __p0, int32x2_t __p1) {
  int64x2_t __ret;
  __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) {
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
  return __ret;
}
#else
__ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) {
  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34);
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
__ai int32x4_t __noswap_vmull_s16(int16x4_t __p0, int16x4_t __p1) {
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
  return __ret;
}
#endif

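/* vmull_lane_*: long multiply of the first vector by lane __p2 of the second. */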
#ifdef __LITTLE_ENDIAN__
#define vmull_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  uint32x2_t __s0 = __p0; \
  uint32x2_t __s1 = __p1; \
  uint64x2_t __ret; \
  __ret = vmull_u32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
  __ret; \
})
#else
#define vmull_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  uint32x2_t __s0 = __p0; \
  uint32x2_t __s1 = __p1; \
  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  uint64x2_t __ret; \
  __ret = __noswap_vmull_u32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vmull_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  uint16x4_t __s0 = __p0; \
  uint16x4_t __s1 = __p1; \
  uint32x4_t __ret; \
  __ret = vmull_u16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
  __ret; \
})
#else
#define vmull_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  uint16x4_t __s0 = __p0; \
  uint16x4_t __s1 = __p1; \
  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  uint32x4_t __ret; \
  __ret = __noswap_vmull_u16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  int32x2_t __s0 = __p0; \
  int32x2_t __s1 = __p1; \
  int64x2_t __ret; \
  __ret = vmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
  __ret; \
})
#else
#define vmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  int32x2_t __s0 = __p0; \
  int32x2_t __s1 = __p1; \
  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  int64x2_t __ret; \
  __ret = __noswap_vmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  int16x4_t __s0 = __p0; \
  int16x4_t __s1 = __p1; \
  int32x4_t __ret; \
  __ret = vmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
  __ret; \
})
#else
#define vmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  int16x4_t __s0 = __p0; \
  int16x4_t __s1 = __p1; \
  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  int32x4_t __ret; \
  __ret = __noswap_vmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  __ret; \
})
#endif

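/* vmull_n_*: long multiply of a vector by the scalar __p1. */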
#ifdef __LITTLE_ENDIAN__
__ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
  uint64x2_t __ret;
  __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint32x2_t) {__p1, __p1}, 51);
  return __ret;
}
#else
__ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  uint64x2_t __ret;
  __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(uint32x2_t) {__p1, __p1}, 51);
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  return __ret;
}
__ai uint64x2_t __noswap_vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
  uint64x2_t __ret;
  __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint32x2_t) {__p1, __p1}, 51);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
  uint32x4_t __ret;
  __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint16x4_t) {__p1, __p1, __p1, __p1}, 50);
  return __ret;
}
#else
__ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  uint32x4_t __ret;
  __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(uint16x4_t) {__p1, __p1, __p1, __p1}, 50);
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
__ai uint32x4_t __noswap_vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
  uint32x4_t __ret;
  __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint16x4_t) {__p1, __p1, __p1, __p1}, 50);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) {
  int64x2_t __ret;
  __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
  return __ret;
}
#else
__ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) {
  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  int64x2_t __ret;
  __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  return __ret;
}
__ai int64x2_t __noswap_vmull_n_s32(int32x2_t __p0, int32_t __p1) {
  int64x2_t __ret;
  __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) {
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
  return __ret;
}
#else
__ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) {
  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
__ai int32x4_t __noswap_vmull_n_s16(int16x4_t __p0, int16_t __p1) {
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
  return __ret;
}
#endif

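/* vmvn_* and vmvnq_*: bitwise NOT of every element. */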
15067 #ifdef __LITTLE_ENDIAN__
vmvn_p8(poly8x8_t __p0)15068 __ai poly8x8_t vmvn_p8(poly8x8_t __p0) {
15069   poly8x8_t __ret;
15070   __ret = ~__p0;
15071   return __ret;
15072 }
15073 #else
vmvn_p8(poly8x8_t __p0)15074 __ai poly8x8_t vmvn_p8(poly8x8_t __p0) {
15075   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
15076   poly8x8_t __ret;
15077   __ret = ~__rev0;
15078   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
15079   return __ret;
15080 }
15081 #endif
15082 
15083 #ifdef __LITTLE_ENDIAN__
vmvnq_p8(poly8x16_t __p0)15084 __ai poly8x16_t vmvnq_p8(poly8x16_t __p0) {
15085   poly8x16_t __ret;
15086   __ret = ~__p0;
15087   return __ret;
15088 }
15089 #else
vmvnq_p8(poly8x16_t __p0)15090 __ai poly8x16_t vmvnq_p8(poly8x16_t __p0) {
15091   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15092   poly8x16_t __ret;
15093   __ret = ~__rev0;
15094   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15095   return __ret;
15096 }
15097 #endif
15098 
15099 #ifdef __LITTLE_ENDIAN__
vmvnq_u8(uint8x16_t __p0)15100 __ai uint8x16_t vmvnq_u8(uint8x16_t __p0) {
15101   uint8x16_t __ret;
15102   __ret = ~__p0;
15103   return __ret;
15104 }
15105 #else
vmvnq_u8(uint8x16_t __p0)15106 __ai uint8x16_t vmvnq_u8(uint8x16_t __p0) {
15107   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15108   uint8x16_t __ret;
15109   __ret = ~__rev0;
15110   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15111   return __ret;
15112 }
15113 #endif
15114 
15115 #ifdef __LITTLE_ENDIAN__
vmvnq_u32(uint32x4_t __p0)15116 __ai uint32x4_t vmvnq_u32(uint32x4_t __p0) {
15117   uint32x4_t __ret;
15118   __ret = ~__p0;
15119   return __ret;
15120 }
15121 #else
vmvnq_u32(uint32x4_t __p0)15122 __ai uint32x4_t vmvnq_u32(uint32x4_t __p0) {
15123   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
15124   uint32x4_t __ret;
15125   __ret = ~__rev0;
15126   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
15127   return __ret;
15128 }
15129 #endif
15130 
15131 #ifdef __LITTLE_ENDIAN__
vmvnq_u16(uint16x8_t __p0)15132 __ai uint16x8_t vmvnq_u16(uint16x8_t __p0) {
15133   uint16x8_t __ret;
15134   __ret = ~__p0;
15135   return __ret;
15136 }
15137 #else
vmvnq_u16(uint16x8_t __p0)15138 __ai uint16x8_t vmvnq_u16(uint16x8_t __p0) {
15139   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
15140   uint16x8_t __ret;
15141   __ret = ~__rev0;
15142   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
15143   return __ret;
15144 }
15145 #endif
15146 
15147 #ifdef __LITTLE_ENDIAN__
vmvnq_s8(int8x16_t __p0)15148 __ai int8x16_t vmvnq_s8(int8x16_t __p0) {
15149   int8x16_t __ret;
15150   __ret = ~__p0;
15151   return __ret;
15152 }
15153 #else
vmvnq_s8(int8x16_t __p0)15154 __ai int8x16_t vmvnq_s8(int8x16_t __p0) {
15155   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15156   int8x16_t __ret;
15157   __ret = ~__rev0;
15158   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15159   return __ret;
15160 }
15161 #endif
15162 
15163 #ifdef __LITTLE_ENDIAN__
vmvnq_s32(int32x4_t __p0)15164 __ai int32x4_t vmvnq_s32(int32x4_t __p0) {
15165   int32x4_t __ret;
15166   __ret = ~__p0;
15167   return __ret;
15168 }
15169 #else
vmvnq_s32(int32x4_t __p0)15170 __ai int32x4_t vmvnq_s32(int32x4_t __p0) {
15171   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
15172   int32x4_t __ret;
15173   __ret = ~__rev0;
15174   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
15175   return __ret;
15176 }
15177 #endif
15178 
15179 #ifdef __LITTLE_ENDIAN__
vmvnq_s16(int16x8_t __p0)15180 __ai int16x8_t vmvnq_s16(int16x8_t __p0) {
15181   int16x8_t __ret;
15182   __ret = ~__p0;
15183   return __ret;
15184 }
15185 #else
vmvnq_s16(int16x8_t __p0)15186 __ai int16x8_t vmvnq_s16(int16x8_t __p0) {
15187   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
15188   int16x8_t __ret;
15189   __ret = ~__rev0;
15190   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
15191   return __ret;
15192 }
15193 #endif
15194 
15195 #ifdef __LITTLE_ENDIAN__
15196 __ai uint8x8_t vmvn_u8(uint8x8_t __p0) {
15197   uint8x8_t __ret;
15198   __ret = ~__p0;
15199   return __ret;
15200 }
15201 #else
15202 __ai uint8x8_t vmvn_u8(uint8x8_t __p0) {
15203   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
15204   uint8x8_t __ret;
15205   __ret = ~__rev0;
15206   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
15207   return __ret;
15208 }
15209 #endif
15210 
15211 #ifdef __LITTLE_ENDIAN__
15212 __ai uint32x2_t vmvn_u32(uint32x2_t __p0) {
15213   uint32x2_t __ret;
15214   __ret = ~__p0;
15215   return __ret;
15216 }
15217 #else
15218 __ai uint32x2_t vmvn_u32(uint32x2_t __p0) {
15219   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
15220   uint32x2_t __ret;
15221   __ret = ~__rev0;
15222   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
15223   return __ret;
15224 }
15225 #endif
15226 
15227 #ifdef __LITTLE_ENDIAN__
15228 __ai uint16x4_t vmvn_u16(uint16x4_t __p0) {
15229   uint16x4_t __ret;
15230   __ret = ~__p0;
15231   return __ret;
15232 }
15233 #else
15234 __ai uint16x4_t vmvn_u16(uint16x4_t __p0) {
15235   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
15236   uint16x4_t __ret;
15237   __ret = ~__rev0;
15238   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
15239   return __ret;
15240 }
15241 #endif
15242 
15243 #ifdef __LITTLE_ENDIAN__
15244 __ai int8x8_t vmvn_s8(int8x8_t __p0) {
15245   int8x8_t __ret;
15246   __ret = ~__p0;
15247   return __ret;
15248 }
15249 #else
15250 __ai int8x8_t vmvn_s8(int8x8_t __p0) {
15251   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
15252   int8x8_t __ret;
15253   __ret = ~__rev0;
15254   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
15255   return __ret;
15256 }
15257 #endif
15258 
15259 #ifdef __LITTLE_ENDIAN__
15260 __ai int32x2_t vmvn_s32(int32x2_t __p0) {
15261   int32x2_t __ret;
15262   __ret = ~__p0;
15263   return __ret;
15264 }
15265 #else
15266 __ai int32x2_t vmvn_s32(int32x2_t __p0) {
15267   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
15268   int32x2_t __ret;
15269   __ret = ~__rev0;
15270   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
15271   return __ret;
15272 }
15273 #endif
15274 
15275 #ifdef __LITTLE_ENDIAN__
15276 __ai int16x4_t vmvn_s16(int16x4_t __p0) {
15277   int16x4_t __ret;
15278   __ret = ~__p0;
15279   return __ret;
15280 }
15281 #else
15282 __ai int16x4_t vmvn_s16(int16x4_t __p0) {
15283   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
15284   int16x4_t __ret;
15285   __ret = ~__rev0;
15286   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
15287   return __ret;
15288 }
15289 #endif
15290 
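/* vneg[q]_*: lanewise negation (-x); the big-endian paths below reverse lanes, negate, then reverse back. */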
15291 #ifdef __LITTLE_ENDIAN__
15292 __ai int8x16_t vnegq_s8(int8x16_t __p0) {
15293   int8x16_t __ret;
15294   __ret = -__p0;
15295   return __ret;
15296 }
15297 #else
15298 __ai int8x16_t vnegq_s8(int8x16_t __p0) {
15299   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15300   int8x16_t __ret;
15301   __ret = -__rev0;
15302   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15303   return __ret;
15304 }
15305 #endif
15306 
15307 #ifdef __LITTLE_ENDIAN__
15308 __ai float32x4_t vnegq_f32(float32x4_t __p0) {
15309   float32x4_t __ret;
15310   __ret = -__p0;
15311   return __ret;
15312 }
15313 #else
15314 __ai float32x4_t vnegq_f32(float32x4_t __p0) {
15315   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
15316   float32x4_t __ret;
15317   __ret = -__rev0;
15318   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
15319   return __ret;
15320 }
15321 #endif
15322 
15323 #ifdef __LITTLE_ENDIAN__
15324 __ai int32x4_t vnegq_s32(int32x4_t __p0) {
15325   int32x4_t __ret;
15326   __ret = -__p0;
15327   return __ret;
15328 }
15329 #else
15330 __ai int32x4_t vnegq_s32(int32x4_t __p0) {
15331   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
15332   int32x4_t __ret;
15333   __ret = -__rev0;
15334   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
15335   return __ret;
15336 }
15337 #endif
15338 
15339 #ifdef __LITTLE_ENDIAN__
15340 __ai int16x8_t vnegq_s16(int16x8_t __p0) {
15341   int16x8_t __ret;
15342   __ret = -__p0;
15343   return __ret;
15344 }
15345 #else
15346 __ai int16x8_t vnegq_s16(int16x8_t __p0) {
15347   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
15348   int16x8_t __ret;
15349   __ret = -__rev0;
15350   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
15351   return __ret;
15352 }
15353 #endif
15354 
15355 #ifdef __LITTLE_ENDIAN__
15356 __ai int8x8_t vneg_s8(int8x8_t __p0) {
15357   int8x8_t __ret;
15358   __ret = -__p0;
15359   return __ret;
15360 }
15361 #else
15362 __ai int8x8_t vneg_s8(int8x8_t __p0) {
15363   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
15364   int8x8_t __ret;
15365   __ret = -__rev0;
15366   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
15367   return __ret;
15368 }
15369 #endif
15370 
15371 #ifdef __LITTLE_ENDIAN__
15372 __ai float32x2_t vneg_f32(float32x2_t __p0) {
15373   float32x2_t __ret;
15374   __ret = -__p0;
15375   return __ret;
15376 }
15377 #else
15378 __ai float32x2_t vneg_f32(float32x2_t __p0) {
15379   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
15380   float32x2_t __ret;
15381   __ret = -__rev0;
15382   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
15383   return __ret;
15384 }
15385 #endif
15386 
15387 #ifdef __LITTLE_ENDIAN__
15388 __ai int32x2_t vneg_s32(int32x2_t __p0) {
15389   int32x2_t __ret;
15390   __ret = -__p0;
15391   return __ret;
15392 }
15393 #else
15394 __ai int32x2_t vneg_s32(int32x2_t __p0) {
15395   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
15396   int32x2_t __ret;
15397   __ret = -__rev0;
15398   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
15399   return __ret;
15400 }
15401 #endif
15402 
15403 #ifdef __LITTLE_ENDIAN__
15404 __ai int16x4_t vneg_s16(int16x4_t __p0) {
15405   int16x4_t __ret;
15406   __ret = -__p0;
15407   return __ret;
15408 }
15409 #else
15410 __ai int16x4_t vneg_s16(int16x4_t __p0) {
15411   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
15412   int16x4_t __ret;
15413   __ret = -__rev0;
15414   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
15415   return __ret;
15416 }
15417 #endif
15418 
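/* vorn[q]_*: bitwise OR-NOT, computing __p0 | ~__p1 (the VORN instruction). */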
15419 #ifdef __LITTLE_ENDIAN__
15420 __ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) {
15421   uint8x16_t __ret;
15422   __ret = __p0 | ~__p1;
15423   return __ret;
15424 }
15425 #else
15426 __ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) {
15427   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15428   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15429   uint8x16_t __ret;
15430   __ret = __rev0 | ~__rev1;
15431   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15432   return __ret;
15433 }
15434 #endif
15435 
15436 #ifdef __LITTLE_ENDIAN__
15437 __ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) {
15438   uint32x4_t __ret;
15439   __ret = __p0 | ~__p1;
15440   return __ret;
15441 }
15442 #else
15443 __ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) {
15444   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
15445   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
15446   uint32x4_t __ret;
15447   __ret = __rev0 | ~__rev1;
15448   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
15449   return __ret;
15450 }
15451 #endif
15452 
15453 #ifdef __LITTLE_ENDIAN__
15454 __ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) {
15455   uint64x2_t __ret;
15456   __ret = __p0 | ~__p1;
15457   return __ret;
15458 }
15459 #else
15460 __ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) {
15461   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
15462   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
15463   uint64x2_t __ret;
15464   __ret = __rev0 | ~__rev1;
15465   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
15466   return __ret;
15467 }
15468 #endif
15469 
15470 #ifdef __LITTLE_ENDIAN__
15471 __ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) {
15472   uint16x8_t __ret;
15473   __ret = __p0 | ~__p1;
15474   return __ret;
15475 }
15476 #else
15477 __ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) {
15478   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
15479   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
15480   uint16x8_t __ret;
15481   __ret = __rev0 | ~__rev1;
15482   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
15483   return __ret;
15484 }
15485 #endif
15486 
15487 #ifdef __LITTLE_ENDIAN__
15488 __ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) {
15489   int8x16_t __ret;
15490   __ret = __p0 | ~__p1;
15491   return __ret;
15492 }
15493 #else
15494 __ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) {
15495   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15496   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15497   int8x16_t __ret;
15498   __ret = __rev0 | ~__rev1;
15499   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15500   return __ret;
15501 }
15502 #endif
15503 
15504 #ifdef __LITTLE_ENDIAN__
15505 __ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) {
15506   int32x4_t __ret;
15507   __ret = __p0 | ~__p1;
15508   return __ret;
15509 }
15510 #else
15511 __ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) {
15512   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
15513   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
15514   int32x4_t __ret;
15515   __ret = __rev0 | ~__rev1;
15516   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
15517   return __ret;
15518 }
15519 #endif
15520 
15521 #ifdef __LITTLE_ENDIAN__
15522 __ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) {
15523   int64x2_t __ret;
15524   __ret = __p0 | ~__p1;
15525   return __ret;
15526 }
15527 #else
15528 __ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) {
15529   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
15530   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
15531   int64x2_t __ret;
15532   __ret = __rev0 | ~__rev1;
15533   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
15534   return __ret;
15535 }
15536 #endif
15537 
15538 #ifdef __LITTLE_ENDIAN__
15539 __ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) {
15540   int16x8_t __ret;
15541   __ret = __p0 | ~__p1;
15542   return __ret;
15543 }
15544 #else
15545 __ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) {
15546   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
15547   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
15548   int16x8_t __ret;
15549   __ret = __rev0 | ~__rev1;
15550   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
15551   return __ret;
15552 }
15553 #endif
15554 
15555 #ifdef __LITTLE_ENDIAN__
15556 __ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) {
15557   uint8x8_t __ret;
15558   __ret = __p0 | ~__p1;
15559   return __ret;
15560 }
15561 #else
15562 __ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) {
15563   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
15564   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
15565   uint8x8_t __ret;
15566   __ret = __rev0 | ~__rev1;
15567   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
15568   return __ret;
15569 }
15570 #endif
15571 
15572 #ifdef __LITTLE_ENDIAN__
15573 __ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) {
15574   uint32x2_t __ret;
15575   __ret = __p0 | ~__p1;
15576   return __ret;
15577 }
15578 #else
15579 __ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) {
15580   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
15581   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
15582   uint32x2_t __ret;
15583   __ret = __rev0 | ~__rev1;
15584   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
15585   return __ret;
15586 }
15587 #endif
15588 
15589 #ifdef __LITTLE_ENDIAN__
15590 __ai uint64x1_t vorn_u64(uint64x1_t __p0, uint64x1_t __p1) {
15591   uint64x1_t __ret;
15592   __ret = __p0 | ~__p1;
15593   return __ret;
15594 }
15595 #else
15596 __ai uint64x1_t vorn_u64(uint64x1_t __p0, uint64x1_t __p1) {
15597   uint64x1_t __ret;
15598   __ret = __p0 | ~__p1;
15599   return __ret;
15600 }
15601 #endif
15602 
15603 #ifdef __LITTLE_ENDIAN__
15604 __ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) {
15605   uint16x4_t __ret;
15606   __ret = __p0 | ~__p1;
15607   return __ret;
15608 }
15609 #else
15610 __ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) {
15611   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
15612   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
15613   uint16x4_t __ret;
15614   __ret = __rev0 | ~__rev1;
15615   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
15616   return __ret;
15617 }
15618 #endif
15619 
15620 #ifdef __LITTLE_ENDIAN__
15621 __ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) {
15622   int8x8_t __ret;
15623   __ret = __p0 | ~__p1;
15624   return __ret;
15625 }
15626 #else
15627 __ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) {
15628   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
15629   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
15630   int8x8_t __ret;
15631   __ret = __rev0 | ~__rev1;
15632   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
15633   return __ret;
15634 }
15635 #endif
15636 
15637 #ifdef __LITTLE_ENDIAN__
15638 __ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) {
15639   int32x2_t __ret;
15640   __ret = __p0 | ~__p1;
15641   return __ret;
15642 }
15643 #else
15644 __ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) {
15645   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
15646   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
15647   int32x2_t __ret;
15648   __ret = __rev0 | ~__rev1;
15649   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
15650   return __ret;
15651 }
15652 #endif
15653 
15654 #ifdef __LITTLE_ENDIAN__
15655 __ai int64x1_t vorn_s64(int64x1_t __p0, int64x1_t __p1) {
15656   int64x1_t __ret;
15657   __ret = __p0 | ~__p1;
15658   return __ret;
15659 }
15660 #else
15661 __ai int64x1_t vorn_s64(int64x1_t __p0, int64x1_t __p1) {
15662   int64x1_t __ret;
15663   __ret = __p0 | ~__p1;
15664   return __ret;
15665 }
15666 #endif
15667 
15668 #ifdef __LITTLE_ENDIAN__
15669 __ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) {
15670   int16x4_t __ret;
15671   __ret = __p0 | ~__p1;
15672   return __ret;
15673 }
15674 #else
15675 __ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) {
15676   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
15677   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
15678   int16x4_t __ret;
15679   __ret = __rev0 | ~__rev1;
15680   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
15681   return __ret;
15682 }
15683 #endif
15684 
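/* vorr[q]_*: bitwise OR, computing __p0 | __p1 (the VORR instruction). */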
15685 #ifdef __LITTLE_ENDIAN__
15686 __ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) {
15687   uint8x16_t __ret;
15688   __ret = __p0 | __p1;
15689   return __ret;
15690 }
15691 #else
15692 __ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) {
15693   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15694   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15695   uint8x16_t __ret;
15696   __ret = __rev0 | __rev1;
15697   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15698   return __ret;
15699 }
15700 #endif
15701 
15702 #ifdef __LITTLE_ENDIAN__
15703 __ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) {
15704   uint32x4_t __ret;
15705   __ret = __p0 | __p1;
15706   return __ret;
15707 }
15708 #else
15709 __ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) {
15710   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
15711   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
15712   uint32x4_t __ret;
15713   __ret = __rev0 | __rev1;
15714   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
15715   return __ret;
15716 }
15717 #endif
15718 
15719 #ifdef __LITTLE_ENDIAN__
15720 __ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) {
15721   uint64x2_t __ret;
15722   __ret = __p0 | __p1;
15723   return __ret;
15724 }
15725 #else
15726 __ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) {
15727   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
15728   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
15729   uint64x2_t __ret;
15730   __ret = __rev0 | __rev1;
15731   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
15732   return __ret;
15733 }
15734 #endif
15735 
15736 #ifdef __LITTLE_ENDIAN__
15737 __ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) {
15738   uint16x8_t __ret;
15739   __ret = __p0 | __p1;
15740   return __ret;
15741 }
15742 #else
15743 __ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) {
15744   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
15745   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
15746   uint16x8_t __ret;
15747   __ret = __rev0 | __rev1;
15748   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
15749   return __ret;
15750 }
15751 #endif
15752 
15753 #ifdef __LITTLE_ENDIAN__
15754 __ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) {
15755   int8x16_t __ret;
15756   __ret = __p0 | __p1;
15757   return __ret;
15758 }
15759 #else
15760 __ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) {
15761   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15762   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15763   int8x16_t __ret;
15764   __ret = __rev0 | __rev1;
15765   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15766   return __ret;
15767 }
15768 #endif
15769 
15770 #ifdef __LITTLE_ENDIAN__
15771 __ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) {
15772   int32x4_t __ret;
15773   __ret = __p0 | __p1;
15774   return __ret;
15775 }
15776 #else
15777 __ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) {
15778   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
15779   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
15780   int32x4_t __ret;
15781   __ret = __rev0 | __rev1;
15782   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
15783   return __ret;
15784 }
15785 #endif
15786 
15787 #ifdef __LITTLE_ENDIAN__
15788 __ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) {
15789   int64x2_t __ret;
15790   __ret = __p0 | __p1;
15791   return __ret;
15792 }
15793 #else
15794 __ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) {
15795   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
15796   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
15797   int64x2_t __ret;
15798   __ret = __rev0 | __rev1;
15799   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
15800   return __ret;
15801 }
15802 #endif
15803 
15804 #ifdef __LITTLE_ENDIAN__
15805 __ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) {
15806   int16x8_t __ret;
15807   __ret = __p0 | __p1;
15808   return __ret;
15809 }
15810 #else
15811 __ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) {
15812   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
15813   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
15814   int16x8_t __ret;
15815   __ret = __rev0 | __rev1;
15816   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
15817   return __ret;
15818 }
15819 #endif
15820 
15821 #ifdef __LITTLE_ENDIAN__
15822 __ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) {
15823   uint8x8_t __ret;
15824   __ret = __p0 | __p1;
15825   return __ret;
15826 }
15827 #else
15828 __ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) {
15829   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
15830   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
15831   uint8x8_t __ret;
15832   __ret = __rev0 | __rev1;
15833   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
15834   return __ret;
15835 }
15836 #endif
15837 
15838 #ifdef __LITTLE_ENDIAN__
15839 __ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) {
15840   uint32x2_t __ret;
15841   __ret = __p0 | __p1;
15842   return __ret;
15843 }
15844 #else
15845 __ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) {
15846   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
15847   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
15848   uint32x2_t __ret;
15849   __ret = __rev0 | __rev1;
15850   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
15851   return __ret;
15852 }
15853 #endif
15854 
15855 #ifdef __LITTLE_ENDIAN__
15856 __ai uint64x1_t vorr_u64(uint64x1_t __p0, uint64x1_t __p1) {
15857   uint64x1_t __ret;
15858   __ret = __p0 | __p1;
15859   return __ret;
15860 }
15861 #else
15862 __ai uint64x1_t vorr_u64(uint64x1_t __p0, uint64x1_t __p1) {
15863   uint64x1_t __ret;
15864   __ret = __p0 | __p1;
15865   return __ret;
15866 }
15867 #endif
15868 
15869 #ifdef __LITTLE_ENDIAN__
15870 __ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) {
15871   uint16x4_t __ret;
15872   __ret = __p0 | __p1;
15873   return __ret;
15874 }
15875 #else
15876 __ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) {
15877   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
15878   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
15879   uint16x4_t __ret;
15880   __ret = __rev0 | __rev1;
15881   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
15882   return __ret;
15883 }
15884 #endif
15885 
15886 #ifdef __LITTLE_ENDIAN__
15887 __ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) {
15888   int8x8_t __ret;
15889   __ret = __p0 | __p1;
15890   return __ret;
15891 }
15892 #else
15893 __ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) {
15894   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
15895   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
15896   int8x8_t __ret;
15897   __ret = __rev0 | __rev1;
15898   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
15899   return __ret;
15900 }
15901 #endif
15902 
15903 #ifdef __LITTLE_ENDIAN__
15904 __ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) {
15905   int32x2_t __ret;
15906   __ret = __p0 | __p1;
15907   return __ret;
15908 }
15909 #else
15910 __ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) {
15911   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
15912   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
15913   int32x2_t __ret;
15914   __ret = __rev0 | __rev1;
15915   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
15916   return __ret;
15917 }
15918 #endif
15919 
15920 #ifdef __LITTLE_ENDIAN__
15921 __ai int64x1_t vorr_s64(int64x1_t __p0, int64x1_t __p1) {
15922   int64x1_t __ret;
15923   __ret = __p0 | __p1;
15924   return __ret;
15925 }
15926 #else
15927 __ai int64x1_t vorr_s64(int64x1_t __p0, int64x1_t __p1) {
15928   int64x1_t __ret;
15929   __ret = __p0 | __p1;
15930   return __ret;
15931 }
15932 #endif
15933 
15934 #ifdef __LITTLE_ENDIAN__
15935 __ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) {
15936   int16x4_t __ret;
15937   __ret = __p0 | __p1;
15938   return __ret;
15939 }
15940 #else
15941 __ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) {
15942   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
15943   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
15944   int16x4_t __ret;
15945   __ret = __rev0 | __rev1;
15946   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
15947   return __ret;
15948 }
15949 #endif
15950 
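/* vpadal[q]_*: pairwise add and accumulate long; adjacent pairs of __p1 are summed into the wider lanes of __p0 (VPADAL). */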
15951 #ifdef __LITTLE_ENDIAN__
15952 __ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) {
15953   uint16x8_t __ret;
15954   __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
15955   return __ret;
15956 }
15957 #else
15958 __ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) {
15959   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
15960   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15961   uint16x8_t __ret;
15962   __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
15963   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
15964   return __ret;
15965 }
15966 #endif
15967 
15968 #ifdef __LITTLE_ENDIAN__
15969 __ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) {
15970   uint64x2_t __ret;
15971   __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
15972   return __ret;
15973 }
15974 #else
15975 __ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) {
15976   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
15977   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
15978   uint64x2_t __ret;
15979   __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
15980   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
15981   return __ret;
15982 }
15983 #endif
15984 
15985 #ifdef __LITTLE_ENDIAN__
15986 __ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) {
15987   uint32x4_t __ret;
15988   __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
15989   return __ret;
15990 }
15991 #else
15992 __ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) {
15993   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
15994   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
15995   uint32x4_t __ret;
15996   __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
15997   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
15998   return __ret;
15999 }
16000 #endif
16001 
16002 #ifdef __LITTLE_ENDIAN__
16003 __ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) {
16004   int16x8_t __ret;
16005   __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
16006   return __ret;
16007 }
16008 #else
16009 __ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) {
16010   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
16011   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
16012   int16x8_t __ret;
16013   __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
16014   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
16015   return __ret;
16016 }
16017 #endif
16018 
16019 #ifdef __LITTLE_ENDIAN__
16020 __ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) {
16021   int64x2_t __ret;
16022   __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
16023   return __ret;
16024 }
16025 #else
16026 __ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) {
16027   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
16028   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
16029   int64x2_t __ret;
16030   __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
16031   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16032   return __ret;
16033 }
16034 #endif
16035 
16036 #ifdef __LITTLE_ENDIAN__
16037 __ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) {
16038   int32x4_t __ret;
16039   __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
16040   return __ret;
16041 }
16042 #else
16043 __ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) {
16044   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
16045   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
16046   int32x4_t __ret;
16047   __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
16048   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
16049   return __ret;
16050 }
16051 #endif
16052 
16053 #ifdef __LITTLE_ENDIAN__
16054 __ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) {
16055   uint16x4_t __ret;
16056   __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
16057   return __ret;
16058 }
16059 #else
16060 __ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) {
16061   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
16062   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
16063   uint16x4_t __ret;
16064   __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
16065   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
16066   return __ret;
16067 }
16068 #endif
16069 
16070 #ifdef __LITTLE_ENDIAN__
16071 __ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) {
16072   uint64x1_t __ret;
16073   __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
16074   return __ret;
16075 }
16076 #else
16077 __ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) {
16078   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
16079   uint64x1_t __ret;
16080   __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 19);
16081   return __ret;
16082 }
16083 #endif
16084 
16085 #ifdef __LITTLE_ENDIAN__
16086 __ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) {
16087   uint32x2_t __ret;
16088   __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
16089   return __ret;
16090 }
16091 #else
16092 __ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) {
16093   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
16094   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
16095   uint32x2_t __ret;
16096   __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
16097   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16098   return __ret;
16099 }
16100 #endif
16101 
16102 #ifdef __LITTLE_ENDIAN__
16103 __ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) {
16104   int16x4_t __ret;
16105   __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
16106   return __ret;
16107 }
16108 #else
16109 __ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) {
16110   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
16111   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
16112   int16x4_t __ret;
16113   __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
16114   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
16115   return __ret;
16116 }
16117 #endif
16118 
16119 #ifdef __LITTLE_ENDIAN__
16120 __ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) {
16121   int64x1_t __ret;
16122   __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
16123   return __ret;
16124 }
16125 #else
16126 __ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) {
16127   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
16128   int64x1_t __ret;
16129   __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 3);
16130   return __ret;
16131 }
16132 #endif
16133 
16134 #ifdef __LITTLE_ENDIAN__
16135 __ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) {
16136   int32x2_t __ret;
16137   __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
16138   return __ret;
16139 }
16140 #else
16141 __ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) {
16142   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
16143   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
16144   int32x2_t __ret;
16145   __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
16146   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16147   return __ret;
16148 }
16149 #endif
16150 
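/* vpadd_*: pairwise add; each result lane is the sum of an adjacent pair of source lanes (VPADD). */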
16151 #ifdef __LITTLE_ENDIAN__
16152 __ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
16153   uint8x8_t __ret;
16154   __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
16155   return __ret;
16156 }
16157 #else
16158 __ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
16159   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
16160   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
16161   uint8x8_t __ret;
16162   __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
16163   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
16164   return __ret;
16165 }
16166 #endif
16167 
16168 #ifdef __LITTLE_ENDIAN__
16169 __ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
16170   uint32x2_t __ret;
16171   __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
16172   return __ret;
16173 }
16174 #else
16175 __ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
16176   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
16177   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
16178   uint32x2_t __ret;
16179   __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
16180   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16181   return __ret;
16182 }
16183 #endif
16184 
16185 #ifdef __LITTLE_ENDIAN__
16186 __ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
16187   uint16x4_t __ret;
16188   __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
16189   return __ret;
16190 }
16191 #else
16192 __ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
16193   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
16194   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
16195   uint16x4_t __ret;
16196   __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
16197   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
16198   return __ret;
16199 }
16200 #endif
16201 
16202 #ifdef __LITTLE_ENDIAN__
16203 __ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) {
16204   int8x8_t __ret;
16205   __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
16206   return __ret;
16207 }
16208 #else
16209 __ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) {
16210   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
16211   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
16212   int8x8_t __ret;
16213   __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
16214   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
16215   return __ret;
16216 }
16217 #endif
16218 
16219 #ifdef __LITTLE_ENDIAN__
16220 __ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) {
16221   float32x2_t __ret;
16222   __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
16223   return __ret;
16224 }
16225 #else
16226 __ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) {
16227   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
16228   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
16229   float32x2_t __ret;
16230   __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
16231   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16232   return __ret;
16233 }
16234 #endif
16235 
16236 #ifdef __LITTLE_ENDIAN__
16237 __ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) {
16238   int32x2_t __ret;
16239   __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
16240   return __ret;
16241 }
16242 #else
16243 __ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) {
16244   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
16245   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
16246   int32x2_t __ret;
16247   __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
16248   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16249   return __ret;
16250 }
16251 #endif
16252 
16253 #ifdef __LITTLE_ENDIAN__
16254 __ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) {
16255   int16x4_t __ret;
16256   __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
16257   return __ret;
16258 }
16259 #else
16260 __ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) {
16261   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
16262   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
16263   int16x4_t __ret;
16264   __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
16265   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
16266   return __ret;
16267 }
16268 #endif
16269 
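/* vpaddl[q]_*: pairwise add long; adjacent lane pairs are summed into double-width result lanes (VPADDL). */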
16270 #ifdef __LITTLE_ENDIAN__
16271 __ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) {
16272   uint16x8_t __ret;
16273   __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 49);
16274   return __ret;
16275 }
16276 #else
16277 __ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) {
16278   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
16279   uint16x8_t __ret;
16280   __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 49);
16281   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
16282   return __ret;
16283 }
16284 #endif
16285 
16286 #ifdef __LITTLE_ENDIAN__
16287 __ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) {
16288   uint64x2_t __ret;
16289   __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 51);
16290   return __ret;
16291 }
16292 #else
16293 __ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) {
16294   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
16295   uint64x2_t __ret;
16296   __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 51);
16297   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16298   return __ret;
16299 }
16300 #endif
16301 
16302 #ifdef __LITTLE_ENDIAN__
16303 __ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) {
16304   uint32x4_t __ret;
16305   __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 50);
16306   return __ret;
16307 }
16308 #else
16309 __ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) {
16310   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
16311   uint32x4_t __ret;
16312   __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 50);
16313   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
16314   return __ret;
16315 }
16316 #endif
16317 
16318 #ifdef __LITTLE_ENDIAN__
16319 __ai int16x8_t vpaddlq_s8(int8x16_t __p0) {
16320   int16x8_t __ret;
16321   __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 33);
16322   return __ret;
16323 }
16324 #else
16325 __ai int16x8_t vpaddlq_s8(int8x16_t __p0) {
16326   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
16327   int16x8_t __ret;
16328   __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 33);
16329   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
16330   return __ret;
16331 }
16332 #endif
16333 
16334 #ifdef __LITTLE_ENDIAN__
16335 __ai int64x2_t vpaddlq_s32(int32x4_t __p0) {
16336   int64x2_t __ret;
16337   __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 35);
16338   return __ret;
16339 }
16340 #else
16341 __ai int64x2_t vpaddlq_s32(int32x4_t __p0) {
16342   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
16343   int64x2_t __ret;
16344   __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 35);
16345   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16346   return __ret;
16347 }
16348 #endif
16349 
16350 #ifdef __LITTLE_ENDIAN__
16351 __ai int32x4_t vpaddlq_s16(int16x8_t __p0) {
16352   int32x4_t __ret;
16353   __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 34);
16354   return __ret;
16355 }
16356 #else
16357 __ai int32x4_t vpaddlq_s16(int16x8_t __p0) {
16358   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
16359   int32x4_t __ret;
16360   __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 34);
16361   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
16362   return __ret;
16363 }
16364 #endif
16365 
16366 #ifdef __LITTLE_ENDIAN__
16367 __ai uint16x4_t vpaddl_u8(uint8x8_t __p0) {
16368   uint16x4_t __ret;
16369   __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 17);
16370   return __ret;
16371 }
16372 #else
16373 __ai uint16x4_t vpaddl_u8(uint8x8_t __p0) {
16374   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
16375   uint16x4_t __ret;
16376   __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 17);
16377   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
16378   return __ret;
16379 }
16380 #endif
16381 
16382 #ifdef __LITTLE_ENDIAN__
16383 __ai uint64x1_t vpaddl_u32(uint32x2_t __p0) {
16384   uint64x1_t __ret;
16385   __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 19);
16386   return __ret;
16387 }
16388 #else
16389 __ai uint64x1_t vpaddl_u32(uint32x2_t __p0) {
16390   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
16391   uint64x1_t __ret;
16392   __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 19);
16393   return __ret;
16394 }
16395 #endif
16396 
16397 #ifdef __LITTLE_ENDIAN__
16398 __ai uint32x2_t vpaddl_u16(uint16x4_t __p0) {
16399   uint32x2_t __ret;
16400   __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 18);
16401   return __ret;
16402 }
16403 #else
16404 __ai uint32x2_t vpaddl_u16(uint16x4_t __p0) {
16405   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
16406   uint32x2_t __ret;
16407   __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 18);
16408   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16409   return __ret;
16410 }
16411 #endif
16412 
16413 #ifdef __LITTLE_ENDIAN__
16414 __ai int16x4_t vpaddl_s8(int8x8_t __p0) {
16415   int16x4_t __ret;
16416   __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 1);
16417   return __ret;
16418 }
16419 #else
16420 __ai int16x4_t vpaddl_s8(int8x8_t __p0) {
16421   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
16422   int16x4_t __ret;
16423   __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 1);
16424   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
16425   return __ret;
16426 }
16427 #endif
16428 
16429 #ifdef __LITTLE_ENDIAN__
16430 __ai int64x1_t vpaddl_s32(int32x2_t __p0) {
16431   int64x1_t __ret;
16432   __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 3);
16433   return __ret;
16434 }
16435 #else
16436 __ai int64x1_t vpaddl_s32(int32x2_t __p0) {
16437   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
16438   int64x1_t __ret;
16439   __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 3);
16440   return __ret;
16441 }
16442 #endif
16443 
16444 #ifdef __LITTLE_ENDIAN__
16445 __ai int32x2_t vpaddl_s16(int16x4_t __p0) {
16446   int32x2_t __ret;
16447   __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 2);
16448   return __ret;
16449 }
16450 #else
16451 __ai int32x2_t vpaddl_s16(int16x4_t __p0) {
16452   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
16453   int32x2_t __ret;
16454   __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 2);
16455   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16456   return __ret;
16457 }
16458 #endif
16459 
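/* vpmax_*: pairwise maximum; each result lane is the larger of an adjacent pair of source lanes (VPMAX). */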
16460 #ifdef __LITTLE_ENDIAN__
16461 __ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
16462   uint8x8_t __ret;
16463   __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
16464   return __ret;
16465 }
16466 #else
16467 __ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
16468   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
16469   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
16470   uint8x8_t __ret;
16471   __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
16472   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
16473   return __ret;
16474 }
16475 #endif
16476 
16477 #ifdef __LITTLE_ENDIAN__
16478 __ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
16479   uint32x2_t __ret;
16480   __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
16481   return __ret;
16482 }
16483 #else
16484 __ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
16485   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
16486   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
16487   uint32x2_t __ret;
16488   __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
16489   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16490   return __ret;
16491 }
16492 #endif
16493 
16494 #ifdef __LITTLE_ENDIAN__
16495 __ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
16496   uint16x4_t __ret;
16497   __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
16498   return __ret;
16499 }
16500 #else
16501 __ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
16502   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
16503   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
16504   uint16x4_t __ret;
16505   __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
16506   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
16507   return __ret;
16508 }
16509 #endif
16510 
16511 #ifdef __LITTLE_ENDIAN__
16512 __ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) {
16513   int8x8_t __ret;
16514   __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
16515   return __ret;
16516 }
16517 #else
16518 __ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) {
16519   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
16520   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
16521   int8x8_t __ret;
16522   __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
16523   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
16524   return __ret;
16525 }
16526 #endif
16527 
16528 #ifdef __LITTLE_ENDIAN__
16529 __ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) {
16530   float32x2_t __ret;
16531   __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
16532   return __ret;
16533 }
16534 #else
16535 __ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) {
16536   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
16537   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
16538   float32x2_t __ret;
16539   __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
16540   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16541   return __ret;
16542 }
16543 #endif
16544 
16545 #ifdef __LITTLE_ENDIAN__
16546 __ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) {
16547   int32x2_t __ret;
16548   __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
16549   return __ret;
16550 }
16551 #else
16552 __ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) {
16553   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
16554   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
16555   int32x2_t __ret;
16556   __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
16557   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16558   return __ret;
16559 }
16560 #endif
16561 
16562 #ifdef __LITTLE_ENDIAN__
16563 __ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) {
16564   int16x4_t __ret;
16565   __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
16566   return __ret;
16567 }
16568 #else
16569 __ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) {
16570   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
16571   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
16572   int16x4_t __ret;
16573   __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
16574   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
16575   return __ret;
16576 }
16577 #endif
16578 
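/* vpmin_*: pairwise minimum. Adjacent element pairs are taken from each
 * source vector and the smaller element of each pair is written to the
 * result. */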
16579 #ifdef __LITTLE_ENDIAN__
16580 __ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
16581   uint8x8_t __ret;
16582   __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
16583   return __ret;
16584 }
16585 #else
16586 __ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
16587   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
16588   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
16589   uint8x8_t __ret;
16590   __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
16591   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
16592   return __ret;
16593 }
16594 #endif
16595 
16596 #ifdef __LITTLE_ENDIAN__
16597 __ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
16598   uint32x2_t __ret;
16599   __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
16600   return __ret;
16601 }
16602 #else
16603 __ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
16604   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
16605   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
16606   uint32x2_t __ret;
16607   __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
16608   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16609   return __ret;
16610 }
16611 #endif
16612 
16613 #ifdef __LITTLE_ENDIAN__
16614 __ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
16615   uint16x4_t __ret;
16616   __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
16617   return __ret;
16618 }
16619 #else
16620 __ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
16621   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
16622   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
16623   uint16x4_t __ret;
16624   __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
16625   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
16626   return __ret;
16627 }
16628 #endif
16629 
16630 #ifdef __LITTLE_ENDIAN__
16631 __ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) {
16632   int8x8_t __ret;
16633   __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
16634   return __ret;
16635 }
16636 #else
16637 __ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) {
16638   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
16639   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
16640   int8x8_t __ret;
16641   __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
16642   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
16643   return __ret;
16644 }
16645 #endif
16646 
16647 #ifdef __LITTLE_ENDIAN__
16648 __ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) {
16649   float32x2_t __ret;
16650   __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
16651   return __ret;
16652 }
16653 #else
16654 __ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) {
16655   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
16656   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
16657   float32x2_t __ret;
16658   __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
16659   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16660   return __ret;
16661 }
16662 #endif
16663 
16664 #ifdef __LITTLE_ENDIAN__
16665 __ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) {
16666   int32x2_t __ret;
16667   __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
16668   return __ret;
16669 }
16670 #else
16671 __ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) {
16672   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
16673   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
16674   int32x2_t __ret;
16675   __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
16676   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16677   return __ret;
16678 }
16679 #endif
16680 
16681 #ifdef __LITTLE_ENDIAN__
16682 __ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) {
16683   int16x4_t __ret;
16684   __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
16685   return __ret;
16686 }
16687 #else
16688 __ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) {
16689   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
16690   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
16691   int16x4_t __ret;
16692   __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
16693   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
16694   return __ret;
16695 }
16696 #endif
16697 
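/* vqabs_* / vqabsq_*: saturating absolute value. The most negative value of
 * a signed element type saturates to the most positive value instead of
 * wrapping. */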
16698 #ifdef __LITTLE_ENDIAN__
16699 __ai int8x16_t vqabsq_s8(int8x16_t __p0) {
16700   int8x16_t __ret;
16701   __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 32);
16702   return __ret;
16703 }
16704 #else
16705 __ai int8x16_t vqabsq_s8(int8x16_t __p0) {
16706   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
16707   int8x16_t __ret;
16708   __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 32);
16709   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
16710   return __ret;
16711 }
16712 #endif
16713 
16714 #ifdef __LITTLE_ENDIAN__
16715 __ai int32x4_t vqabsq_s32(int32x4_t __p0) {
16716   int32x4_t __ret;
16717   __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 34);
16718   return __ret;
16719 }
16720 #else
16721 __ai int32x4_t vqabsq_s32(int32x4_t __p0) {
16722   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
16723   int32x4_t __ret;
16724   __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 34);
16725   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
16726   return __ret;
16727 }
16728 #endif
16729 
16730 #ifdef __LITTLE_ENDIAN__
16731 __ai int16x8_t vqabsq_s16(int16x8_t __p0) {
16732   int16x8_t __ret;
16733   __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 33);
16734   return __ret;
16735 }
16736 #else
16737 __ai int16x8_t vqabsq_s16(int16x8_t __p0) {
16738   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
16739   int16x8_t __ret;
16740   __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 33);
16741   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
16742   return __ret;
16743 }
16744 #endif
16745 
16746 #ifdef __LITTLE_ENDIAN__
16747 __ai int8x8_t vqabs_s8(int8x8_t __p0) {
16748   int8x8_t __ret;
16749   __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 0);
16750   return __ret;
16751 }
16752 #else
16753 __ai int8x8_t vqabs_s8(int8x8_t __p0) {
16754   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
16755   int8x8_t __ret;
16756   __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 0);
16757   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
16758   return __ret;
16759 }
16760 #endif
16761 
16762 #ifdef __LITTLE_ENDIAN__
16763 __ai int32x2_t vqabs_s32(int32x2_t __p0) {
16764   int32x2_t __ret;
16765   __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 2);
16766   return __ret;
16767 }
16768 #else
16769 __ai int32x2_t vqabs_s32(int32x2_t __p0) {
16770   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
16771   int32x2_t __ret;
16772   __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 2);
16773   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16774   return __ret;
16775 }
16776 #endif
16777 
16778 #ifdef __LITTLE_ENDIAN__
16779 __ai int16x4_t vqabs_s16(int16x4_t __p0) {
16780   int16x4_t __ret;
16781   __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 1);
16782   return __ret;
16783 }
16784 #else
16785 __ai int16x4_t vqabs_s16(int16x4_t __p0) {
16786   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
16787   int16x4_t __ret;
16788   __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 1);
16789   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
16790   return __ret;
16791 }
16792 #endif
16793 
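/* vqadd_* / vqaddq_*: saturating addition. Results that would overflow the
 * element type are clamped to the type's minimum or maximum value. */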
16794 #ifdef __LITTLE_ENDIAN__
16795 __ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
16796   uint8x16_t __ret;
16797   __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
16798   return __ret;
16799 }
16800 #else
16801 __ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
16802   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
16803   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
16804   uint8x16_t __ret;
16805   __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
16806   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
16807   return __ret;
16808 }
16809 #endif
16810 
16811 #ifdef __LITTLE_ENDIAN__
16812 __ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
16813   uint32x4_t __ret;
16814   __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
16815   return __ret;
16816 }
16817 #else
16818 __ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
16819   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
16820   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
16821   uint32x4_t __ret;
16822   __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
16823   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
16824   return __ret;
16825 }
16826 #endif
16827 
16828 #ifdef __LITTLE_ENDIAN__
16829 __ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
16830   uint64x2_t __ret;
16831   __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
16832   return __ret;
16833 }
16834 #else
16835 __ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
16836   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
16837   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
16838   uint64x2_t __ret;
16839   __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
16840   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16841   return __ret;
16842 }
16843 #endif
16844 
16845 #ifdef __LITTLE_ENDIAN__
16846 __ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
16847   uint16x8_t __ret;
16848   __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
16849   return __ret;
16850 }
16851 #else
16852 __ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
16853   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
16854   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
16855   uint16x8_t __ret;
16856   __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
16857   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
16858   return __ret;
16859 }
16860 #endif
16861 
16862 #ifdef __LITTLE_ENDIAN__
16863 __ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) {
16864   int8x16_t __ret;
16865   __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
16866   return __ret;
16867 }
16868 #else
16869 __ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) {
16870   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
16871   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
16872   int8x16_t __ret;
16873   __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
16874   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
16875   return __ret;
16876 }
16877 #endif
16878 
16879 #ifdef __LITTLE_ENDIAN__
16880 __ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
16881   int32x4_t __ret;
16882   __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
16883   return __ret;
16884 }
16885 #else
16886 __ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
16887   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
16888   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
16889   int32x4_t __ret;
16890   __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
16891   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
16892   return __ret;
16893 }
16894 #endif
16895 
16896 #ifdef __LITTLE_ENDIAN__
16897 __ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) {
16898   int64x2_t __ret;
16899   __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
16900   return __ret;
16901 }
16902 #else
16903 __ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) {
16904   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
16905   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
16906   int64x2_t __ret;
16907   __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
16908   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16909   return __ret;
16910 }
16911 #endif
16912 
16913 #ifdef __LITTLE_ENDIAN__
16914 __ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
16915   int16x8_t __ret;
16916   __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
16917   return __ret;
16918 }
16919 #else
16920 __ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
16921   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
16922   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
16923   int16x8_t __ret;
16924   __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
16925   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
16926   return __ret;
16927 }
16928 #endif
16929 
16930 #ifdef __LITTLE_ENDIAN__
16931 __ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
16932   uint8x8_t __ret;
16933   __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
16934   return __ret;
16935 }
16936 #else
16937 __ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
16938   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
16939   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
16940   uint8x8_t __ret;
16941   __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
16942   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
16943   return __ret;
16944 }
16945 #endif
16946 
16947 #ifdef __LITTLE_ENDIAN__
16948 __ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
16949   uint32x2_t __ret;
16950   __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
16951   return __ret;
16952 }
16953 #else
16954 __ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
16955   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
16956   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
16957   uint32x2_t __ret;
16958   __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
16959   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16960   return __ret;
16961 }
16962 #endif
16963 
16964 #ifdef __LITTLE_ENDIAN__
16965 __ai uint64x1_t vqadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
16966   uint64x1_t __ret;
16967   __ret = (uint64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
16968   return __ret;
16969 }
16970 #else
16971 __ai uint64x1_t vqadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
16972   uint64x1_t __ret;
16973   __ret = (uint64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
16974   return __ret;
16975 }
16976 #endif
16977 
16978 #ifdef __LITTLE_ENDIAN__
16979 __ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
16980   uint16x4_t __ret;
16981   __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
16982   return __ret;
16983 }
16984 #else
16985 __ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
16986   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
16987   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
16988   uint16x4_t __ret;
16989   __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
16990   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
16991   return __ret;
16992 }
16993 #endif
16994 
16995 #ifdef __LITTLE_ENDIAN__
16996 __ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) {
16997   int8x8_t __ret;
16998   __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
16999   return __ret;
17000 }
17001 #else
17002 __ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) {
17003   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
17004   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
17005   int8x8_t __ret;
17006   __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
17007   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
17008   return __ret;
17009 }
17010 #endif
17011 
17012 #ifdef __LITTLE_ENDIAN__
17013 __ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
17014   int32x2_t __ret;
17015   __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
17016   return __ret;
17017 }
17018 #else
17019 __ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
17020   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
17021   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
17022   int32x2_t __ret;
17023   __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
17024   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
17025   return __ret;
17026 }
17027 #endif
17028 
17029 #ifdef __LITTLE_ENDIAN__
17030 __ai int64x1_t vqadd_s64(int64x1_t __p0, int64x1_t __p1) {
17031   int64x1_t __ret;
17032   __ret = (int64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
17033   return __ret;
17034 }
17035 #else
17036 __ai int64x1_t vqadd_s64(int64x1_t __p0, int64x1_t __p1) {
17037   int64x1_t __ret;
17038   __ret = (int64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
17039   return __ret;
17040 }
17041 #endif
17042 
17043 #ifdef __LITTLE_ENDIAN__
17044 __ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
17045   int16x4_t __ret;
17046   __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
17047   return __ret;
17048 }
17049 #else
17050 __ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
17051   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
17052   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
17053   int16x4_t __ret;
17054   __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
17055   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
17056   return __ret;
17057 }
17058 #endif
17059 
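/* vqdmlal_*: saturating doubling multiply-accumulate long. Each result lane
 * is p0[i] + saturate(2 * p1[i] * p2[i]), computed in the wider result
 * element type, with the final addition also saturating. The _lane and _n
 * variants below broadcast a single multiplier lane or scalar. */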
17060 #ifdef __LITTLE_ENDIAN__
17061 __ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
17062   int64x2_t __ret;
17063   __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
17064   return __ret;
17065 }
17066 #else
17067 __ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
17068   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
17069   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
17070   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
17071   int64x2_t __ret;
17072   __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35);
17073   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
17074   return __ret;
17075 }
17076 __ai int64x2_t __noswap_vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
17077   int64x2_t __ret;
17078   __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
17079   return __ret;
17080 }
17081 #endif
17082 
17083 #ifdef __LITTLE_ENDIAN__
17084 __ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
17085   int32x4_t __ret;
17086   __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
17087   return __ret;
17088 }
17089 #else
17090 __ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
17091   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
17092   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
17093   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
17094   int32x4_t __ret;
17095   __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34);
17096   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
17097   return __ret;
17098 }
17099 __ai int32x4_t __noswap_vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
17100   int32x4_t __ret;
17101   __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
17102   return __ret;
17103 }
17104 #endif
17105 
17106 #ifdef __LITTLE_ENDIAN__
17107 #define vqdmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
17108   int64x2_t __s0 = __p0; \
17109   int32x2_t __s1 = __p1; \
17110   int32x2_t __s2 = __p2; \
17111   int64x2_t __ret; \
17112   __ret = vqdmlal_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
17113   __ret; \
17114 })
17115 #else
17116 #define vqdmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
17117   int64x2_t __s0 = __p0; \
17118   int32x2_t __s1 = __p1; \
17119   int32x2_t __s2 = __p2; \
17120   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
17121   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
17122   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
17123   int64x2_t __ret; \
17124   __ret = __noswap_vqdmlal_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
17125   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
17126   __ret; \
17127 })
17128 #endif
17129 
17130 #ifdef __LITTLE_ENDIAN__
17131 #define vqdmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
17132   int32x4_t __s0 = __p0; \
17133   int16x4_t __s1 = __p1; \
17134   int16x4_t __s2 = __p2; \
17135   int32x4_t __ret; \
17136   __ret = vqdmlal_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
17137   __ret; \
17138 })
17139 #else
17140 #define vqdmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
17141   int32x4_t __s0 = __p0; \
17142   int16x4_t __s1 = __p1; \
17143   int16x4_t __s2 = __p2; \
17144   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
17145   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
17146   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
17147   int32x4_t __ret; \
17148   __ret = __noswap_vqdmlal_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
17149   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
17150   __ret; \
17151 })
17152 #endif
17153 
17154 #ifdef __LITTLE_ENDIAN__
17155 __ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
17156   int64x2_t __ret;
17157   __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
17158   return __ret;
17159 }
17160 #else
17161 __ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
17162   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
17163   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
17164   int64x2_t __ret;
17165   __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
17166   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
17167   return __ret;
17168 }
17169 __ai int64x2_t __noswap_vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
17170   int64x2_t __ret;
17171   __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
17172   return __ret;
17173 }
17174 #endif
17175 
17176 #ifdef __LITTLE_ENDIAN__
17177 __ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
17178   int32x4_t __ret;
17179   __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
17180   return __ret;
17181 }
17182 #else
17183 __ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
17184   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
17185   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
17186   int32x4_t __ret;
17187   __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
17188   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
17189   return __ret;
17190 }
17191 __ai int32x4_t __noswap_vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
17192   int32x4_t __ret;
17193   __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
17194   return __ret;
17195 }
17196 #endif
17197 
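/* vqdmlsl_*: saturating doubling multiply-subtract long. Each result lane
 * is p0[i] - saturate(2 * p1[i] * p2[i]), computed in the wider result
 * element type, with the final subtraction also saturating. */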
17198 #ifdef __LITTLE_ENDIAN__
17199 __ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
17200   int64x2_t __ret;
17201   __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
17202   return __ret;
17203 }
17204 #else
17205 __ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
17206   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
17207   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
17208   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
17209   int64x2_t __ret;
17210   __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35);
17211   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
17212   return __ret;
17213 }
17214 __ai int64x2_t __noswap_vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
17215   int64x2_t __ret;
17216   __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
17217   return __ret;
17218 }
17219 #endif
17220 
17221 #ifdef __LITTLE_ENDIAN__
17222 __ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
17223   int32x4_t __ret;
17224   __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
17225   return __ret;
17226 }
17227 #else
17228 __ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
17229   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
17230   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
17231   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
17232   int32x4_t __ret;
17233   __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34);
17234   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
17235   return __ret;
17236 }
17237 __ai int32x4_t __noswap_vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
17238   int32x4_t __ret;
17239   __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
17240   return __ret;
17241 }
17242 #endif
17243 
17244 #ifdef __LITTLE_ENDIAN__
17245 #define vqdmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
17246   int64x2_t __s0 = __p0; \
17247   int32x2_t __s1 = __p1; \
17248   int32x2_t __s2 = __p2; \
17249   int64x2_t __ret; \
17250   __ret = vqdmlsl_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
17251   __ret; \
17252 })
17253 #else
17254 #define vqdmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
17255   int64x2_t __s0 = __p0; \
17256   int32x2_t __s1 = __p1; \
17257   int32x2_t __s2 = __p2; \
17258   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
17259   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
17260   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
17261   int64x2_t __ret; \
17262   __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
17263   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
17264   __ret; \
17265 })
17266 #endif
17267 
17268 #ifdef __LITTLE_ENDIAN__
17269 #define vqdmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
17270   int32x4_t __s0 = __p0; \
17271   int16x4_t __s1 = __p1; \
17272   int16x4_t __s2 = __p2; \
17273   int32x4_t __ret; \
17274   __ret = vqdmlsl_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
17275   __ret; \
17276 })
17277 #else
17278 #define vqdmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
17279   int32x4_t __s0 = __p0; \
17280   int16x4_t __s1 = __p1; \
17281   int16x4_t __s2 = __p2; \
17282   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
17283   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
17284   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
17285   int32x4_t __ret; \
17286   __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
17287   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
17288   __ret; \
17289 })
17290 #endif
17291 
17292 #ifdef __LITTLE_ENDIAN__
17293 __ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
17294   int64x2_t __ret;
17295   __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
17296   return __ret;
17297 }
17298 #else
17299 __ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
17300   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
17301   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
17302   int64x2_t __ret;
17303   __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
17304   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
17305   return __ret;
17306 }
17307 __ai int64x2_t __noswap_vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
17308   int64x2_t __ret;
17309   __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
17310   return __ret;
17311 }
17312 #endif
17313 
17314 #ifdef __LITTLE_ENDIAN__
17315 __ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
17316   int32x4_t __ret;
17317   __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
17318   return __ret;
17319 }
17320 #else
17321 __ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
17322   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
17323   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
17324   int32x4_t __ret;
17325   __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
17326   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
17327   return __ret;
17328 }
17329 __ai int32x4_t __noswap_vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
17330   int32x4_t __ret;
17331   __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
17332   return __ret;
17333 }
17334 #endif
17335 
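/* vqdmulh_* / vqdmulhq_*: saturating doubling multiply returning high half.
 * Each lane is the high half of saturate(2 * p0[i] * p1[i]); the _lane and
 * _n variants below broadcast a single multiplier lane or scalar. */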
17336 #ifdef __LITTLE_ENDIAN__
17337 __ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
17338   int32x4_t __ret;
17339   __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
17340   return __ret;
17341 }
17342 #else
17343 __ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
17344   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
17345   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
17346   int32x4_t __ret;
17347   __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
17348   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
17349   return __ret;
17350 }
17351 __ai int32x4_t __noswap_vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
17352   int32x4_t __ret;
17353   __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
17354   return __ret;
17355 }
17356 #endif
17357 
17358 #ifdef __LITTLE_ENDIAN__
17359 __ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
17360   int16x8_t __ret;
17361   __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
17362   return __ret;
17363 }
17364 #else
17365 __ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
17366   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
17367   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
17368   int16x8_t __ret;
17369   __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
17370   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
17371   return __ret;
17372 }
17373 __ai int16x8_t __noswap_vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
17374   int16x8_t __ret;
17375   __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
17376   return __ret;
17377 }
17378 #endif
17379 
17380 #ifdef __LITTLE_ENDIAN__
17381 __ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
17382   int32x2_t __ret;
17383   __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
17384   return __ret;
17385 }
17386 #else
17387 __ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
17388   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
17389   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
17390   int32x2_t __ret;
17391   __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
17392   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
17393   return __ret;
17394 }
17395 __ai int32x2_t __noswap_vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
17396   int32x2_t __ret;
17397   __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
17398   return __ret;
17399 }
17400 #endif
17401 
17402 #ifdef __LITTLE_ENDIAN__
17403 __ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
17404   int16x4_t __ret;
17405   __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
17406   return __ret;
17407 }
17408 #else
17409 __ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
17410   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
17411   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
17412   int16x4_t __ret;
17413   __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
17414   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
17415   return __ret;
17416 }
17417 __ai int16x4_t __noswap_vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
17418   int16x4_t __ret;
17419   __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
17420   return __ret;
17421 }
17422 #endif
17423 
17424 #ifdef __LITTLE_ENDIAN__
17425 #define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
17426   int32x4_t __s0 = __p0; \
17427   int32x2_t __s1 = __p1; \
17428   int32x4_t __ret; \
17429   __ret = vqdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
17430   __ret; \
17431 })
17432 #else
17433 #define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
17434   int32x4_t __s0 = __p0; \
17435   int32x2_t __s1 = __p1; \
17436   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
17437   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
17438   int32x4_t __ret; \
17439   __ret = __noswap_vqdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
17440   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
17441   __ret; \
17442 })
17443 #endif
17444 
17445 #ifdef __LITTLE_ENDIAN__
17446 #define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
17447   int16x8_t __s0 = __p0; \
17448   int16x4_t __s1 = __p1; \
17449   int16x8_t __ret; \
17450   __ret = vqdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
17451   __ret; \
17452 })
17453 #else
17454 #define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
17455   int16x8_t __s0 = __p0; \
17456   int16x4_t __s1 = __p1; \
17457   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
17458   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
17459   int16x8_t __ret; \
17460   __ret = __noswap_vqdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
17461   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
17462   __ret; \
17463 })
17464 #endif
17465 
17466 #ifdef __LITTLE_ENDIAN__
17467 #define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
17468   int32x2_t __s0 = __p0; \
17469   int32x2_t __s1 = __p1; \
17470   int32x2_t __ret; \
17471   __ret = vqdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
17472   __ret; \
17473 })
17474 #else
17475 #define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
17476   int32x2_t __s0 = __p0; \
17477   int32x2_t __s1 = __p1; \
17478   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
17479   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
17480   int32x2_t __ret; \
17481   __ret = __noswap_vqdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
17482   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
17483   __ret; \
17484 })
17485 #endif
17486 
17487 #ifdef __LITTLE_ENDIAN__
17488 #define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
17489   int16x4_t __s0 = __p0; \
17490   int16x4_t __s1 = __p1; \
17491   int16x4_t __ret; \
17492   __ret = vqdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
17493   __ret; \
17494 })
17495 #else
17496 #define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
17497   int16x4_t __s0 = __p0; \
17498   int16x4_t __s1 = __p1; \
17499   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
17500   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
17501   int16x4_t __ret; \
17502   __ret = __noswap_vqdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
17503   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
17504   __ret; \
17505 })
17506 #endif
17507 
17508 #ifdef __LITTLE_ENDIAN__
17509 __ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
17510   int32x4_t __ret;
17511   __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34);
17512   return __ret;
17513 }
17514 #else
17515 __ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
17516   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
17517   int32x4_t __ret;
17518   __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34);
17519   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
17520   return __ret;
17521 }
17522 #endif
17523 
17524 #ifdef __LITTLE_ENDIAN__
17525 __ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
17526   int16x8_t __ret;
17527   __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33);
17528   return __ret;
17529 }
17530 #else
17531 __ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
17532   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
17533   int16x8_t __ret;
17534   __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33);
17535   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
17536   return __ret;
17537 }
17538 #endif
17539 
17540 #ifdef __LITTLE_ENDIAN__
17541 __ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
17542   int32x2_t __ret;
17543   __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2);
17544   return __ret;
17545 }
17546 #else
17547 __ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
17548   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
17549   int32x2_t __ret;
17550   __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2);
17551   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
17552   return __ret;
17553 }
17554 #endif
17555 
17556 #ifdef __LITTLE_ENDIAN__
17557 __ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
17558   int16x4_t __ret;
17559   __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1);
17560   return __ret;
17561 }
17562 #else
17563 __ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
17564   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
17565   int16x4_t __ret;
17566   __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1);
17567   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
17568   return __ret;
17569 }
17570 #endif
17571 
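/* vqdmull_*: saturating doubling multiply long. Each narrow input pair is
 * multiplied, doubled with saturation, and widened to the double-width
 * result element type. */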
17572 #ifdef __LITTLE_ENDIAN__
17573 __ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
17574   int64x2_t __ret;
17575   __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
17576   return __ret;
17577 }
17578 #else
17579 __ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
17580   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
17581   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
17582   int64x2_t __ret;
17583   __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35);
17584   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
17585   return __ret;
17586 }
17587 __ai int64x2_t __noswap_vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
17588   int64x2_t __ret;
17589   __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
17590   return __ret;
17591 }
17592 #endif
17593 
17594 #ifdef __LITTLE_ENDIAN__
17595 __ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
17596   int32x4_t __ret;
17597   __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
17598   return __ret;
17599 }
17600 #else
17601 __ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
17602   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
17603   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
17604   int32x4_t __ret;
17605   __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34);
17606   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
17607   return __ret;
17608 }
17609 __ai int32x4_t __noswap_vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
17610   int32x4_t __ret;
17611   __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
17612   return __ret;
17613 }
17614 #endif
17615 
17616 #ifdef __LITTLE_ENDIAN__
17617 #define vqdmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
17618   int32x2_t __s0 = __p0; \
17619   int32x2_t __s1 = __p1; \
17620   int64x2_t __ret; \
17621   __ret = vqdmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
17622   __ret; \
17623 })
17624 #else
17625 #define vqdmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
17626   int32x2_t __s0 = __p0; \
17627   int32x2_t __s1 = __p1; \
17628   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
17629   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
17630   int64x2_t __ret; \
17631   __ret = __noswap_vqdmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
17632   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
17633   __ret; \
17634 })
17635 #endif
17636 
17637 #ifdef __LITTLE_ENDIAN__
17638 #define vqdmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
17639   int16x4_t __s0 = __p0; \
17640   int16x4_t __s1 = __p1; \
17641   int32x4_t __ret; \
17642   __ret = vqdmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
17643   __ret; \
17644 })
17645 #else
17646 #define vqdmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
17647   int16x4_t __s0 = __p0; \
17648   int16x4_t __s1 = __p1; \
17649   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
17650   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
17651   int32x4_t __ret; \
17652   __ret = __noswap_vqdmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
17653   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
17654   __ret; \
17655 })
17656 #endif
17657 
17658 #ifdef __LITTLE_ENDIAN__
17659 __ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
17660   int64x2_t __ret;
17661   __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
17662   return __ret;
17663 }
17664 #else
17665 __ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
17666   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
17667   int64x2_t __ret;
17668   __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
17669   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
17670   return __ret;
17671 }
17672 __ai int64x2_t __noswap_vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
17673   int64x2_t __ret;
17674   __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
17675   return __ret;
17676 }
17677 #endif
17678 
17679 #ifdef __LITTLE_ENDIAN__
17680 __ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
17681   int32x4_t __ret;
17682   __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
17683   return __ret;
17684 }
17685 #else
17686 __ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
17687   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
17688   int32x4_t __ret;
17689   __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
17690   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
17691   return __ret;
17692 }
17693 __ai int32x4_t __noswap_vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
17694   int32x4_t __ret;
17695   __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
17696   return __ret;
17697 }
17698 #endif
17699 
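/* vqmovn_*: saturating narrow. Each wide element is narrowed to half width,
 * clamping values that do not fit the narrower type. */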
17700 #ifdef __LITTLE_ENDIAN__
17701 __ai uint16x4_t vqmovn_u32(uint32x4_t __p0) {
17702   uint16x4_t __ret;
17703   __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17);
17704   return __ret;
17705 }
17706 #else
17707 __ai uint16x4_t vqmovn_u32(uint32x4_t __p0) {
17708   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
17709   uint16x4_t __ret;
17710   __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 17);
17711   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
17712   return __ret;
17713 }
17714 __ai uint16x4_t __noswap_vqmovn_u32(uint32x4_t __p0) {
17715   uint16x4_t __ret;
17716   __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17);
17717   return __ret;
17718 }
17719 #endif
17720 
17721 #ifdef __LITTLE_ENDIAN__
17722 __ai uint32x2_t vqmovn_u64(uint64x2_t __p0) {
17723   uint32x2_t __ret;
17724   __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18);
17725   return __ret;
17726 }
17727 #else
17728 __ai uint32x2_t vqmovn_u64(uint64x2_t __p0) {
17729   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
17730   uint32x2_t __ret;
17731   __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 18);
17732   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
17733   return __ret;
17734 }
17735 __ai uint32x2_t __noswap_vqmovn_u64(uint64x2_t __p0) {
17736   uint32x2_t __ret;
17737   __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18);
17738   return __ret;
17739 }
17740 #endif
17741 
17742 #ifdef __LITTLE_ENDIAN__
17743 __ai uint8x8_t vqmovn_u16(uint16x8_t __p0) {
17744   uint8x8_t __ret;
17745   __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16);
17746   return __ret;
17747 }
17748 #else
17749 __ai uint8x8_t vqmovn_u16(uint16x8_t __p0) {
17750   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
17751   uint8x8_t __ret;
17752   __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 16);
17753   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
17754   return __ret;
17755 }
17756 __ai uint8x8_t __noswap_vqmovn_u16(uint16x8_t __p0) {
17757   uint8x8_t __ret;
17758   __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16);
17759   return __ret;
17760 }
17761 #endif
17762 
17763 #ifdef __LITTLE_ENDIAN__
17764 __ai int16x4_t vqmovn_s32(int32x4_t __p0) {
17765   int16x4_t __ret;
17766   __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1);
17767   return __ret;
17768 }
17769 #else
17770 __ai int16x4_t vqmovn_s32(int32x4_t __p0) {
17771   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
17772   int16x4_t __ret;
17773   __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 1);
17774   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
17775   return __ret;
17776 }
17777 __ai int16x4_t __noswap_vqmovn_s32(int32x4_t __p0) {
17778   int16x4_t __ret;
17779   __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1);
17780   return __ret;
17781 }
17782 #endif
17783 
17784 #ifdef __LITTLE_ENDIAN__
17785 __ai int32x2_t vqmovn_s64(int64x2_t __p0) {
17786   int32x2_t __ret;
17787   __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2);
17788   return __ret;
17789 }
17790 #else
17791 __ai int32x2_t vqmovn_s64(int64x2_t __p0) {
17792   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
17793   int32x2_t __ret;
17794   __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 2);
17795   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
17796   return __ret;
17797 }
17798 __ai int32x2_t __noswap_vqmovn_s64(int64x2_t __p0) {
17799   int32x2_t __ret;
17800   __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2);
17801   return __ret;
17802 }
17803 #endif
17804 
17805 #ifdef __LITTLE_ENDIAN__
17806 __ai int8x8_t vqmovn_s16(int16x8_t __p0) {
17807   int8x8_t __ret;
17808   __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0);
17809   return __ret;
17810 }
17811 #else
17812 __ai int8x8_t vqmovn_s16(int16x8_t __p0) {
17813   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
17814   int8x8_t __ret;
17815   __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 0);
17816   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
17817   return __ret;
17818 }
17819 __ai int8x8_t __noswap_vqmovn_s16(int16x8_t __p0) {
17820   int8x8_t __ret;
17821   __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0);
17822   return __ret;
17823 }
17824 #endif
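/* Added commentary (not part of the original generated header): the vqmovn_* family
 * performs saturating narrowing; each lane is reduced to half its width and clamped
 * to the range of the narrower type. Illustrative sketch, assuming `src` and `dst`
 * are suitably sized caller-provided buffers:
 *
 *   int16x8_t wide   = vld1q_s16(src);     // eight 16-bit lanes
 *   int8x8_t  narrow = vqmovn_s16(wide);   // each lane clamped to [-128, 127]
 *   vst1_s8(dst, narrow);
 */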
17825 
17826 #ifdef __LITTLE_ENDIAN__
17827 __ai uint16x4_t vqmovun_s32(int32x4_t __p0) {
17828   uint16x4_t __ret;
17829   __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17);
17830   return __ret;
17831 }
17832 #else
17833 __ai uint16x4_t vqmovun_s32(int32x4_t __p0) {
17834   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
17835   uint16x4_t __ret;
17836   __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 17);
17837   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
17838   return __ret;
17839 }
17840 __ai uint16x4_t __noswap_vqmovun_s32(int32x4_t __p0) {
17841   uint16x4_t __ret;
17842   __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17);
17843   return __ret;
17844 }
17845 #endif
17846 
17847 #ifdef __LITTLE_ENDIAN__
17848 __ai uint32x2_t vqmovun_s64(int64x2_t __p0) {
17849   uint32x2_t __ret;
17850   __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18);
17851   return __ret;
17852 }
17853 #else
17854 __ai uint32x2_t vqmovun_s64(int64x2_t __p0) {
17855   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
17856   uint32x2_t __ret;
17857   __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 18);
17858   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
17859   return __ret;
17860 }
17861 __ai uint32x2_t __noswap_vqmovun_s64(int64x2_t __p0) {
17862   uint32x2_t __ret;
17863   __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18);
17864   return __ret;
17865 }
17866 #endif
17867 
17868 #ifdef __LITTLE_ENDIAN__
17869 __ai uint8x8_t vqmovun_s16(int16x8_t __p0) {
17870   uint8x8_t __ret;
17871   __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16);
17872   return __ret;
17873 }
17874 #else
17875 __ai uint8x8_t vqmovun_s16(int16x8_t __p0) {
17876   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
17877   uint8x8_t __ret;
17878   __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 16);
17879   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
17880   return __ret;
17881 }
17882 __ai uint8x8_t __noswap_vqmovun_s16(int16x8_t __p0) {
17883   uint8x8_t __ret;
17884   __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16);
17885   return __ret;
17886 }
17887 #endif
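/* Added commentary (not part of the original generated header): vqmovun_* narrows a
 * signed input to an unsigned result of half the width, so negative lanes saturate
 * to 0 and overly large positive lanes to the unsigned maximum. Illustrative sketch:
 *
 *   int16x8_t pixels  = vdupq_n_s16(-5);
 *   uint8x8_t clamped = vqmovun_s16(pixels);  // every lane becomes 0
 */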
17888 
17889 #ifdef __LITTLE_ENDIAN__
17890 __ai int8x16_t vqnegq_s8(int8x16_t __p0) {
17891   int8x16_t __ret;
17892   __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 32);
17893   return __ret;
17894 }
17895 #else
17896 __ai int8x16_t vqnegq_s8(int8x16_t __p0) {
17897   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
17898   int8x16_t __ret;
17899   __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 32);
17900   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
17901   return __ret;
17902 }
17903 #endif
17904 
17905 #ifdef __LITTLE_ENDIAN__
17906 __ai int32x4_t vqnegq_s32(int32x4_t __p0) {
17907   int32x4_t __ret;
17908   __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 34);
17909   return __ret;
17910 }
17911 #else
17912 __ai int32x4_t vqnegq_s32(int32x4_t __p0) {
17913   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
17914   int32x4_t __ret;
17915   __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 34);
17916   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
17917   return __ret;
17918 }
17919 #endif
17920 
17921 #ifdef __LITTLE_ENDIAN__
17922 __ai int16x8_t vqnegq_s16(int16x8_t __p0) {
17923   int16x8_t __ret;
17924   __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 33);
17925   return __ret;
17926 }
17927 #else
17928 __ai int16x8_t vqnegq_s16(int16x8_t __p0) {
17929   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
17930   int16x8_t __ret;
17931   __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 33);
17932   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
17933   return __ret;
17934 }
17935 #endif
17936 
17937 #ifdef __LITTLE_ENDIAN__
17938 __ai int8x8_t vqneg_s8(int8x8_t __p0) {
17939   int8x8_t __ret;
17940   __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 0);
17941   return __ret;
17942 }
17943 #else
17944 __ai int8x8_t vqneg_s8(int8x8_t __p0) {
17945   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
17946   int8x8_t __ret;
17947   __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 0);
17948   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
17949   return __ret;
17950 }
17951 #endif
17952 
17953 #ifdef __LITTLE_ENDIAN__
17954 __ai int32x2_t vqneg_s32(int32x2_t __p0) {
17955   int32x2_t __ret;
17956   __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 2);
17957   return __ret;
17958 }
17959 #else
17960 __ai int32x2_t vqneg_s32(int32x2_t __p0) {
17961   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
17962   int32x2_t __ret;
17963   __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 2);
17964   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
17965   return __ret;
17966 }
17967 #endif
17968 
17969 #ifdef __LITTLE_ENDIAN__
17970 __ai int16x4_t vqneg_s16(int16x4_t __p0) {
17971   int16x4_t __ret;
17972   __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 1);
17973   return __ret;
17974 }
17975 #else
17976 __ai int16x4_t vqneg_s16(int16x4_t __p0) {
17977   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
17978   int16x4_t __ret;
17979   __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 1);
17980   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
17981   return __ret;
17982 }
17983 #endif
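/* Added commentary (not part of the original generated header): vqneg/vqnegq negate
 * each lane with saturation, so the most negative representable value maps to the
 * maximum positive value instead of wrapping. Illustrative sketch:
 *
 *   int8x8_t v = vdup_n_s8(-128);
 *   int8x8_t n = vqneg_s8(v);   // yields 127 in every lane rather than -128
 */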
17984 
17985 #ifdef __LITTLE_ENDIAN__
17986 __ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
17987   int32x4_t __ret;
17988   __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
17989   return __ret;
17990 }
17991 #else
17992 __ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
17993   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
17994   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
17995   int32x4_t __ret;
17996   __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
17997   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
17998   return __ret;
17999 }
18000 __ai int32x4_t __noswap_vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
18001   int32x4_t __ret;
18002   __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
18003   return __ret;
18004 }
18005 #endif
18006 
18007 #ifdef __LITTLE_ENDIAN__
18008 __ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
18009   int16x8_t __ret;
18010   __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
18011   return __ret;
18012 }
18013 #else
18014 __ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
18015   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
18016   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
18017   int16x8_t __ret;
18018   __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
18019   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
18020   return __ret;
18021 }
18022 __ai int16x8_t __noswap_vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
18023   int16x8_t __ret;
18024   __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
18025   return __ret;
18026 }
18027 #endif
18028 
18029 #ifdef __LITTLE_ENDIAN__
18030 __ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
18031   int32x2_t __ret;
18032   __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
18033   return __ret;
18034 }
18035 #else
18036 __ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
18037   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
18038   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
18039   int32x2_t __ret;
18040   __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
18041   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
18042   return __ret;
18043 }
18044 __ai int32x2_t __noswap_vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
18045   int32x2_t __ret;
18046   __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
18047   return __ret;
18048 }
18049 #endif
18050 
18051 #ifdef __LITTLE_ENDIAN__
18052 __ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
18053   int16x4_t __ret;
18054   __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
18055   return __ret;
18056 }
18057 #else
18058 __ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
18059   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
18060   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
18061   int16x4_t __ret;
18062   __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
18063   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
18064   return __ret;
18065 }
18066 __ai int16x4_t __noswap_vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
18067   int16x4_t __ret;
18068   __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
18069   return __ret;
18070 }
18071 #endif
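/* Added commentary (not part of the original generated header): vqrdmulh/vqrdmulhq
 * compute a saturating rounding doubling multiply and return the high half, which is
 * the usual Q15/Q31 fixed-point multiply. Illustrative sketch in Q15:
 *
 *   int16x4_t a = vdup_n_s16(16384);     // 0.5 in Q15
 *   int16x4_t b = vdup_n_s16(8192);      // 0.25 in Q15
 *   int16x4_t p = vqrdmulh_s16(a, b);    // 4096, i.e. 0.125 in Q15
 */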
18072 
18073 #ifdef __LITTLE_ENDIAN__
18074 #define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
18075   int32x4_t __s0 = __p0; \
18076   int32x2_t __s1 = __p1; \
18077   int32x4_t __ret; \
18078   __ret = vqrdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
18079   __ret; \
18080 })
18081 #else
18082 #define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
18083   int32x4_t __s0 = __p0; \
18084   int32x2_t __s1 = __p1; \
18085   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
18086   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
18087   int32x4_t __ret; \
18088   __ret = __noswap_vqrdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
18089   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
18090   __ret; \
18091 })
18092 #endif
18093 
18094 #ifdef __LITTLE_ENDIAN__
18095 #define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
18096   int16x8_t __s0 = __p0; \
18097   int16x4_t __s1 = __p1; \
18098   int16x8_t __ret; \
18099   __ret = vqrdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
18100   __ret; \
18101 })
18102 #else
18103 #define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
18104   int16x8_t __s0 = __p0; \
18105   int16x4_t __s1 = __p1; \
18106   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
18107   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
18108   int16x8_t __ret; \
18109   __ret = __noswap_vqrdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
18110   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
18111   __ret; \
18112 })
18113 #endif
18114 
18115 #ifdef __LITTLE_ENDIAN__
18116 #define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
18117   int32x2_t __s0 = __p0; \
18118   int32x2_t __s1 = __p1; \
18119   int32x2_t __ret; \
18120   __ret = vqrdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
18121   __ret; \
18122 })
18123 #else
18124 #define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
18125   int32x2_t __s0 = __p0; \
18126   int32x2_t __s1 = __p1; \
18127   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
18128   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
18129   int32x2_t __ret; \
18130   __ret = __noswap_vqrdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
18131   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
18132   __ret; \
18133 })
18134 #endif
18135 
18136 #ifdef __LITTLE_ENDIAN__
18137 #define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
18138   int16x4_t __s0 = __p0; \
18139   int16x4_t __s1 = __p1; \
18140   int16x4_t __ret; \
18141   __ret = vqrdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
18142   __ret; \
18143 })
18144 #else
18145 #define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
18146   int16x4_t __s0 = __p0; \
18147   int16x4_t __s1 = __p1; \
18148   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
18149   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
18150   int16x4_t __ret; \
18151   __ret = __noswap_vqrdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
18152   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
18153   __ret; \
18154 })
18155 #endif
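/* Added commentary (not part of the original generated header): the _lane variants
 * above multiply every lane of the first operand by one compile-time-selected lane
 * of the second operand. Illustrative sketch:
 *
 *   int16x4_t coeffs = vdup_n_s16(8192);
 *   int16x4_t a      = vdup_n_s16(16384);
 *   int16x4_t p      = vqrdmulh_lane_s16(a, coeffs, 0);  // all lanes use coeffs[0]
 */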
18156 
18157 #ifdef __LITTLE_ENDIAN__
18158 __ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
18159   int32x4_t __ret;
18160   __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34);
18161   return __ret;
18162 }
18163 #else
18164 __ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
18165   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
18166   int32x4_t __ret;
18167   __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34);
18168   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
18169   return __ret;
18170 }
18171 #endif
18172 
18173 #ifdef __LITTLE_ENDIAN__
18174 __ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
18175   int16x8_t __ret;
18176   __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33);
18177   return __ret;
18178 }
18179 #else
18180 __ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
18181   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
18182   int16x8_t __ret;
18183   __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33);
18184   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
18185   return __ret;
18186 }
18187 #endif
18188 
18189 #ifdef __LITTLE_ENDIAN__
18190 __ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
18191   int32x2_t __ret;
18192   __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2);
18193   return __ret;
18194 }
18195 #else
18196 __ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
18197   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
18198   int32x2_t __ret;
18199   __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2);
18200   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
18201   return __ret;
18202 }
18203 #endif
18204 
18205 #ifdef __LITTLE_ENDIAN__
18206 __ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
18207   int16x4_t __ret;
18208   __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1);
18209   return __ret;
18210 }
18211 #else
18212 __ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
18213   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
18214   int16x4_t __ret;
18215   __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1);
18216   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
18217   return __ret;
18218 }
18219 #endif
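/* Added commentary (not part of the original generated header): the _n variants
 * above broadcast a scalar second operand to every lane before the saturating
 * rounding doubling multiply. Illustrative sketch:
 *
 *   int32x4_t acc = vdupq_n_s32(1 << 30);           // 0.5 in Q31
 *   int32x4_t out = vqrdmulhq_n_s32(acc, 1 << 29);  // multiply each lane by 0.25 in Q31
 */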
18220 
18221 #ifdef __LITTLE_ENDIAN__
18222 __ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
18223   uint8x16_t __ret;
18224   __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
18225   return __ret;
18226 }
18227 #else
18228 __ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
18229   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
18230   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
18231   uint8x16_t __ret;
18232   __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
18233   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
18234   return __ret;
18235 }
18236 #endif
18237 
18238 #ifdef __LITTLE_ENDIAN__
18239 __ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
18240   uint32x4_t __ret;
18241   __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
18242   return __ret;
18243 }
18244 #else
18245 __ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
18246   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
18247   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
18248   uint32x4_t __ret;
18249   __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
18250   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
18251   return __ret;
18252 }
18253 #endif
18254 
18255 #ifdef __LITTLE_ENDIAN__
18256 __ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
18257   uint64x2_t __ret;
18258   __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
18259   return __ret;
18260 }
18261 #else
18262 __ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
18263   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
18264   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
18265   uint64x2_t __ret;
18266   __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
18267   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
18268   return __ret;
18269 }
18270 #endif
18271 
18272 #ifdef __LITTLE_ENDIAN__
18273 __ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
18274   uint16x8_t __ret;
18275   __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
18276   return __ret;
18277 }
18278 #else
18279 __ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
18280   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
18281   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
18282   uint16x8_t __ret;
18283   __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
18284   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
18285   return __ret;
18286 }
18287 #endif
18288 
18289 #ifdef __LITTLE_ENDIAN__
18290 __ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
18291   int8x16_t __ret;
18292   __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
18293   return __ret;
18294 }
18295 #else
18296 __ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
18297   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
18298   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
18299   int8x16_t __ret;
18300   __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
18301   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
18302   return __ret;
18303 }
18304 #endif
18305 
18306 #ifdef __LITTLE_ENDIAN__
18307 __ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
18308   int32x4_t __ret;
18309   __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
18310   return __ret;
18311 }
18312 #else
18313 __ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
18314   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
18315   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
18316   int32x4_t __ret;
18317   __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
18318   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
18319   return __ret;
18320 }
18321 #endif
18322 
18323 #ifdef __LITTLE_ENDIAN__
18324 __ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
18325   int64x2_t __ret;
18326   __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
18327   return __ret;
18328 }
18329 #else
18330 __ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
18331   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
18332   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
18333   int64x2_t __ret;
18334   __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
18335   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
18336   return __ret;
18337 }
18338 #endif
18339 
18340 #ifdef __LITTLE_ENDIAN__
18341 __ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
18342   int16x8_t __ret;
18343   __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
18344   return __ret;
18345 }
18346 #else
18347 __ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
18348   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
18349   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
18350   int16x8_t __ret;
18351   __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
18352   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
18353   return __ret;
18354 }
18355 #endif
18356 
18357 #ifdef __LITTLE_ENDIAN__
18358 __ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
18359   uint8x8_t __ret;
18360   __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
18361   return __ret;
18362 }
18363 #else
18364 __ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
18365   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
18366   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
18367   uint8x8_t __ret;
18368   __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
18369   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
18370   return __ret;
18371 }
18372 #endif
18373 
18374 #ifdef __LITTLE_ENDIAN__
18375 __ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
18376   uint32x2_t __ret;
18377   __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
18378   return __ret;
18379 }
18380 #else
18381 __ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
18382   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
18383   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
18384   uint32x2_t __ret;
18385   __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
18386   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
18387   return __ret;
18388 }
18389 #endif
18390 
18391 #ifdef __LITTLE_ENDIAN__
18392 __ai uint64x1_t vqrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
18393   uint64x1_t __ret;
18394   __ret = (uint64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
18395   return __ret;
18396 }
18397 #else
18398 __ai uint64x1_t vqrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
18399   uint64x1_t __ret;
18400   __ret = (uint64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
18401   return __ret;
18402 }
18403 #endif
18404 
18405 #ifdef __LITTLE_ENDIAN__
18406 __ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
18407   uint16x4_t __ret;
18408   __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
18409   return __ret;
18410 }
18411 #else
18412 __ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
18413   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
18414   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
18415   uint16x4_t __ret;
18416   __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
18417   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
18418   return __ret;
18419 }
18420 #endif
18421 
18422 #ifdef __LITTLE_ENDIAN__
18423 __ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) {
18424   int8x8_t __ret;
18425   __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
18426   return __ret;
18427 }
18428 #else
18429 __ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) {
18430   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
18431   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
18432   int8x8_t __ret;
18433   __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
18434   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
18435   return __ret;
18436 }
18437 #endif
18438 
18439 #ifdef __LITTLE_ENDIAN__
18440 __ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) {
18441   int32x2_t __ret;
18442   __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
18443   return __ret;
18444 }
18445 #else
18446 __ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) {
18447   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
18448   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
18449   int32x2_t __ret;
18450   __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
18451   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
18452   return __ret;
18453 }
18454 #endif
18455 
18456 #ifdef __LITTLE_ENDIAN__
18457 __ai int64x1_t vqrshl_s64(int64x1_t __p0, int64x1_t __p1) {
18458   int64x1_t __ret;
18459   __ret = (int64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
18460   return __ret;
18461 }
18462 #else
18463 __ai int64x1_t vqrshl_s64(int64x1_t __p0, int64x1_t __p1) {
18464   int64x1_t __ret;
18465   __ret = (int64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
18466   return __ret;
18467 }
18468 #endif
18469 
18470 #ifdef __LITTLE_ENDIAN__
18471 __ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) {
18472   int16x4_t __ret;
18473   __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
18474   return __ret;
18475 }
18476 #else
18477 __ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) {
18478   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
18479   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
18480   int16x4_t __ret;
18481   __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
18482   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
18483   return __ret;
18484 }
18485 #endif
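/* Added commentary (not part of the original generated header): vqrshl/vqrshlq shift
 * each lane left by a per-lane signed count taken from the second operand, with
 * saturation; negative counts shift right with rounding. Illustrative sketch:
 *
 *   int16x4_t v = vdup_n_s16(100);
 *   int16x4_t s = vdup_n_s16(-2);
 *   int16x4_t r = vqrshl_s16(v, s);   // rounding shift right by 2 -> 25 per lane
 */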
18486 
18487 #ifdef __LITTLE_ENDIAN__
18488 #define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
18489   uint32x4_t __s0 = __p0; \
18490   uint16x4_t __ret; \
18491   __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \
18492   __ret; \
18493 })
18494 #else
18495 #define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
18496   uint32x4_t __s0 = __p0; \
18497   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
18498   uint16x4_t __ret; \
18499   __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 17); \
18500   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
18501   __ret; \
18502 })
18503 #define __noswap_vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
18504   uint32x4_t __s0 = __p0; \
18505   uint16x4_t __ret; \
18506   __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \
18507   __ret; \
18508 })
18509 #endif
18510 
18511 #ifdef __LITTLE_ENDIAN__
18512 #define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
18513   uint64x2_t __s0 = __p0; \
18514   uint32x2_t __ret; \
18515   __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \
18516   __ret; \
18517 })
18518 #else
18519 #define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
18520   uint64x2_t __s0 = __p0; \
18521   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
18522   uint32x2_t __ret; \
18523   __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 18); \
18524   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
18525   __ret; \
18526 })
18527 #define __noswap_vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
18528   uint64x2_t __s0 = __p0; \
18529   uint32x2_t __ret; \
18530   __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \
18531   __ret; \
18532 })
18533 #endif
18534 
18535 #ifdef __LITTLE_ENDIAN__
18536 #define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
18537   uint16x8_t __s0 = __p0; \
18538   uint8x8_t __ret; \
18539   __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \
18540   __ret; \
18541 })
18542 #else
18543 #define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
18544   uint16x8_t __s0 = __p0; \
18545   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
18546   uint8x8_t __ret; \
18547   __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 16); \
18548   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
18549   __ret; \
18550 })
18551 #define __noswap_vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
18552   uint16x8_t __s0 = __p0; \
18553   uint8x8_t __ret; \
18554   __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \
18555   __ret; \
18556 })
18557 #endif
18558 
18559 #ifdef __LITTLE_ENDIAN__
18560 #define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
18561   int32x4_t __s0 = __p0; \
18562   int16x4_t __ret; \
18563   __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \
18564   __ret; \
18565 })
18566 #else
18567 #define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
18568   int32x4_t __s0 = __p0; \
18569   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
18570   int16x4_t __ret; \
18571   __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 1); \
18572   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
18573   __ret; \
18574 })
18575 #define __noswap_vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
18576   int32x4_t __s0 = __p0; \
18577   int16x4_t __ret; \
18578   __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \
18579   __ret; \
18580 })
18581 #endif
18582 
18583 #ifdef __LITTLE_ENDIAN__
18584 #define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
18585   int64x2_t __s0 = __p0; \
18586   int32x2_t __ret; \
18587   __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \
18588   __ret; \
18589 })
18590 #else
18591 #define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
18592   int64x2_t __s0 = __p0; \
18593   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
18594   int32x2_t __ret; \
18595   __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 2); \
18596   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
18597   __ret; \
18598 })
18599 #define __noswap_vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
18600   int64x2_t __s0 = __p0; \
18601   int32x2_t __ret; \
18602   __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \
18603   __ret; \
18604 })
18605 #endif
18606 
18607 #ifdef __LITTLE_ENDIAN__
18608 #define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
18609   int16x8_t __s0 = __p0; \
18610   int8x8_t __ret; \
18611   __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \
18612   __ret; \
18613 })
18614 #else
18615 #define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
18616   int16x8_t __s0 = __p0; \
18617   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
18618   int8x8_t __ret; \
18619   __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 0); \
18620   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
18621   __ret; \
18622 })
18623 #define __noswap_vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
18624   int16x8_t __s0 = __p0; \
18625   int8x8_t __ret; \
18626   __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \
18627   __ret; \
18628 })
18629 #endif
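/* Added commentary (not part of the original generated header): vqrshrn_n_* shifts
 * each lane right by a compile-time-constant amount with rounding, then narrows to
 * half the width with saturation. Illustrative sketch:
 *
 *   int32x4_t acc = vdupq_n_s32(1 << 20);
 *   int16x4_t out = vqrshrn_n_s32(acc, 16);  // (x + (1 << 15)) >> 16, clamped to int16
 */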
18630 
18631 #ifdef __LITTLE_ENDIAN__
18632 #define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
18633   int32x4_t __s0 = __p0; \
18634   uint16x4_t __ret; \
18635   __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \
18636   __ret; \
18637 })
18638 #else
18639 #define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
18640   int32x4_t __s0 = __p0; \
18641   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
18642   uint16x4_t __ret; \
18643   __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 17); \
18644   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
18645   __ret; \
18646 })
18647 #define __noswap_vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
18648   int32x4_t __s0 = __p0; \
18649   uint16x4_t __ret; \
18650   __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \
18651   __ret; \
18652 })
18653 #endif
18654 
18655 #ifdef __LITTLE_ENDIAN__
18656 #define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
18657   int64x2_t __s0 = __p0; \
18658   uint32x2_t __ret; \
18659   __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \
18660   __ret; \
18661 })
18662 #else
18663 #define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
18664   int64x2_t __s0 = __p0; \
18665   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
18666   uint32x2_t __ret; \
18667   __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 18); \
18668   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
18669   __ret; \
18670 })
18671 #define __noswap_vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
18672   int64x2_t __s0 = __p0; \
18673   uint32x2_t __ret; \
18674   __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \
18675   __ret; \
18676 })
18677 #endif
18678 
18679 #ifdef __LITTLE_ENDIAN__
18680 #define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
18681   int16x8_t __s0 = __p0; \
18682   uint8x8_t __ret; \
18683   __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \
18684   __ret; \
18685 })
18686 #else
18687 #define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
18688   int16x8_t __s0 = __p0; \
18689   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
18690   uint8x8_t __ret; \
18691   __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 16); \
18692   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
18693   __ret; \
18694 })
18695 #define __noswap_vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
18696   int16x8_t __s0 = __p0; \
18697   uint8x8_t __ret; \
18698   __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \
18699   __ret; \
18700 })
18701 #endif
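/* Added commentary (not part of the original generated header): vqrshrun_n_* is the
 * signed-to-unsigned form of the rounding shift-right-and-narrow; negative results
 * saturate to 0. Illustrative sketch:
 *
 *   int16x8_t acc = vdupq_n_s16(-300);
 *   uint8x8_t out = vqrshrun_n_s16(acc, 4);  // negative lanes become 0
 */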
18702 
18703 #ifdef __LITTLE_ENDIAN__
18704 __ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
18705   uint8x16_t __ret;
18706   __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
18707   return __ret;
18708 }
18709 #else
18710 __ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
18711   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
18712   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
18713   uint8x16_t __ret;
18714   __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
18715   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
18716   return __ret;
18717 }
18718 #endif
18719 
18720 #ifdef __LITTLE_ENDIAN__
18721 __ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
18722   uint32x4_t __ret;
18723   __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
18724   return __ret;
18725 }
18726 #else
18727 __ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
18728   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
18729   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
18730   uint32x4_t __ret;
18731   __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
18732   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
18733   return __ret;
18734 }
18735 #endif
18736 
18737 #ifdef __LITTLE_ENDIAN__
18738 __ai uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
18739   uint64x2_t __ret;
18740   __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
18741   return __ret;
18742 }
18743 #else
18744 __ai uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
18745   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
18746   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
18747   uint64x2_t __ret;
18748   __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
18749   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
18750   return __ret;
18751 }
18752 #endif
18753 
18754 #ifdef __LITTLE_ENDIAN__
18755 __ai uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
18756   uint16x8_t __ret;
18757   __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
18758   return __ret;
18759 }
18760 #else
18761 __ai uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
18762   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
18763   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
18764   uint16x8_t __ret;
18765   __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
18766   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
18767   return __ret;
18768 }
18769 #endif
18770 
18771 #ifdef __LITTLE_ENDIAN__
18772 __ai int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) {
18773   int8x16_t __ret;
18774   __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
18775   return __ret;
18776 }
18777 #else
18778 __ai int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) {
18779   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
18780   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
18781   int8x16_t __ret;
18782   __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
18783   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
18784   return __ret;
18785 }
18786 #endif
18787 
18788 #ifdef __LITTLE_ENDIAN__
18789 __ai int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) {
18790   int32x4_t __ret;
18791   __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
18792   return __ret;
18793 }
18794 #else
18795 __ai int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) {
18796   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
18797   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
18798   int32x4_t __ret;
18799   __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
18800   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
18801   return __ret;
18802 }
18803 #endif
18804 
18805 #ifdef __LITTLE_ENDIAN__
18806 __ai int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) {
18807   int64x2_t __ret;
18808   __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
18809   return __ret;
18810 }
18811 #else
18812 __ai int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) {
18813   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
18814   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
18815   int64x2_t __ret;
18816   __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
18817   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
18818   return __ret;
18819 }
18820 #endif
18821 
18822 #ifdef __LITTLE_ENDIAN__
18823 __ai int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) {
18824   int16x8_t __ret;
18825   __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
18826   return __ret;
18827 }
18828 #else
18829 __ai int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) {
18830   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
18831   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
18832   int16x8_t __ret;
18833   __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
18834   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
18835   return __ret;
18836 }
18837 #endif
18838 
18839 #ifdef __LITTLE_ENDIAN__
18840 __ai uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) {
18841   uint8x8_t __ret;
18842   __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
18843   return __ret;
18844 }
18845 #else
18846 __ai uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) {
18847   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
18848   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
18849   uint8x8_t __ret;
18850   __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
18851   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
18852   return __ret;
18853 }
18854 #endif
18855 
18856 #ifdef __LITTLE_ENDIAN__
18857 __ai uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) {
18858   uint32x2_t __ret;
18859   __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
18860   return __ret;
18861 }
18862 #else
18863 __ai uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) {
18864   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
18865   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
18866   uint32x2_t __ret;
18867   __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
18868   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
18869   return __ret;
18870 }
18871 #endif
18872 
18873 #ifdef __LITTLE_ENDIAN__
18874 __ai uint64x1_t vqshl_u64(uint64x1_t __p0, int64x1_t __p1) {
18875   uint64x1_t __ret;
18876   __ret = (uint64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
18877   return __ret;
18878 }
18879 #else
18880 __ai uint64x1_t vqshl_u64(uint64x1_t __p0, int64x1_t __p1) {
18881   uint64x1_t __ret;
18882   __ret = (uint64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
18883   return __ret;
18884 }
18885 #endif
18886 
18887 #ifdef __LITTLE_ENDIAN__
18888 __ai uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) {
18889   uint16x4_t __ret;
18890   __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
18891   return __ret;
18892 }
18893 #else
18894 __ai uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) {
18895   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
18896   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
18897   uint16x4_t __ret;
18898   __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
18899   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
18900   return __ret;
18901 }
18902 #endif
18903 
18904 #ifdef __LITTLE_ENDIAN__
18905 __ai int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) {
18906   int8x8_t __ret;
18907   __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
18908   return __ret;
18909 }
18910 #else
18911 __ai int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) {
18912   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
18913   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
18914   int8x8_t __ret;
18915   __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
18916   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
18917   return __ret;
18918 }
18919 #endif
18920 
18921 #ifdef __LITTLE_ENDIAN__
18922 __ai int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) {
18923   int32x2_t __ret;
18924   __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
18925   return __ret;
18926 }
18927 #else
18928 __ai int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) {
18929   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
18930   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
18931   int32x2_t __ret;
18932   __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
18933   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
18934   return __ret;
18935 }
18936 #endif
18937 
18938 #ifdef __LITTLE_ENDIAN__
18939 __ai int64x1_t vqshl_s64(int64x1_t __p0, int64x1_t __p1) {
18940   int64x1_t __ret;
18941   __ret = (int64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
18942   return __ret;
18943 }
18944 #else
18945 __ai int64x1_t vqshl_s64(int64x1_t __p0, int64x1_t __p1) {
18946   int64x1_t __ret;
18947   __ret = (int64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
18948   return __ret;
18949 }
18950 #endif
18951 
18952 #ifdef __LITTLE_ENDIAN__
18953 __ai int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) {
18954   int16x4_t __ret;
18955   __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
18956   return __ret;
18957 }
18958 #else
18959 __ai int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) {
18960   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
18961   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
18962   int16x4_t __ret;
18963   __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
18964   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
18965   return __ret;
18966 }
18967 #endif
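/* Added commentary (not part of the original generated header): vqshl/vqshlq shift
 * each lane left by a per-lane signed count with saturation (no rounding), and the
 * vqshl*_n_* macros below take a compile-time-constant shift instead. Illustrative
 * sketch:
 *
 *   int8x8_t v = vdup_n_s8(100);
 *   int8x8_t r = vqshl_s8(v, vdup_n_s8(2));  // 100 << 2 overflows int8, saturates to 127
 */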
18968 
18969 #ifdef __LITTLE_ENDIAN__
18970 #define vqshlq_n_u8(__p0, __p1) __extension__ ({ \
18971   uint8x16_t __s0 = __p0; \
18972   uint8x16_t __ret; \
18973   __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 48); \
18974   __ret; \
18975 })
18976 #else
18977 #define vqshlq_n_u8(__p0, __p1) __extension__ ({ \
18978   uint8x16_t __s0 = __p0; \
18979   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
18980   uint8x16_t __ret; \
18981   __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 48); \
18982   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
18983   __ret; \
18984 })
18985 #endif
18986 
18987 #ifdef __LITTLE_ENDIAN__
18988 #define vqshlq_n_u32(__p0, __p1) __extension__ ({ \
18989   uint32x4_t __s0 = __p0; \
18990   uint32x4_t __ret; \
18991   __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 50); \
18992   __ret; \
18993 })
18994 #else
18995 #define vqshlq_n_u32(__p0, __p1) __extension__ ({ \
18996   uint32x4_t __s0 = __p0; \
18997   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
18998   uint32x4_t __ret; \
18999   __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 50); \
19000   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
19001   __ret; \
19002 })
19003 #endif
19004 
19005 #ifdef __LITTLE_ENDIAN__
19006 #define vqshlq_n_u64(__p0, __p1) __extension__ ({ \
19007   uint64x2_t __s0 = __p0; \
19008   uint64x2_t __ret; \
19009   __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 51); \
19010   __ret; \
19011 })
19012 #else
19013 #define vqshlq_n_u64(__p0, __p1) __extension__ ({ \
19014   uint64x2_t __s0 = __p0; \
19015   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
19016   uint64x2_t __ret; \
19017   __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 51); \
19018   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
19019   __ret; \
19020 })
19021 #endif
19022 
19023 #ifdef __LITTLE_ENDIAN__
19024 #define vqshlq_n_u16(__p0, __p1) __extension__ ({ \
19025   uint16x8_t __s0 = __p0; \
19026   uint16x8_t __ret; \
19027   __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 49); \
19028   __ret; \
19029 })
19030 #else
19031 #define vqshlq_n_u16(__p0, __p1) __extension__ ({ \
19032   uint16x8_t __s0 = __p0; \
19033   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
19034   uint16x8_t __ret; \
19035   __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 49); \
19036   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
19037   __ret; \
19038 })
19039 #endif
19040 
19041 #ifdef __LITTLE_ENDIAN__
19042 #define vqshlq_n_s8(__p0, __p1) __extension__ ({ \
19043   int8x16_t __s0 = __p0; \
19044   int8x16_t __ret; \
19045   __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 32); \
19046   __ret; \
19047 })
19048 #else
19049 #define vqshlq_n_s8(__p0, __p1) __extension__ ({ \
19050   int8x16_t __s0 = __p0; \
19051   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
19052   int8x16_t __ret; \
19053   __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 32); \
19054   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
19055   __ret; \
19056 })
19057 #endif
19058 
19059 #ifdef __LITTLE_ENDIAN__
19060 #define vqshlq_n_s32(__p0, __p1) __extension__ ({ \
19061   int32x4_t __s0 = __p0; \
19062   int32x4_t __ret; \
19063   __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 34); \
19064   __ret; \
19065 })
19066 #else
19067 #define vqshlq_n_s32(__p0, __p1) __extension__ ({ \
19068   int32x4_t __s0 = __p0; \
19069   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
19070   int32x4_t __ret; \
19071   __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 34); \
19072   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
19073   __ret; \
19074 })
19075 #endif
19076 
19077 #ifdef __LITTLE_ENDIAN__
19078 #define vqshlq_n_s64(__p0, __p1) __extension__ ({ \
19079   int64x2_t __s0 = __p0; \
19080   int64x2_t __ret; \
19081   __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 35); \
19082   __ret; \
19083 })
19084 #else
19085 #define vqshlq_n_s64(__p0, __p1) __extension__ ({ \
19086   int64x2_t __s0 = __p0; \
19087   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
19088   int64x2_t __ret; \
19089   __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 35); \
19090   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
19091   __ret; \
19092 })
19093 #endif
19094 
19095 #ifdef __LITTLE_ENDIAN__
19096 #define vqshlq_n_s16(__p0, __p1) __extension__ ({ \
19097   int16x8_t __s0 = __p0; \
19098   int16x8_t __ret; \
19099   __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 33); \
19100   __ret; \
19101 })
19102 #else
19103 #define vqshlq_n_s16(__p0, __p1) __extension__ ({ \
19104   int16x8_t __s0 = __p0; \
19105   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
19106   int16x8_t __ret; \
19107   __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 33); \
19108   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
19109   __ret; \
19110 })
19111 #endif
19112 
19113 #ifdef __LITTLE_ENDIAN__
19114 #define vqshl_n_u8(__p0, __p1) __extension__ ({ \
19115   uint8x8_t __s0 = __p0; \
19116   uint8x8_t __ret; \
19117   __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 16); \
19118   __ret; \
19119 })
19120 #else
19121 #define vqshl_n_u8(__p0, __p1) __extension__ ({ \
19122   uint8x8_t __s0 = __p0; \
19123   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
19124   uint8x8_t __ret; \
19125   __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 16); \
19126   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
19127   __ret; \
19128 })
19129 #endif
19130 
19131 #ifdef __LITTLE_ENDIAN__
19132 #define vqshl_n_u32(__p0, __p1) __extension__ ({ \
19133   uint32x2_t __s0 = __p0; \
19134   uint32x2_t __ret; \
19135   __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 18); \
19136   __ret; \
19137 })
19138 #else
19139 #define vqshl_n_u32(__p0, __p1) __extension__ ({ \
19140   uint32x2_t __s0 = __p0; \
19141   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
19142   uint32x2_t __ret; \
19143   __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 18); \
19144   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
19145   __ret; \
19146 })
19147 #endif
19148 
19149 #ifdef __LITTLE_ENDIAN__
19150 #define vqshl_n_u64(__p0, __p1) __extension__ ({ \
19151   uint64x1_t __s0 = __p0; \
19152   uint64x1_t __ret; \
19153   __ret = (uint64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 19); \
19154   __ret; \
19155 })
19156 #else
19157 #define vqshl_n_u64(__p0, __p1) __extension__ ({ \
19158   uint64x1_t __s0 = __p0; \
19159   uint64x1_t __ret; \
19160   __ret = (uint64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 19); \
19161   __ret; \
19162 })
19163 #endif
19164 
19165 #ifdef __LITTLE_ENDIAN__
19166 #define vqshl_n_u16(__p0, __p1) __extension__ ({ \
19167   uint16x4_t __s0 = __p0; \
19168   uint16x4_t __ret; \
19169   __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 17); \
19170   __ret; \
19171 })
19172 #else
19173 #define vqshl_n_u16(__p0, __p1) __extension__ ({ \
19174   uint16x4_t __s0 = __p0; \
19175   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
19176   uint16x4_t __ret; \
19177   __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 17); \
19178   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
19179   __ret; \
19180 })
19181 #endif
19182 
19183 #ifdef __LITTLE_ENDIAN__
19184 #define vqshl_n_s8(__p0, __p1) __extension__ ({ \
19185   int8x8_t __s0 = __p0; \
19186   int8x8_t __ret; \
19187   __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 0); \
19188   __ret; \
19189 })
19190 #else
19191 #define vqshl_n_s8(__p0, __p1) __extension__ ({ \
19192   int8x8_t __s0 = __p0; \
19193   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
19194   int8x8_t __ret; \
19195   __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 0); \
19196   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
19197   __ret; \
19198 })
19199 #endif
19200 
19201 #ifdef __LITTLE_ENDIAN__
19202 #define vqshl_n_s32(__p0, __p1) __extension__ ({ \
19203   int32x2_t __s0 = __p0; \
19204   int32x2_t __ret; \
19205   __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 2); \
19206   __ret; \
19207 })
19208 #else
19209 #define vqshl_n_s32(__p0, __p1) __extension__ ({ \
19210   int32x2_t __s0 = __p0; \
19211   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
19212   int32x2_t __ret; \
19213   __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 2); \
19214   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
19215   __ret; \
19216 })
19217 #endif
19218 
19219 #ifdef __LITTLE_ENDIAN__
19220 #define vqshl_n_s64(__p0, __p1) __extension__ ({ \
19221   int64x1_t __s0 = __p0; \
19222   int64x1_t __ret; \
19223   __ret = (int64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 3); \
19224   __ret; \
19225 })
19226 #else
19227 #define vqshl_n_s64(__p0, __p1) __extension__ ({ \
19228   int64x1_t __s0 = __p0; \
19229   int64x1_t __ret; \
19230   __ret = (int64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 3); \
19231   __ret; \
19232 })
19233 #endif
19234 
19235 #ifdef __LITTLE_ENDIAN__
19236 #define vqshl_n_s16(__p0, __p1) __extension__ ({ \
19237   int16x4_t __s0 = __p0; \
19238   int16x4_t __ret; \
19239   __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 1); \
19240   __ret; \
19241 })
19242 #else
19243 #define vqshl_n_s16(__p0, __p1) __extension__ ({ \
19244   int16x4_t __s0 = __p0; \
19245   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
19246   int16x4_t __ret; \
19247   __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 1); \
19248   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
19249   __ret; \
19250 })
19251 #endif
19252 
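/* vqshluq_n_* / vqshlu_n_*: saturating shift left of a signed vector by a
 * constant, producing an unsigned result (negative inputs saturate to zero). */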
19253 #ifdef __LITTLE_ENDIAN__
19254 #define vqshluq_n_s8(__p0, __p1) __extension__ ({ \
19255   int8x16_t __s0 = __p0; \
19256   uint8x16_t __ret; \
19257   __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 48); \
19258   __ret; \
19259 })
19260 #else
19261 #define vqshluq_n_s8(__p0, __p1) __extension__ ({ \
19262   int8x16_t __s0 = __p0; \
19263   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
19264   uint8x16_t __ret; \
19265   __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 48); \
19266   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
19267   __ret; \
19268 })
19269 #endif
19270 
19271 #ifdef __LITTLE_ENDIAN__
19272 #define vqshluq_n_s32(__p0, __p1) __extension__ ({ \
19273   int32x4_t __s0 = __p0; \
19274   uint32x4_t __ret; \
19275   __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 50); \
19276   __ret; \
19277 })
19278 #else
19279 #define vqshluq_n_s32(__p0, __p1) __extension__ ({ \
19280   int32x4_t __s0 = __p0; \
19281   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
19282   uint32x4_t __ret; \
19283   __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 50); \
19284   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
19285   __ret; \
19286 })
19287 #endif
19288 
19289 #ifdef __LITTLE_ENDIAN__
19290 #define vqshluq_n_s64(__p0, __p1) __extension__ ({ \
19291   int64x2_t __s0 = __p0; \
19292   uint64x2_t __ret; \
19293   __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 51); \
19294   __ret; \
19295 })
19296 #else
19297 #define vqshluq_n_s64(__p0, __p1) __extension__ ({ \
19298   int64x2_t __s0 = __p0; \
19299   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
19300   uint64x2_t __ret; \
19301   __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 51); \
19302   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
19303   __ret; \
19304 })
19305 #endif
19306 
19307 #ifdef __LITTLE_ENDIAN__
19308 #define vqshluq_n_s16(__p0, __p1) __extension__ ({ \
19309   int16x8_t __s0 = __p0; \
19310   uint16x8_t __ret; \
19311   __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 49); \
19312   __ret; \
19313 })
19314 #else
19315 #define vqshluq_n_s16(__p0, __p1) __extension__ ({ \
19316   int16x8_t __s0 = __p0; \
19317   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
19318   uint16x8_t __ret; \
19319   __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 49); \
19320   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
19321   __ret; \
19322 })
19323 #endif
19324 
19325 #ifdef __LITTLE_ENDIAN__
19326 #define vqshlu_n_s8(__p0, __p1) __extension__ ({ \
19327   int8x8_t __s0 = __p0; \
19328   uint8x8_t __ret; \
19329   __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 16); \
19330   __ret; \
19331 })
19332 #else
19333 #define vqshlu_n_s8(__p0, __p1) __extension__ ({ \
19334   int8x8_t __s0 = __p0; \
19335   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
19336   uint8x8_t __ret; \
19337   __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 16); \
19338   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
19339   __ret; \
19340 })
19341 #endif
19342 
19343 #ifdef __LITTLE_ENDIAN__
19344 #define vqshlu_n_s32(__p0, __p1) __extension__ ({ \
19345   int32x2_t __s0 = __p0; \
19346   uint32x2_t __ret; \
19347   __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 18); \
19348   __ret; \
19349 })
19350 #else
19351 #define vqshlu_n_s32(__p0, __p1) __extension__ ({ \
19352   int32x2_t __s0 = __p0; \
19353   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
19354   uint32x2_t __ret; \
19355   __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 18); \
19356   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
19357   __ret; \
19358 })
19359 #endif
19360 
19361 #ifdef __LITTLE_ENDIAN__
19362 #define vqshlu_n_s64(__p0, __p1) __extension__ ({ \
19363   int64x1_t __s0 = __p0; \
19364   uint64x1_t __ret; \
19365   __ret = (uint64x1_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 19); \
19366   __ret; \
19367 })
19368 #else
19369 #define vqshlu_n_s64(__p0, __p1) __extension__ ({ \
19370   int64x1_t __s0 = __p0; \
19371   uint64x1_t __ret; \
19372   __ret = (uint64x1_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 19); \
19373   __ret; \
19374 })
19375 #endif
19376 
19377 #ifdef __LITTLE_ENDIAN__
19378 #define vqshlu_n_s16(__p0, __p1) __extension__ ({ \
19379   int16x4_t __s0 = __p0; \
19380   uint16x4_t __ret; \
19381   __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 17); \
19382   __ret; \
19383 })
19384 #else
19385 #define vqshlu_n_s16(__p0, __p1) __extension__ ({ \
19386   int16x4_t __s0 = __p0; \
19387   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
19388   uint16x4_t __ret; \
19389   __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 17); \
19390   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
19391   __ret; \
19392 })
19393 #endif
19394 
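/* vqshrn_n_*: saturating shift right by a constant, narrowing each lane to
 * half width.  The __noswap_ forms skip the big-endian lane reversal and are
 * intended for reuse inside other big-endian wrappers. */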
19395 #ifdef __LITTLE_ENDIAN__
19396 #define vqshrn_n_u32(__p0, __p1) __extension__ ({ \
19397   uint32x4_t __s0 = __p0; \
19398   uint16x4_t __ret; \
19399   __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \
19400   __ret; \
19401 })
19402 #else
19403 #define vqshrn_n_u32(__p0, __p1) __extension__ ({ \
19404   uint32x4_t __s0 = __p0; \
19405   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
19406   uint16x4_t __ret; \
19407   __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 17); \
19408   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
19409   __ret; \
19410 })
19411 #define __noswap_vqshrn_n_u32(__p0, __p1) __extension__ ({ \
19412   uint32x4_t __s0 = __p0; \
19413   uint16x4_t __ret; \
19414   __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \
19415   __ret; \
19416 })
19417 #endif
19418 
19419 #ifdef __LITTLE_ENDIAN__
19420 #define vqshrn_n_u64(__p0, __p1) __extension__ ({ \
19421   uint64x2_t __s0 = __p0; \
19422   uint32x2_t __ret; \
19423   __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \
19424   __ret; \
19425 })
19426 #else
19427 #define vqshrn_n_u64(__p0, __p1) __extension__ ({ \
19428   uint64x2_t __s0 = __p0; \
19429   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
19430   uint32x2_t __ret; \
19431   __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 18); \
19432   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
19433   __ret; \
19434 })
19435 #define __noswap_vqshrn_n_u64(__p0, __p1) __extension__ ({ \
19436   uint64x2_t __s0 = __p0; \
19437   uint32x2_t __ret; \
19438   __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \
19439   __ret; \
19440 })
19441 #endif
19442 
19443 #ifdef __LITTLE_ENDIAN__
19444 #define vqshrn_n_u16(__p0, __p1) __extension__ ({ \
19445   uint16x8_t __s0 = __p0; \
19446   uint8x8_t __ret; \
19447   __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \
19448   __ret; \
19449 })
19450 #else
19451 #define vqshrn_n_u16(__p0, __p1) __extension__ ({ \
19452   uint16x8_t __s0 = __p0; \
19453   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
19454   uint8x8_t __ret; \
19455   __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 16); \
19456   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
19457   __ret; \
19458 })
19459 #define __noswap_vqshrn_n_u16(__p0, __p1) __extension__ ({ \
19460   uint16x8_t __s0 = __p0; \
19461   uint8x8_t __ret; \
19462   __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \
19463   __ret; \
19464 })
19465 #endif
19466 
19467 #ifdef __LITTLE_ENDIAN__
19468 #define vqshrn_n_s32(__p0, __p1) __extension__ ({ \
19469   int32x4_t __s0 = __p0; \
19470   int16x4_t __ret; \
19471   __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \
19472   __ret; \
19473 })
19474 #else
19475 #define vqshrn_n_s32(__p0, __p1) __extension__ ({ \
19476   int32x4_t __s0 = __p0; \
19477   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
19478   int16x4_t __ret; \
19479   __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 1); \
19480   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
19481   __ret; \
19482 })
19483 #define __noswap_vqshrn_n_s32(__p0, __p1) __extension__ ({ \
19484   int32x4_t __s0 = __p0; \
19485   int16x4_t __ret; \
19486   __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \
19487   __ret; \
19488 })
19489 #endif
19490 
19491 #ifdef __LITTLE_ENDIAN__
19492 #define vqshrn_n_s64(__p0, __p1) __extension__ ({ \
19493   int64x2_t __s0 = __p0; \
19494   int32x2_t __ret; \
19495   __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \
19496   __ret; \
19497 })
19498 #else
19499 #define vqshrn_n_s64(__p0, __p1) __extension__ ({ \
19500   int64x2_t __s0 = __p0; \
19501   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
19502   int32x2_t __ret; \
19503   __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 2); \
19504   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
19505   __ret; \
19506 })
19507 #define __noswap_vqshrn_n_s64(__p0, __p1) __extension__ ({ \
19508   int64x2_t __s0 = __p0; \
19509   int32x2_t __ret; \
19510   __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \
19511   __ret; \
19512 })
19513 #endif
19514 
19515 #ifdef __LITTLE_ENDIAN__
19516 #define vqshrn_n_s16(__p0, __p1) __extension__ ({ \
19517   int16x8_t __s0 = __p0; \
19518   int8x8_t __ret; \
19519   __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \
19520   __ret; \
19521 })
19522 #else
19523 #define vqshrn_n_s16(__p0, __p1) __extension__ ({ \
19524   int16x8_t __s0 = __p0; \
19525   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
19526   int8x8_t __ret; \
19527   __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 0); \
19528   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
19529   __ret; \
19530 })
19531 #define __noswap_vqshrn_n_s16(__p0, __p1) __extension__ ({ \
19532   int16x8_t __s0 = __p0; \
19533   int8x8_t __ret; \
19534   __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \
19535   __ret; \
19536 })
19537 #endif
19538 
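/* vqshrun_n_*: saturating shift right by a constant that narrows a signed
 * vector to an unsigned result of half the element width. */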
19539 #ifdef __LITTLE_ENDIAN__
19540 #define vqshrun_n_s32(__p0, __p1) __extension__ ({ \
19541   int32x4_t __s0 = __p0; \
19542   uint16x4_t __ret; \
19543   __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \
19544   __ret; \
19545 })
19546 #else
19547 #define vqshrun_n_s32(__p0, __p1) __extension__ ({ \
19548   int32x4_t __s0 = __p0; \
19549   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
19550   uint16x4_t __ret; \
19551   __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 17); \
19552   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
19553   __ret; \
19554 })
19555 #define __noswap_vqshrun_n_s32(__p0, __p1) __extension__ ({ \
19556   int32x4_t __s0 = __p0; \
19557   uint16x4_t __ret; \
19558   __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \
19559   __ret; \
19560 })
19561 #endif
19562 
19563 #ifdef __LITTLE_ENDIAN__
19564 #define vqshrun_n_s64(__p0, __p1) __extension__ ({ \
19565   int64x2_t __s0 = __p0; \
19566   uint32x2_t __ret; \
19567   __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \
19568   __ret; \
19569 })
19570 #else
19571 #define vqshrun_n_s64(__p0, __p1) __extension__ ({ \
19572   int64x2_t __s0 = __p0; \
19573   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
19574   uint32x2_t __ret; \
19575   __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 18); \
19576   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
19577   __ret; \
19578 })
19579 #define __noswap_vqshrun_n_s64(__p0, __p1) __extension__ ({ \
19580   int64x2_t __s0 = __p0; \
19581   uint32x2_t __ret; \
19582   __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \
19583   __ret; \
19584 })
19585 #endif
19586 
19587 #ifdef __LITTLE_ENDIAN__
19588 #define vqshrun_n_s16(__p0, __p1) __extension__ ({ \
19589   int16x8_t __s0 = __p0; \
19590   uint8x8_t __ret; \
19591   __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \
19592   __ret; \
19593 })
19594 #else
19595 #define vqshrun_n_s16(__p0, __p1) __extension__ ({ \
19596   int16x8_t __s0 = __p0; \
19597   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
19598   uint8x8_t __ret; \
19599   __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 16); \
19600   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
19601   __ret; \
19602 })
19603 #define __noswap_vqshrun_n_s16(__p0, __p1) __extension__ ({ \
19604   int16x8_t __s0 = __p0; \
19605   uint8x8_t __ret; \
19606   __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \
19607   __ret; \
19608 })
19609 #endif
19610 
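/* vqsubq_* / vqsub_*: lane-wise saturating subtraction; results clamp to the
 * representable range of the element type instead of wrapping. */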
19611 #ifdef __LITTLE_ENDIAN__
19612 __ai uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
19613   uint8x16_t __ret;
19614   __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
19615   return __ret;
19616 }
19617 #else
19618 __ai uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
19619   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
19620   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
19621   uint8x16_t __ret;
19622   __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
19623   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
19624   return __ret;
19625 }
19626 #endif
19627 
19628 #ifdef __LITTLE_ENDIAN__
19629 __ai uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
19630   uint32x4_t __ret;
19631   __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
19632   return __ret;
19633 }
19634 #else
19635 __ai uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
19636   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
19637   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
19638   uint32x4_t __ret;
19639   __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
19640   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
19641   return __ret;
19642 }
19643 #endif
19644 
19645 #ifdef __LITTLE_ENDIAN__
19646 __ai uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
19647   uint64x2_t __ret;
19648   __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
19649   return __ret;
19650 }
19651 #else
19652 __ai uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
19653   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
19654   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
19655   uint64x2_t __ret;
19656   __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
19657   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
19658   return __ret;
19659 }
19660 #endif
19661 
19662 #ifdef __LITTLE_ENDIAN__
19663 __ai uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
19664   uint16x8_t __ret;
19665   __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
19666   return __ret;
19667 }
19668 #else
19669 __ai uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
19670   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
19671   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
19672   uint16x8_t __ret;
19673   __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
19674   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
19675   return __ret;
19676 }
19677 #endif
19678 
19679 #ifdef __LITTLE_ENDIAN__
19680 __ai int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) {
19681   int8x16_t __ret;
19682   __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
19683   return __ret;
19684 }
19685 #else
19686 __ai int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) {
19687   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
19688   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
19689   int8x16_t __ret;
19690   __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
19691   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
19692   return __ret;
19693 }
19694 #endif
19695 
19696 #ifdef __LITTLE_ENDIAN__
19697 __ai int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
19698   int32x4_t __ret;
19699   __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
19700   return __ret;
19701 }
19702 #else
19703 __ai int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
19704   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
19705   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
19706   int32x4_t __ret;
19707   __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
19708   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
19709   return __ret;
19710 }
19711 #endif
19712 
19713 #ifdef __LITTLE_ENDIAN__
19714 __ai int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) {
19715   int64x2_t __ret;
19716   __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
19717   return __ret;
19718 }
19719 #else
19720 __ai int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) {
19721   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
19722   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
19723   int64x2_t __ret;
19724   __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
19725   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
19726   return __ret;
19727 }
19728 #endif
19729 
19730 #ifdef __LITTLE_ENDIAN__
19731 __ai int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
19732   int16x8_t __ret;
19733   __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
19734   return __ret;
19735 }
19736 #else
19737 __ai int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
19738   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
19739   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
19740   int16x8_t __ret;
19741   __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
19742   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
19743   return __ret;
19744 }
19745 #endif
19746 
19747 #ifdef __LITTLE_ENDIAN__
19748 __ai uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
19749   uint8x8_t __ret;
19750   __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
19751   return __ret;
19752 }
19753 #else
19754 __ai uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
19755   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
19756   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
19757   uint8x8_t __ret;
19758   __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
19759   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
19760   return __ret;
19761 }
19762 #endif
19763 
19764 #ifdef __LITTLE_ENDIAN__
19765 __ai uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
19766   uint32x2_t __ret;
19767   __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
19768   return __ret;
19769 }
19770 #else
19771 __ai uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
19772   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
19773   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
19774   uint32x2_t __ret;
19775   __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
19776   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
19777   return __ret;
19778 }
19779 #endif
19780 
19781 #ifdef __LITTLE_ENDIAN__
19782 __ai uint64x1_t vqsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
19783   uint64x1_t __ret;
19784   __ret = (uint64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
19785   return __ret;
19786 }
19787 #else
19788 __ai uint64x1_t vqsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
19789   uint64x1_t __ret;
19790   __ret = (uint64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
19791   return __ret;
19792 }
19793 #endif
19794 
19795 #ifdef __LITTLE_ENDIAN__
19796 __ai uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
19797   uint16x4_t __ret;
19798   __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
19799   return __ret;
19800 }
19801 #else
19802 __ai uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
19803   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
19804   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
19805   uint16x4_t __ret;
19806   __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
19807   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
19808   return __ret;
19809 }
19810 #endif
19811 
19812 #ifdef __LITTLE_ENDIAN__
19813 __ai int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) {
19814   int8x8_t __ret;
19815   __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
19816   return __ret;
19817 }
19818 #else
19819 __ai int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) {
19820   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
19821   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
19822   int8x8_t __ret;
19823   __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
19824   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
19825   return __ret;
19826 }
19827 #endif
19828 
19829 #ifdef __LITTLE_ENDIAN__
19830 __ai int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
19831   int32x2_t __ret;
19832   __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
19833   return __ret;
19834 }
19835 #else
19836 __ai int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
19837   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
19838   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
19839   int32x2_t __ret;
19840   __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
19841   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
19842   return __ret;
19843 }
19844 #endif
19845 
19846 #ifdef __LITTLE_ENDIAN__
19847 __ai int64x1_t vqsub_s64(int64x1_t __p0, int64x1_t __p1) {
19848   int64x1_t __ret;
19849   __ret = (int64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
19850   return __ret;
19851 }
19852 #else
19853 __ai int64x1_t vqsub_s64(int64x1_t __p0, int64x1_t __p1) {
19854   int64x1_t __ret;
19855   __ret = (int64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
19856   return __ret;
19857 }
19858 #endif
19859 
19860 #ifdef __LITTLE_ENDIAN__
19861 __ai int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
19862   int16x4_t __ret;
19863   __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
19864   return __ret;
19865 }
19866 #else
19867 __ai int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
19868   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
19869   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
19870   int16x4_t __ret;
19871   __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
19872   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
19873   return __ret;
19874 }
19875 #endif
19876 
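/* vraddhn_*: rounding add of two wide vectors, keeping the high half of each
 * sum narrowed to half the element width.  The __noswap_ forms omit the
 * big-endian lane reversal so other big-endian wrappers can call them. */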
19877 #ifdef __LITTLE_ENDIAN__
19878 __ai uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
19879   uint16x4_t __ret;
19880   __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
19881   return __ret;
19882 }
19883 #else
19884 __ai uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
19885   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
19886   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
19887   uint16x4_t __ret;
19888   __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
19889   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
19890   return __ret;
19891 }
19892 __ai uint16x4_t __noswap_vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
19893   uint16x4_t __ret;
19894   __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
19895   return __ret;
19896 }
19897 #endif
19898 
19899 #ifdef __LITTLE_ENDIAN__
19900 __ai uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
19901   uint32x2_t __ret;
19902   __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
19903   return __ret;
19904 }
19905 #else
19906 __ai uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
19907   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
19908   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
19909   uint32x2_t __ret;
19910   __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
19911   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
19912   return __ret;
19913 }
19914 __ai uint32x2_t __noswap_vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
19915   uint32x2_t __ret;
19916   __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
19917   return __ret;
19918 }
19919 #endif
19920 
19921 #ifdef __LITTLE_ENDIAN__
19922 __ai uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
19923   uint8x8_t __ret;
19924   __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
19925   return __ret;
19926 }
19927 #else
19928 __ai uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
19929   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
19930   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
19931   uint8x8_t __ret;
19932   __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
19933   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
19934   return __ret;
19935 }
19936 __ai uint8x8_t __noswap_vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
19937   uint8x8_t __ret;
19938   __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
19939   return __ret;
19940 }
19941 #endif
19942 
19943 #ifdef __LITTLE_ENDIAN__
19944 __ai int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
19945   int16x4_t __ret;
19946   __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
19947   return __ret;
19948 }
19949 #else
19950 __ai int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
19951   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
19952   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
19953   int16x4_t __ret;
19954   __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
19955   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
19956   return __ret;
19957 }
19958 __ai int16x4_t __noswap_vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
19959   int16x4_t __ret;
19960   __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
19961   return __ret;
19962 }
19963 #endif
19964 
19965 #ifdef __LITTLE_ENDIAN__
19966 __ai int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
19967   int32x2_t __ret;
19968   __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
19969   return __ret;
19970 }
19971 #else
19972 __ai int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
19973   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
19974   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
19975   int32x2_t __ret;
19976   __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
19977   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
19978   return __ret;
19979 }
19980 __ai int32x2_t __noswap_vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
19981   int32x2_t __ret;
19982   __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
19983   return __ret;
19984 }
19985 #endif
19986 
19987 #ifdef __LITTLE_ENDIAN__
19988 __ai int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
19989   int8x8_t __ret;
19990   __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
19991   return __ret;
19992 }
19993 #else
19994 __ai int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
19995   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
19996   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
19997   int8x8_t __ret;
19998   __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
19999   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20000   return __ret;
20001 }
20002 __ai int8x8_t __noswap_vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
20003   int8x8_t __ret;
20004   __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
20005   return __ret;
20006 }
20007 #endif
20008 
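/* vrecpe(q)_*: reciprocal estimate.  The u32 forms compute the unsigned
 * reciprocal estimate of each lane; the f32 forms give a floating-point
 * approximation that can be refined with the vrecps step below. */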
20009 #ifdef __LITTLE_ENDIAN__
20010 __ai uint32x4_t vrecpeq_u32(uint32x4_t __p0) {
20011   uint32x4_t __ret;
20012   __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 50);
20013   return __ret;
20014 }
20015 #else
20016 __ai uint32x4_t vrecpeq_u32(uint32x4_t __p0) {
20017   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20018   uint32x4_t __ret;
20019   __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 50);
20020   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
20021   return __ret;
20022 }
20023 #endif
20024 
20025 #ifdef __LITTLE_ENDIAN__
20026 __ai float32x4_t vrecpeq_f32(float32x4_t __p0) {
20027   float32x4_t __ret;
20028   __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 41);
20029   return __ret;
20030 }
20031 #else
20032 __ai float32x4_t vrecpeq_f32(float32x4_t __p0) {
20033   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20034   float32x4_t __ret;
20035   __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 41);
20036   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
20037   return __ret;
20038 }
20039 #endif
20040 
20041 #ifdef __LITTLE_ENDIAN__
20042 __ai uint32x2_t vrecpe_u32(uint32x2_t __p0) {
20043   uint32x2_t __ret;
20044   __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 18);
20045   return __ret;
20046 }
20047 #else
20048 __ai uint32x2_t vrecpe_u32(uint32x2_t __p0) {
20049   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
20050   uint32x2_t __ret;
20051   __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 18);
20052   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
20053   return __ret;
20054 }
20055 #endif
20056 
20057 #ifdef __LITTLE_ENDIAN__
20058 __ai float32x2_t vrecpe_f32(float32x2_t __p0) {
20059   float32x2_t __ret;
20060   __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 9);
20061   return __ret;
20062 }
20063 #else
20064 __ai float32x2_t vrecpe_f32(float32x2_t __p0) {
20065   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
20066   float32x2_t __ret;
20067   __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 9);
20068   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
20069   return __ret;
20070 }
20071 #endif
20072 
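/* vrecps(q)_f32: Newton-Raphson reciprocal step, computing 2.0 - __p0 * __p1
 * per lane; multiplying by the current estimate refines a vrecpe result. */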
20073 #ifdef __LITTLE_ENDIAN__
20074 __ai float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) {
20075   float32x4_t __ret;
20076   __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
20077   return __ret;
20078 }
20079 #else
20080 __ai float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) {
20081   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20082   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
20083   float32x4_t __ret;
20084   __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
20085   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
20086   return __ret;
20087 }
20088 #endif
20089 
20090 #ifdef __LITTLE_ENDIAN__
20091 __ai float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) {
20092   float32x2_t __ret;
20093   __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
20094   return __ret;
20095 }
20096 #else
20097 __ai float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) {
20098   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
20099   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
20100   float32x2_t __ret;
20101   __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
20102   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
20103   return __ret;
20104 }
20105 #endif
20106 
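/* vrev16(q)_*: reverse the bytes within each 16-bit halfword, expressed here
 * as a plain __builtin_shufflevector lane permutation. */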
20107 #ifdef __LITTLE_ENDIAN__
20108 __ai poly8x8_t vrev16_p8(poly8x8_t __p0) {
20109   poly8x8_t __ret;
20110   __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
20111   return __ret;
20112 }
20113 #else
20114 __ai poly8x8_t vrev16_p8(poly8x8_t __p0) {
20115   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20116   poly8x8_t __ret;
20117   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
20118   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20119   return __ret;
20120 }
20121 #endif
20122 
20123 #ifdef __LITTLE_ENDIAN__
20124 __ai poly8x16_t vrev16q_p8(poly8x16_t __p0) {
20125   poly8x16_t __ret;
20126   __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
20127   return __ret;
20128 }
20129 #else
20130 __ai poly8x16_t vrev16q_p8(poly8x16_t __p0) {
20131   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20132   poly8x16_t __ret;
20133   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
20134   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20135   return __ret;
20136 }
20137 #endif
20138 
20139 #ifdef __LITTLE_ENDIAN__
20140 __ai uint8x16_t vrev16q_u8(uint8x16_t __p0) {
20141   uint8x16_t __ret;
20142   __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
20143   return __ret;
20144 }
20145 #else
20146 __ai uint8x16_t vrev16q_u8(uint8x16_t __p0) {
20147   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20148   uint8x16_t __ret;
20149   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
20150   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20151   return __ret;
20152 }
20153 #endif
20154 
20155 #ifdef __LITTLE_ENDIAN__
20156 __ai int8x16_t vrev16q_s8(int8x16_t __p0) {
20157   int8x16_t __ret;
20158   __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
20159   return __ret;
20160 }
20161 #else
20162 __ai int8x16_t vrev16q_s8(int8x16_t __p0) {
20163   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20164   int8x16_t __ret;
20165   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
20166   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20167   return __ret;
20168 }
20169 #endif
20170 
20171 #ifdef __LITTLE_ENDIAN__
20172 __ai uint8x8_t vrev16_u8(uint8x8_t __p0) {
20173   uint8x8_t __ret;
20174   __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
20175   return __ret;
20176 }
20177 #else
20178 __ai uint8x8_t vrev16_u8(uint8x8_t __p0) {
20179   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20180   uint8x8_t __ret;
20181   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
20182   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20183   return __ret;
20184 }
20185 #endif
20186 
20187 #ifdef __LITTLE_ENDIAN__
20188 __ai int8x8_t vrev16_s8(int8x8_t __p0) {
20189   int8x8_t __ret;
20190   __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
20191   return __ret;
20192 }
20193 #else
20194 __ai int8x8_t vrev16_s8(int8x8_t __p0) {
20195   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20196   int8x8_t __ret;
20197   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
20198   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20199   return __ret;
20200 }
20201 #endif
20202 
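/* vrev32(q)_*: reverse the elements within each 32-bit word. */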
20203 #ifdef __LITTLE_ENDIAN__
20204 __ai poly8x8_t vrev32_p8(poly8x8_t __p0) {
20205   poly8x8_t __ret;
20206   __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
20207   return __ret;
20208 }
20209 #else
20210 __ai poly8x8_t vrev32_p8(poly8x8_t __p0) {
20211   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20212   poly8x8_t __ret;
20213   __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
20214   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20215   return __ret;
20216 }
20217 #endif
20218 
20219 #ifdef __LITTLE_ENDIAN__
20220 __ai poly16x4_t vrev32_p16(poly16x4_t __p0) {
20221   poly16x4_t __ret;
20222   __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
20223   return __ret;
20224 }
20225 #else
20226 __ai poly16x4_t vrev32_p16(poly16x4_t __p0) {
20227   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20228   poly16x4_t __ret;
20229   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
20230   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
20231   return __ret;
20232 }
20233 #endif
20234 
20235 #ifdef __LITTLE_ENDIAN__
20236 __ai poly8x16_t vrev32q_p8(poly8x16_t __p0) {
20237   poly8x16_t __ret;
20238   __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
20239   return __ret;
20240 }
20241 #else
20242 __ai poly8x16_t vrev32q_p8(poly8x16_t __p0) {
20243   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20244   poly8x16_t __ret;
20245   __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
20246   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20247   return __ret;
20248 }
20249 #endif
20250 
20251 #ifdef __LITTLE_ENDIAN__
20252 __ai poly16x8_t vrev32q_p16(poly16x8_t __p0) {
20253   poly16x8_t __ret;
20254   __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
20255   return __ret;
20256 }
20257 #else
20258 __ai poly16x8_t vrev32q_p16(poly16x8_t __p0) {
20259   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20260   poly16x8_t __ret;
20261   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
20262   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20263   return __ret;
20264 }
20265 #endif
20266 
20267 #ifdef __LITTLE_ENDIAN__
20268 __ai uint8x16_t vrev32q_u8(uint8x16_t __p0) {
20269   uint8x16_t __ret;
20270   __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
20271   return __ret;
20272 }
20273 #else
20274 __ai uint8x16_t vrev32q_u8(uint8x16_t __p0) {
20275   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20276   uint8x16_t __ret;
20277   __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
20278   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20279   return __ret;
20280 }
20281 #endif
20282 
20283 #ifdef __LITTLE_ENDIAN__
20284 __ai uint16x8_t vrev32q_u16(uint16x8_t __p0) {
20285   uint16x8_t __ret;
20286   __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
20287   return __ret;
20288 }
20289 #else
20290 __ai uint16x8_t vrev32q_u16(uint16x8_t __p0) {
20291   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20292   uint16x8_t __ret;
20293   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
20294   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20295   return __ret;
20296 }
20297 #endif
20298 
20299 #ifdef __LITTLE_ENDIAN__
20300 __ai int8x16_t vrev32q_s8(int8x16_t __p0) {
20301   int8x16_t __ret;
20302   __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
20303   return __ret;
20304 }
20305 #else
20306 __ai int8x16_t vrev32q_s8(int8x16_t __p0) {
20307   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20308   int8x16_t __ret;
20309   __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
20310   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20311   return __ret;
20312 }
20313 #endif
20314 
20315 #ifdef __LITTLE_ENDIAN__
20316 __ai int16x8_t vrev32q_s16(int16x8_t __p0) {
20317   int16x8_t __ret;
20318   __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
20319   return __ret;
20320 }
20321 #else
20322 __ai int16x8_t vrev32q_s16(int16x8_t __p0) {
20323   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20324   int16x8_t __ret;
20325   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
20326   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20327   return __ret;
20328 }
20329 #endif
20330 
20331 #ifdef __LITTLE_ENDIAN__
20332 __ai uint8x8_t vrev32_u8(uint8x8_t __p0) {
20333   uint8x8_t __ret;
20334   __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
20335   return __ret;
20336 }
20337 #else
20338 __ai uint8x8_t vrev32_u8(uint8x8_t __p0) {
20339   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20340   uint8x8_t __ret;
20341   __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
20342   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20343   return __ret;
20344 }
20345 #endif
20346 
20347 #ifdef __LITTLE_ENDIAN__
20348 __ai uint16x4_t vrev32_u16(uint16x4_t __p0) {
20349   uint16x4_t __ret;
20350   __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
20351   return __ret;
20352 }
20353 #else
20354 __ai uint16x4_t vrev32_u16(uint16x4_t __p0) {
20355   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20356   uint16x4_t __ret;
20357   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
20358   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
20359   return __ret;
20360 }
20361 #endif
20362 
20363 #ifdef __LITTLE_ENDIAN__
20364 __ai int8x8_t vrev32_s8(int8x8_t __p0) {
20365   int8x8_t __ret;
20366   __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
20367   return __ret;
20368 }
20369 #else
20370 __ai int8x8_t vrev32_s8(int8x8_t __p0) {
20371   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20372   int8x8_t __ret;
20373   __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
20374   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20375   return __ret;
20376 }
20377 #endif
20378 
20379 #ifdef __LITTLE_ENDIAN__
20380 __ai int16x4_t vrev32_s16(int16x4_t __p0) {
20381   int16x4_t __ret;
20382   __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
20383   return __ret;
20384 }
20385 #else
20386 __ai int16x4_t vrev32_s16(int16x4_t __p0) {
20387   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20388   int16x4_t __ret;
20389   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
20390   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
20391   return __ret;
20392 }
20393 #endif
20394 
20395 #ifdef __LITTLE_ENDIAN__
20396 __ai poly8x8_t vrev64_p8(poly8x8_t __p0) {
20397   poly8x8_t __ret;
20398   __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20399   return __ret;
20400 }
20401 #else
20402 __ai poly8x8_t vrev64_p8(poly8x8_t __p0) {
20403   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20404   poly8x8_t __ret;
20405   __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
20406   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20407   return __ret;
20408 }
20409 #endif
20410 
20411 #ifdef __LITTLE_ENDIAN__
20412 __ai poly16x4_t vrev64_p16(poly16x4_t __p0) {
20413   poly16x4_t __ret;
20414   __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20415   return __ret;
20416 }
20417 #else
20418 __ai poly16x4_t vrev64_p16(poly16x4_t __p0) {
20419   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20420   poly16x4_t __ret;
20421   __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
20422   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
20423   return __ret;
20424 }
20425 #endif
20426 
20427 #ifdef __LITTLE_ENDIAN__
20428 __ai poly8x16_t vrev64q_p8(poly8x16_t __p0) {
20429   poly8x16_t __ret;
20430   __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
20431   return __ret;
20432 }
20433 #else
20434 __ai poly8x16_t vrev64q_p8(poly8x16_t __p0) {
20435   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20436   poly8x16_t __ret;
20437   __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
20438   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20439   return __ret;
20440 }
20441 #endif
20442 
20443 #ifdef __LITTLE_ENDIAN__
20444 __ai poly16x8_t vrev64q_p16(poly16x8_t __p0) {
20445   poly16x8_t __ret;
20446   __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
20447   return __ret;
20448 }
20449 #else
20450 __ai poly16x8_t vrev64q_p16(poly16x8_t __p0) {
20451   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20452   poly16x8_t __ret;
20453   __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
20454   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20455   return __ret;
20456 }
20457 #endif
20458 
20459 #ifdef __LITTLE_ENDIAN__
20460 __ai uint8x16_t vrev64q_u8(uint8x16_t __p0) {
20461   uint8x16_t __ret;
20462   __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
20463   return __ret;
20464 }
20465 #else
20466 __ai uint8x16_t vrev64q_u8(uint8x16_t __p0) {
20467   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20468   uint8x16_t __ret;
20469   __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
20470   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20471   return __ret;
20472 }
20473 #endif
20474 
20475 #ifdef __LITTLE_ENDIAN__
20476 __ai uint32x4_t vrev64q_u32(uint32x4_t __p0) {
20477   uint32x4_t __ret;
20478   __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
20479   return __ret;
20480 }
20481 #else
20482 __ai uint32x4_t vrev64q_u32(uint32x4_t __p0) {
20483   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20484   uint32x4_t __ret;
20485   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
20486   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
20487   return __ret;
20488 }
20489 #endif
20490 
20491 #ifdef __LITTLE_ENDIAN__
20492 __ai uint16x8_t vrev64q_u16(uint16x8_t __p0) {
20493   uint16x8_t __ret;
20494   __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
20495   return __ret;
20496 }
20497 #else
20498 __ai uint16x8_t vrev64q_u16(uint16x8_t __p0) {
20499   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20500   uint16x8_t __ret;
20501   __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
20502   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20503   return __ret;
20504 }
20505 #endif
20506 
20507 #ifdef __LITTLE_ENDIAN__
20508 __ai int8x16_t vrev64q_s8(int8x16_t __p0) {
20509   int8x16_t __ret;
20510   __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
20511   return __ret;
20512 }
20513 #else
20514 __ai int8x16_t vrev64q_s8(int8x16_t __p0) {
20515   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20516   int8x16_t __ret;
20517   __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
20518   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20519   return __ret;
20520 }
20521 #endif
20522 
20523 #ifdef __LITTLE_ENDIAN__
20524 __ai float32x4_t vrev64q_f32(float32x4_t __p0) {
20525   float32x4_t __ret;
20526   __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
20527   return __ret;
20528 }
20529 #else
20530 __ai float32x4_t vrev64q_f32(float32x4_t __p0) {
20531   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20532   float32x4_t __ret;
20533   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
20534   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
20535   return __ret;
20536 }
20537 #endif
20538 
20539 #ifdef __LITTLE_ENDIAN__
20540 __ai int32x4_t vrev64q_s32(int32x4_t __p0) {
20541   int32x4_t __ret;
20542   __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
20543   return __ret;
20544 }
20545 #else
20546 __ai int32x4_t vrev64q_s32(int32x4_t __p0) {
20547   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20548   int32x4_t __ret;
20549   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
20550   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
20551   return __ret;
20552 }
20553 #endif
20554 
20555 #ifdef __LITTLE_ENDIAN__
20556 __ai int16x8_t vrev64q_s16(int16x8_t __p0) {
20557   int16x8_t __ret;
20558   __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
20559   return __ret;
20560 }
20561 #else
20562 __ai int16x8_t vrev64q_s16(int16x8_t __p0) {
20563   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20564   int16x8_t __ret;
20565   __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
20566   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20567   return __ret;
20568 }
20569 #endif
20570 
20571 #ifdef __LITTLE_ENDIAN__
20572 __ai uint8x8_t vrev64_u8(uint8x8_t __p0) {
20573   uint8x8_t __ret;
20574   __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20575   return __ret;
20576 }
20577 #else
20578 __ai uint8x8_t vrev64_u8(uint8x8_t __p0) {
20579   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20580   uint8x8_t __ret;
20581   __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
20582   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20583   return __ret;
20584 }
20585 #endif
20586 
20587 #ifdef __LITTLE_ENDIAN__
20588 __ai uint32x2_t vrev64_u32(uint32x2_t __p0) {
20589   uint32x2_t __ret;
20590   __ret = __builtin_shufflevector(__p0, __p0, 1, 0);
20591   return __ret;
20592 }
20593 #else
20594 __ai uint32x2_t vrev64_u32(uint32x2_t __p0) {
20595   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
20596   uint32x2_t __ret;
20597   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
20598   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
20599   return __ret;
20600 }
20601 #endif
20602 
20603 #ifdef __LITTLE_ENDIAN__
20604 __ai uint16x4_t vrev64_u16(uint16x4_t __p0) {
20605   uint16x4_t __ret;
20606   __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20607   return __ret;
20608 }
20609 #else
20610 __ai uint16x4_t vrev64_u16(uint16x4_t __p0) {
20611   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20612   uint16x4_t __ret;
20613   __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
20614   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
20615   return __ret;
20616 }
20617 #endif
20618 
20619 #ifdef __LITTLE_ENDIAN__
20620 __ai int8x8_t vrev64_s8(int8x8_t __p0) {
20621   int8x8_t __ret;
20622   __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20623   return __ret;
20624 }
20625 #else
20626 __ai int8x8_t vrev64_s8(int8x8_t __p0) {
20627   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20628   int8x8_t __ret;
20629   __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
20630   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20631   return __ret;
20632 }
20633 #endif
20634 
20635 #ifdef __LITTLE_ENDIAN__
20636 __ai float32x2_t vrev64_f32(float32x2_t __p0) {
20637   float32x2_t __ret;
20638   __ret = __builtin_shufflevector(__p0, __p0, 1, 0);
20639   return __ret;
20640 }
20641 #else
20642 __ai float32x2_t vrev64_f32(float32x2_t __p0) {
20643   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
20644   float32x2_t __ret;
20645   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
20646   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
20647   return __ret;
20648 }
20649 #endif
20650 
20651 #ifdef __LITTLE_ENDIAN__
20652 __ai int32x2_t vrev64_s32(int32x2_t __p0) {
20653   int32x2_t __ret;
20654   __ret = __builtin_shufflevector(__p0, __p0, 1, 0);
20655   return __ret;
20656 }
20657 #else
20658 __ai int32x2_t vrev64_s32(int32x2_t __p0) {
20659   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
20660   int32x2_t __ret;
20661   __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
20662   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
20663   return __ret;
20664 }
20665 #endif
20666 
20667 #ifdef __LITTLE_ENDIAN__
20668 __ai int16x4_t vrev64_s16(int16x4_t __p0) {
20669   int16x4_t __ret;
20670   __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20671   return __ret;
20672 }
20673 #else
20674 __ai int16x4_t vrev64_s16(int16x4_t __p0) {
20675   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20676   int16x4_t __ret;
20677   __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
20678   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
20679   return __ret;
20680 }
20681 #endif
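/* Illustrative usage sketch (not part of the upstream header): the vrev32_*
 * and vrev64_* intrinsics defined above reverse the element order within each
 * 32-bit or 64-bit group of a vector. The helpers below are hypothetical
 * examples and are kept inside "#if 0" so they do not change what this header
 * declares. */
#if 0
/* Hypothetical example: reverse the bytes within each 64-bit doubleword. */
static inline uint8x8_t example_rev64_bytes(uint8x8_t v) {
  /* {0,1,2,3,4,5,6,7} -> {7,6,5,4,3,2,1,0} */
  return vrev64_u8(v);
}
/* Hypothetical example: swap the two 16-bit halves of every 32-bit word. */
static inline uint16x4_t example_rev32_halfwords(uint16x4_t v) {
  /* {a,b,c,d} -> {b,a,d,c} */
  return vrev32_u16(v);
}
#endif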
20682 
20683 #ifdef __LITTLE_ENDIAN__
20684 __ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
20685   uint8x16_t __ret;
20686   __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
20687   return __ret;
20688 }
20689 #else
20690 __ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
20691   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20692   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20693   uint8x16_t __ret;
20694   __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
20695   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20696   return __ret;
20697 }
20698 #endif
20699 
20700 #ifdef __LITTLE_ENDIAN__
20701 __ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
20702   uint32x4_t __ret;
20703   __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
20704   return __ret;
20705 }
20706 #else
20707 __ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
20708   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20709   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
20710   uint32x4_t __ret;
20711   __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
20712   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
20713   return __ret;
20714 }
20715 #endif
20716 
20717 #ifdef __LITTLE_ENDIAN__
20718 __ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
20719   uint16x8_t __ret;
20720   __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
20721   return __ret;
20722 }
20723 #else
20724 __ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
20725   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20726   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
20727   uint16x8_t __ret;
20728   __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
20729   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20730   return __ret;
20731 }
20732 #endif
20733 
20734 #ifdef __LITTLE_ENDIAN__
20735 __ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
20736   int8x16_t __ret;
20737   __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
20738   return __ret;
20739 }
20740 #else
20741 __ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
20742   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20743   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20744   int8x16_t __ret;
20745   __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
20746   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20747   return __ret;
20748 }
20749 #endif
20750 
20751 #ifdef __LITTLE_ENDIAN__
20752 __ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
20753   int32x4_t __ret;
20754   __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
20755   return __ret;
20756 }
20757 #else
20758 __ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
20759   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20760   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
20761   int32x4_t __ret;
20762   __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
20763   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
20764   return __ret;
20765 }
20766 #endif
20767 
20768 #ifdef __LITTLE_ENDIAN__
20769 __ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
20770   int16x8_t __ret;
20771   __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
20772   return __ret;
20773 }
20774 #else
20775 __ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
20776   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20777   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
20778   int16x8_t __ret;
20779   __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
20780   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20781   return __ret;
20782 }
20783 #endif
20784 
20785 #ifdef __LITTLE_ENDIAN__
20786 __ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
20787   uint8x8_t __ret;
20788   __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
20789   return __ret;
20790 }
20791 #else
20792 __ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
20793   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20794   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
20795   uint8x8_t __ret;
20796   __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
20797   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20798   return __ret;
20799 }
20800 #endif
20801 
20802 #ifdef __LITTLE_ENDIAN__
20803 __ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
20804   uint32x2_t __ret;
20805   __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
20806   return __ret;
20807 }
20808 #else
20809 __ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
20810   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
20811   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
20812   uint32x2_t __ret;
20813   __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
20814   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
20815   return __ret;
20816 }
20817 #endif
20818 
20819 #ifdef __LITTLE_ENDIAN__
20820 __ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
20821   uint16x4_t __ret;
20822   __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
20823   return __ret;
20824 }
20825 #else
20826 __ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
20827   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20828   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
20829   uint16x4_t __ret;
20830   __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
20831   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
20832   return __ret;
20833 }
20834 #endif
20835 
20836 #ifdef __LITTLE_ENDIAN__
20837 __ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) {
20838   int8x8_t __ret;
20839   __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
20840   return __ret;
20841 }
20842 #else
20843 __ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) {
20844   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20845   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
20846   int8x8_t __ret;
20847   __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
20848   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20849   return __ret;
20850 }
20851 #endif
20852 
20853 #ifdef __LITTLE_ENDIAN__
20854 __ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) {
20855   int32x2_t __ret;
20856   __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
20857   return __ret;
20858 }
20859 #else
20860 __ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) {
20861   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
20862   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
20863   int32x2_t __ret;
20864   __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
20865   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
20866   return __ret;
20867 }
20868 #endif
20869 
20870 #ifdef __LITTLE_ENDIAN__
20871 __ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) {
20872   int16x4_t __ret;
20873   __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
20874   return __ret;
20875 }
20876 #else
20877 __ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) {
20878   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20879   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
20880   int16x4_t __ret;
20881   __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
20882   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
20883   return __ret;
20884 }
20885 #endif
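/* Illustrative usage sketch (not part of the upstream header): vrhadd_* and
 * vrhaddq_* perform a rounding halving add, i.e. each lane yields
 * (a + b + 1) >> 1 evaluated without intermediate overflow, which is handy
 * for averaging rows of 8-bit pixels. The helper below is hypothetical and
 * kept inside "#if 0" so it does not change what this header declares. */
#if 0
static inline void example_average_rows_u8(const uint8_t *row0,
                                           const uint8_t *row1,
                                           uint8_t *dst, int n) {
  /* Processes 16 pixels per iteration; assumes n is a multiple of 16. */
  for (int i = 0; i < n; i += 16) {
    uint8x16_t a = vld1q_u8(row0 + i);
    uint8x16_t b = vld1q_u8(row1 + i);
    vst1q_u8(dst + i, vrhaddq_u8(a, b)); /* per-lane (a + b + 1) >> 1 */
  }
}
#endif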
20886 
20887 #ifdef __LITTLE_ENDIAN__
20888 __ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
20889   uint8x16_t __ret;
20890   __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
20891   return __ret;
20892 }
20893 #else
20894 __ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
20895   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20896   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20897   uint8x16_t __ret;
20898   __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
20899   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20900   return __ret;
20901 }
20902 #endif
20903 
20904 #ifdef __LITTLE_ENDIAN__
20905 __ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
20906   uint32x4_t __ret;
20907   __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
20908   return __ret;
20909 }
20910 #else
20911 __ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
20912   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20913   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
20914   uint32x4_t __ret;
20915   __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
20916   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
20917   return __ret;
20918 }
20919 #endif
20920 
20921 #ifdef __LITTLE_ENDIAN__
20922 __ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
20923   uint64x2_t __ret;
20924   __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
20925   return __ret;
20926 }
20927 #else
20928 __ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
20929   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
20930   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
20931   uint64x2_t __ret;
20932   __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
20933   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
20934   return __ret;
20935 }
20936 #endif
20937 
20938 #ifdef __LITTLE_ENDIAN__
20939 __ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
20940   uint16x8_t __ret;
20941   __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
20942   return __ret;
20943 }
20944 #else
20945 __ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
20946   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20947   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
20948   uint16x8_t __ret;
20949   __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
20950   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20951   return __ret;
20952 }
20953 #endif
20954 
20955 #ifdef __LITTLE_ENDIAN__
20956 __ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
20957   int8x16_t __ret;
20958   __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
20959   return __ret;
20960 }
20961 #else
20962 __ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
20963   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20964   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20965   int8x16_t __ret;
20966   __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
20967   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20968   return __ret;
20969 }
20970 #endif
20971 
20972 #ifdef __LITTLE_ENDIAN__
20973 __ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
20974   int32x4_t __ret;
20975   __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
20976   return __ret;
20977 }
20978 #else
20979 __ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
20980   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20981   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
20982   int32x4_t __ret;
20983   __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
20984   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
20985   return __ret;
20986 }
20987 #endif
20988 
20989 #ifdef __LITTLE_ENDIAN__
20990 __ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
20991   int64x2_t __ret;
20992   __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
20993   return __ret;
20994 }
20995 #else
20996 __ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
20997   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
20998   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
20999   int64x2_t __ret;
21000   __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
21001   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
21002   return __ret;
21003 }
21004 #endif
21005 
21006 #ifdef __LITTLE_ENDIAN__
21007 __ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
21008   int16x8_t __ret;
21009   __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
21010   return __ret;
21011 }
21012 #else
21013 __ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
21014   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
21015   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
21016   int16x8_t __ret;
21017   __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
21018   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
21019   return __ret;
21020 }
21021 #endif
21022 
21023 #ifdef __LITTLE_ENDIAN__
21024 __ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
21025   uint8x8_t __ret;
21026   __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
21027   return __ret;
21028 }
21029 #else
21030 __ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
21031   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
21032   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
21033   uint8x8_t __ret;
21034   __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
21035   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
21036   return __ret;
21037 }
21038 #endif
21039 
21040 #ifdef __LITTLE_ENDIAN__
21041 __ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
21042   uint32x2_t __ret;
21043   __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
21044   return __ret;
21045 }
21046 #else
21047 __ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
21048   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
21049   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
21050   uint32x2_t __ret;
21051   __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
21052   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
21053   return __ret;
21054 }
21055 #endif
21056 
21057 #ifdef __LITTLE_ENDIAN__
21058 __ai uint64x1_t vrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
21059   uint64x1_t __ret;
21060   __ret = (uint64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
21061   return __ret;
21062 }
21063 #else
21064 __ai uint64x1_t vrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
21065   uint64x1_t __ret;
21066   __ret = (uint64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
21067   return __ret;
21068 }
21069 #endif
21070 
21071 #ifdef __LITTLE_ENDIAN__
21072 __ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
21073   uint16x4_t __ret;
21074   __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
21075   return __ret;
21076 }
21077 #else
21078 __ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
21079   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
21080   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
21081   uint16x4_t __ret;
21082   __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
21083   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
21084   return __ret;
21085 }
21086 #endif
21087 
21088 #ifdef __LITTLE_ENDIAN__
21089 __ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) {
21090   int8x8_t __ret;
21091   __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
21092   return __ret;
21093 }
21094 #else
21095 __ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) {
21096   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
21097   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
21098   int8x8_t __ret;
21099   __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
21100   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
21101   return __ret;
21102 }
21103 #endif
21104 
21105 #ifdef __LITTLE_ENDIAN__
21106 __ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) {
21107   int32x2_t __ret;
21108   __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
21109   return __ret;
21110 }
21111 #else
21112 __ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) {
21113   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
21114   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
21115   int32x2_t __ret;
21116   __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
21117   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
21118   return __ret;
21119 }
21120 #endif
21121 
21122 #ifdef __LITTLE_ENDIAN__
21123 __ai int64x1_t vrshl_s64(int64x1_t __p0, int64x1_t __p1) {
21124   int64x1_t __ret;
21125   __ret = (int64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
21126   return __ret;
21127 }
21128 #else
21129 __ai int64x1_t vrshl_s64(int64x1_t __p0, int64x1_t __p1) {
21130   int64x1_t __ret;
21131   __ret = (int64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
21132   return __ret;
21133 }
21134 #endif
21135 
21136 #ifdef __LITTLE_ENDIAN__
21137 __ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) {
21138   int16x4_t __ret;
21139   __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
21140   return __ret;
21141 }
21142 #else
21143 __ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) {
21144   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
21145   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
21146   int16x4_t __ret;
21147   __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
21148   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
21149   return __ret;
21150 }
21151 #endif
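/* Illustrative usage sketch (not part of the upstream header): vrshl_* and
 * vrshlq_* shift each lane of the first operand by the signed, per-lane
 * amount in the second operand; a negative amount gives a rounding shift
 * right. The helper below is hypothetical and kept inside "#if 0" so it does
 * not change what this header declares. */
#if 0
static inline int32x4_t example_rounding_shift_right_by4(int32x4_t v) {
  /* Each lane becomes (v + 8) >> 4, i.e. a rounding divide by 16. */
  return vrshlq_s32(v, vdupq_n_s32(-4));
}
#endif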
21152 
21153 #ifdef __LITTLE_ENDIAN__
21154 #define vrshrq_n_u8(__p0, __p1) __extension__ ({ \
21155   uint8x16_t __s0 = __p0; \
21156   uint8x16_t __ret; \
21157   __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 48); \
21158   __ret; \
21159 })
21160 #else
21161 #define vrshrq_n_u8(__p0, __p1) __extension__ ({ \
21162   uint8x16_t __s0 = __p0; \
21163   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
21164   uint8x16_t __ret; \
21165   __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 48); \
21166   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
21167   __ret; \
21168 })
21169 #endif
21170 
21171 #ifdef __LITTLE_ENDIAN__
21172 #define vrshrq_n_u32(__p0, __p1) __extension__ ({ \
21173   uint32x4_t __s0 = __p0; \
21174   uint32x4_t __ret; \
21175   __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 50); \
21176   __ret; \
21177 })
21178 #else
21179 #define vrshrq_n_u32(__p0, __p1) __extension__ ({ \
21180   uint32x4_t __s0 = __p0; \
21181   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
21182   uint32x4_t __ret; \
21183   __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 50); \
21184   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
21185   __ret; \
21186 })
21187 #endif
21188 
21189 #ifdef __LITTLE_ENDIAN__
21190 #define vrshrq_n_u64(__p0, __p1) __extension__ ({ \
21191   uint64x2_t __s0 = __p0; \
21192   uint64x2_t __ret; \
21193   __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 51); \
21194   __ret; \
21195 })
21196 #else
21197 #define vrshrq_n_u64(__p0, __p1) __extension__ ({ \
21198   uint64x2_t __s0 = __p0; \
21199   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
21200   uint64x2_t __ret; \
21201   __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 51); \
21202   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
21203   __ret; \
21204 })
21205 #endif
21206 
21207 #ifdef __LITTLE_ENDIAN__
21208 #define vrshrq_n_u16(__p0, __p1) __extension__ ({ \
21209   uint16x8_t __s0 = __p0; \
21210   uint16x8_t __ret; \
21211   __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 49); \
21212   __ret; \
21213 })
21214 #else
21215 #define vrshrq_n_u16(__p0, __p1) __extension__ ({ \
21216   uint16x8_t __s0 = __p0; \
21217   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
21218   uint16x8_t __ret; \
21219   __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 49); \
21220   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
21221   __ret; \
21222 })
21223 #endif
21224 
21225 #ifdef __LITTLE_ENDIAN__
21226 #define vrshrq_n_s8(__p0, __p1) __extension__ ({ \
21227   int8x16_t __s0 = __p0; \
21228   int8x16_t __ret; \
21229   __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 32); \
21230   __ret; \
21231 })
21232 #else
21233 #define vrshrq_n_s8(__p0, __p1) __extension__ ({ \
21234   int8x16_t __s0 = __p0; \
21235   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
21236   int8x16_t __ret; \
21237   __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 32); \
21238   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
21239   __ret; \
21240 })
21241 #endif
21242 
21243 #ifdef __LITTLE_ENDIAN__
21244 #define vrshrq_n_s32(__p0, __p1) __extension__ ({ \
21245   int32x4_t __s0 = __p0; \
21246   int32x4_t __ret; \
21247   __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 34); \
21248   __ret; \
21249 })
21250 #else
21251 #define vrshrq_n_s32(__p0, __p1) __extension__ ({ \
21252   int32x4_t __s0 = __p0; \
21253   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
21254   int32x4_t __ret; \
21255   __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 34); \
21256   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
21257   __ret; \
21258 })
21259 #endif
21260 
21261 #ifdef __LITTLE_ENDIAN__
21262 #define vrshrq_n_s64(__p0, __p1) __extension__ ({ \
21263   int64x2_t __s0 = __p0; \
21264   int64x2_t __ret; \
21265   __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 35); \
21266   __ret; \
21267 })
21268 #else
21269 #define vrshrq_n_s64(__p0, __p1) __extension__ ({ \
21270   int64x2_t __s0 = __p0; \
21271   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
21272   int64x2_t __ret; \
21273   __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 35); \
21274   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
21275   __ret; \
21276 })
21277 #endif
21278 
21279 #ifdef __LITTLE_ENDIAN__
21280 #define vrshrq_n_s16(__p0, __p1) __extension__ ({ \
21281   int16x8_t __s0 = __p0; \
21282   int16x8_t __ret; \
21283   __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 33); \
21284   __ret; \
21285 })
21286 #else
21287 #define vrshrq_n_s16(__p0, __p1) __extension__ ({ \
21288   int16x8_t __s0 = __p0; \
21289   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
21290   int16x8_t __ret; \
21291   __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 33); \
21292   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
21293   __ret; \
21294 })
21295 #endif
21296 
21297 #ifdef __LITTLE_ENDIAN__
21298 #define vrshr_n_u8(__p0, __p1) __extension__ ({ \
21299   uint8x8_t __s0 = __p0; \
21300   uint8x8_t __ret; \
21301   __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 16); \
21302   __ret; \
21303 })
21304 #else
21305 #define vrshr_n_u8(__p0, __p1) __extension__ ({ \
21306   uint8x8_t __s0 = __p0; \
21307   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
21308   uint8x8_t __ret; \
21309   __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 16); \
21310   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
21311   __ret; \
21312 })
21313 #endif
21314 
21315 #ifdef __LITTLE_ENDIAN__
21316 #define vrshr_n_u32(__p0, __p1) __extension__ ({ \
21317   uint32x2_t __s0 = __p0; \
21318   uint32x2_t __ret; \
21319   __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 18); \
21320   __ret; \
21321 })
21322 #else
21323 #define vrshr_n_u32(__p0, __p1) __extension__ ({ \
21324   uint32x2_t __s0 = __p0; \
21325   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
21326   uint32x2_t __ret; \
21327   __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 18); \
21328   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
21329   __ret; \
21330 })
21331 #endif
21332 
21333 #ifdef __LITTLE_ENDIAN__
21334 #define vrshr_n_u64(__p0, __p1) __extension__ ({ \
21335   uint64x1_t __s0 = __p0; \
21336   uint64x1_t __ret; \
21337   __ret = (uint64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 19); \
21338   __ret; \
21339 })
21340 #else
21341 #define vrshr_n_u64(__p0, __p1) __extension__ ({ \
21342   uint64x1_t __s0 = __p0; \
21343   uint64x1_t __ret; \
21344   __ret = (uint64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 19); \
21345   __ret; \
21346 })
21347 #endif
21348 
21349 #ifdef __LITTLE_ENDIAN__
21350 #define vrshr_n_u16(__p0, __p1) __extension__ ({ \
21351   uint16x4_t __s0 = __p0; \
21352   uint16x4_t __ret; \
21353   __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 17); \
21354   __ret; \
21355 })
21356 #else
21357 #define vrshr_n_u16(__p0, __p1) __extension__ ({ \
21358   uint16x4_t __s0 = __p0; \
21359   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
21360   uint16x4_t __ret; \
21361   __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 17); \
21362   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
21363   __ret; \
21364 })
21365 #endif
21366 
21367 #ifdef __LITTLE_ENDIAN__
21368 #define vrshr_n_s8(__p0, __p1) __extension__ ({ \
21369   int8x8_t __s0 = __p0; \
21370   int8x8_t __ret; \
21371   __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 0); \
21372   __ret; \
21373 })
21374 #else
21375 #define vrshr_n_s8(__p0, __p1) __extension__ ({ \
21376   int8x8_t __s0 = __p0; \
21377   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
21378   int8x8_t __ret; \
21379   __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 0); \
21380   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
21381   __ret; \
21382 })
21383 #endif
21384 
21385 #ifdef __LITTLE_ENDIAN__
21386 #define vrshr_n_s32(__p0, __p1) __extension__ ({ \
21387   int32x2_t __s0 = __p0; \
21388   int32x2_t __ret; \
21389   __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 2); \
21390   __ret; \
21391 })
21392 #else
21393 #define vrshr_n_s32(__p0, __p1) __extension__ ({ \
21394   int32x2_t __s0 = __p0; \
21395   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
21396   int32x2_t __ret; \
21397   __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 2); \
21398   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
21399   __ret; \
21400 })
21401 #endif
21402 
21403 #ifdef __LITTLE_ENDIAN__
21404 #define vrshr_n_s64(__p0, __p1) __extension__ ({ \
21405   int64x1_t __s0 = __p0; \
21406   int64x1_t __ret; \
21407   __ret = (int64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 3); \
21408   __ret; \
21409 })
21410 #else
21411 #define vrshr_n_s64(__p0, __p1) __extension__ ({ \
21412   int64x1_t __s0 = __p0; \
21413   int64x1_t __ret; \
21414   __ret = (int64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 3); \
21415   __ret; \
21416 })
21417 #endif
21418 
21419 #ifdef __LITTLE_ENDIAN__
21420 #define vrshr_n_s16(__p0, __p1) __extension__ ({ \
21421   int16x4_t __s0 = __p0; \
21422   int16x4_t __ret; \
21423   __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 1); \
21424   __ret; \
21425 })
21426 #else
21427 #define vrshr_n_s16(__p0, __p1) __extension__ ({ \
21428   int16x4_t __s0 = __p0; \
21429   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
21430   int16x4_t __ret; \
21431   __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 1); \
21432   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
21433   __ret; \
21434 })
21435 #endif
21436 
21437 #ifdef __LITTLE_ENDIAN__
21438 #define vrshrn_n_u32(__p0, __p1) __extension__ ({ \
21439   uint32x4_t __s0 = __p0; \
21440   uint16x4_t __ret; \
21441   __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \
21442   __ret; \
21443 })
21444 #else
21445 #define vrshrn_n_u32(__p0, __p1) __extension__ ({ \
21446   uint32x4_t __s0 = __p0; \
21447   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
21448   uint16x4_t __ret; \
21449   __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 17); \
21450   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
21451   __ret; \
21452 })
21453 #define __noswap_vrshrn_n_u32(__p0, __p1) __extension__ ({ \
21454   uint32x4_t __s0 = __p0; \
21455   uint16x4_t __ret; \
21456   __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \
21457   __ret; \
21458 })
21459 #endif
21460 
21461 #ifdef __LITTLE_ENDIAN__
21462 #define vrshrn_n_u64(__p0, __p1) __extension__ ({ \
21463   uint64x2_t __s0 = __p0; \
21464   uint32x2_t __ret; \
21465   __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \
21466   __ret; \
21467 })
21468 #else
21469 #define vrshrn_n_u64(__p0, __p1) __extension__ ({ \
21470   uint64x2_t __s0 = __p0; \
21471   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
21472   uint32x2_t __ret; \
21473   __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 18); \
21474   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
21475   __ret; \
21476 })
21477 #define __noswap_vrshrn_n_u64(__p0, __p1) __extension__ ({ \
21478   uint64x2_t __s0 = __p0; \
21479   uint32x2_t __ret; \
21480   __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \
21481   __ret; \
21482 })
21483 #endif
21484 
21485 #ifdef __LITTLE_ENDIAN__
21486 #define vrshrn_n_u16(__p0, __p1) __extension__ ({ \
21487   uint16x8_t __s0 = __p0; \
21488   uint8x8_t __ret; \
21489   __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \
21490   __ret; \
21491 })
21492 #else
21493 #define vrshrn_n_u16(__p0, __p1) __extension__ ({ \
21494   uint16x8_t __s0 = __p0; \
21495   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
21496   uint8x8_t __ret; \
21497   __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 16); \
21498   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
21499   __ret; \
21500 })
21501 #define __noswap_vrshrn_n_u16(__p0, __p1) __extension__ ({ \
21502   uint16x8_t __s0 = __p0; \
21503   uint8x8_t __ret; \
21504   __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \
21505   __ret; \
21506 })
21507 #endif
21508 
21509 #ifdef __LITTLE_ENDIAN__
21510 #define vrshrn_n_s32(__p0, __p1) __extension__ ({ \
21511   int32x4_t __s0 = __p0; \
21512   int16x4_t __ret; \
21513   __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \
21514   __ret; \
21515 })
21516 #else
21517 #define vrshrn_n_s32(__p0, __p1) __extension__ ({ \
21518   int32x4_t __s0 = __p0; \
21519   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
21520   int16x4_t __ret; \
21521   __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 1); \
21522   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
21523   __ret; \
21524 })
21525 #define __noswap_vrshrn_n_s32(__p0, __p1) __extension__ ({ \
21526   int32x4_t __s0 = __p0; \
21527   int16x4_t __ret; \
21528   __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \
21529   __ret; \
21530 })
21531 #endif
21532 
21533 #ifdef __LITTLE_ENDIAN__
21534 #define vrshrn_n_s64(__p0, __p1) __extension__ ({ \
21535   int64x2_t __s0 = __p0; \
21536   int32x2_t __ret; \
21537   __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \
21538   __ret; \
21539 })
21540 #else
21541 #define vrshrn_n_s64(__p0, __p1) __extension__ ({ \
21542   int64x2_t __s0 = __p0; \
21543   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
21544   int32x2_t __ret; \
21545   __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 2); \
21546   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
21547   __ret; \
21548 })
21549 #define __noswap_vrshrn_n_s64(__p0, __p1) __extension__ ({ \
21550   int64x2_t __s0 = __p0; \
21551   int32x2_t __ret; \
21552   __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \
21553   __ret; \
21554 })
21555 #endif
21556 
21557 #ifdef __LITTLE_ENDIAN__
21558 #define vrshrn_n_s16(__p0, __p1) __extension__ ({ \
21559   int16x8_t __s0 = __p0; \
21560   int8x8_t __ret; \
21561   __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \
21562   __ret; \
21563 })
21564 #else
21565 #define vrshrn_n_s16(__p0, __p1) __extension__ ({ \
21566   int16x8_t __s0 = __p0; \
21567   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
21568   int8x8_t __ret; \
21569   __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 0); \
21570   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
21571   __ret; \
21572 })
21573 #define __noswap_vrshrn_n_s16(__p0, __p1) __extension__ ({ \
21574   int16x8_t __s0 = __p0; \
21575   int8x8_t __ret; \
21576   __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \
21577   __ret; \
21578 })
21579 #endif
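/* Illustrative usage sketch (not part of the upstream header): the
 * vrshrn_n_* macros above shift each lane right by an immediate with
 * rounding and then narrow it to half its width (e.g. uint16x8_t to
 * uint8x8_t, keeping the low bits of the shifted value). The helper below is
 * hypothetical and kept inside "#if 0" so it does not change what this
 * header declares. */
#if 0
static inline uint8x8_t example_narrow_fixed_point_u16(uint16x8_t acc) {
  /* Each 16-bit lane becomes (acc + 128) >> 8, truncated to 8 bits. */
  return vrshrn_n_u16(acc, 8);
}
#endif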
21580 
21581 #ifdef __LITTLE_ENDIAN__
21582 __ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) {
21583   uint32x4_t __ret;
21584   __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 50);
21585   return __ret;
21586 }
21587 #else
21588 __ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) {
21589   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
21590   uint32x4_t __ret;
21591   __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 50);
21592   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
21593   return __ret;
21594 }
21595 #endif
21596 
21597 #ifdef __LITTLE_ENDIAN__
21598 __ai float32x4_t vrsqrteq_f32(float32x4_t __p0) {
21599   float32x4_t __ret;
21600   __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 41);
21601   return __ret;
21602 }
21603 #else
21604 __ai float32x4_t vrsqrteq_f32(float32x4_t __p0) {
21605   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
21606   float32x4_t __ret;
21607   __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 41);
21608   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
21609   return __ret;
21610 }
21611 #endif
21612 
21613 #ifdef __LITTLE_ENDIAN__
21614 __ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) {
21615   uint32x2_t __ret;
21616   __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 18);
21617   return __ret;
21618 }
21619 #else
21620 __ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) {
21621   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
21622   uint32x2_t __ret;
21623   __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 18);
21624   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
21625   return __ret;
21626 }
21627 #endif
21628 
21629 #ifdef __LITTLE_ENDIAN__
21630 __ai float32x2_t vrsqrte_f32(float32x2_t __p0) {
21631   float32x2_t __ret;
21632   __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 9);
21633   return __ret;
21634 }
21635 #else
21636 __ai float32x2_t vrsqrte_f32(float32x2_t __p0) {
21637   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
21638   float32x2_t __ret;
21639   __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 9);
21640   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
21641   return __ret;
21642 }
21643 #endif
21644 
21645 #ifdef __LITTLE_ENDIAN__
21646 __ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) {
21647   float32x4_t __ret;
21648   __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
21649   return __ret;
21650 }
21651 #else
21652 __ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) {
21653   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
21654   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
21655   float32x4_t __ret;
21656   __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
21657   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
21658   return __ret;
21659 }
21660 #endif
21661 
21662 #ifdef __LITTLE_ENDIAN__
21663 __ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) {
21664   float32x2_t __ret;
21665   __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
21666   return __ret;
21667 }
21668 #else
21669 __ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) {
21670   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
21671   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
21672   float32x2_t __ret;
21673   __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
21674   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
21675   return __ret;
21676 }
21677 #endif
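/* Illustrative usage sketch (not part of the upstream header): vrsqrte_*
 * returns a low-precision estimate of 1/sqrt(x), and vrsqrts_* computes the
 * Newton-Raphson correction term (3 - a*b) / 2, so one or two refinement
 * steps sharpen the estimate. The helper below is hypothetical and kept
 * inside "#if 0" so it does not change what this header declares. */
#if 0
static inline float32x4_t example_rsqrt_refined(float32x4_t d) {
  float32x4_t x = vrsqrteq_f32(d);                      /* rough 1/sqrt(d) */
  x = vmulq_f32(x, vrsqrtsq_f32(vmulq_f32(d, x), x));   /* 1st Newton-Raphson step */
  x = vmulq_f32(x, vrsqrtsq_f32(vmulq_f32(d, x), x));   /* 2nd step for near-full precision */
  return x;
}
#endif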
21678 
21679 #ifdef __LITTLE_ENDIAN__
21680 #define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
21681   uint8x16_t __s0 = __p0; \
21682   uint8x16_t __s1 = __p1; \
21683   uint8x16_t __ret; \
21684   __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
21685   __ret; \
21686 })
21687 #else
21688 #define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
21689   uint8x16_t __s0 = __p0; \
21690   uint8x16_t __s1 = __p1; \
21691   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
21692   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
21693   uint8x16_t __ret; \
21694   __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
21695   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
21696   __ret; \
21697 })
21698 #endif
21699 
21700 #ifdef __LITTLE_ENDIAN__
21701 #define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
21702   uint32x4_t __s0 = __p0; \
21703   uint32x4_t __s1 = __p1; \
21704   uint32x4_t __ret; \
21705   __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
21706   __ret; \
21707 })
21708 #else
21709 #define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
21710   uint32x4_t __s0 = __p0; \
21711   uint32x4_t __s1 = __p1; \
21712   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
21713   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
21714   uint32x4_t __ret; \
21715   __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
21716   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
21717   __ret; \
21718 })
21719 #endif
21720 
21721 #ifdef __LITTLE_ENDIAN__
21722 #define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
21723   uint64x2_t __s0 = __p0; \
21724   uint64x2_t __s1 = __p1; \
21725   uint64x2_t __ret; \
21726   __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
21727   __ret; \
21728 })
21729 #else
21730 #define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
21731   uint64x2_t __s0 = __p0; \
21732   uint64x2_t __s1 = __p1; \
21733   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
21734   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
21735   uint64x2_t __ret; \
21736   __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
21737   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
21738   __ret; \
21739 })
21740 #endif
21741 
21742 #ifdef __LITTLE_ENDIAN__
21743 #define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
21744   uint16x8_t __s0 = __p0; \
21745   uint16x8_t __s1 = __p1; \
21746   uint16x8_t __ret; \
21747   __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
21748   __ret; \
21749 })
21750 #else
21751 #define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
21752   uint16x8_t __s0 = __p0; \
21753   uint16x8_t __s1 = __p1; \
21754   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
21755   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
21756   uint16x8_t __ret; \
21757   __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
21758   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
21759   __ret; \
21760 })
21761 #endif
21762 
21763 #ifdef __LITTLE_ENDIAN__
21764 #define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
21765   int8x16_t __s0 = __p0; \
21766   int8x16_t __s1 = __p1; \
21767   int8x16_t __ret; \
21768   __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
21769   __ret; \
21770 })
21771 #else
21772 #define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
21773   int8x16_t __s0 = __p0; \
21774   int8x16_t __s1 = __p1; \
21775   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
21776   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
21777   int8x16_t __ret; \
21778   __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
21779   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
21780   __ret; \
21781 })
21782 #endif
21783 
21784 #ifdef __LITTLE_ENDIAN__
21785 #define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
21786   int32x4_t __s0 = __p0; \
21787   int32x4_t __s1 = __p1; \
21788   int32x4_t __ret; \
21789   __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
21790   __ret; \
21791 })
21792 #else
21793 #define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
21794   int32x4_t __s0 = __p0; \
21795   int32x4_t __s1 = __p1; \
21796   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
21797   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
21798   int32x4_t __ret; \
21799   __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
21800   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
21801   __ret; \
21802 })
21803 #endif
21804 
21805 #ifdef __LITTLE_ENDIAN__
21806 #define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
21807   int64x2_t __s0 = __p0; \
21808   int64x2_t __s1 = __p1; \
21809   int64x2_t __ret; \
21810   __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
21811   __ret; \
21812 })
21813 #else
21814 #define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
21815   int64x2_t __s0 = __p0; \
21816   int64x2_t __s1 = __p1; \
21817   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
21818   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
21819   int64x2_t __ret; \
21820   __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
21821   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
21822   __ret; \
21823 })
21824 #endif
21825 
21826 #ifdef __LITTLE_ENDIAN__
21827 #define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
21828   int16x8_t __s0 = __p0; \
21829   int16x8_t __s1 = __p1; \
21830   int16x8_t __ret; \
21831   __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
21832   __ret; \
21833 })
21834 #else
21835 #define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
21836   int16x8_t __s0 = __p0; \
21837   int16x8_t __s1 = __p1; \
21838   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
21839   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
21840   int16x8_t __ret; \
21841   __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
21842   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
21843   __ret; \
21844 })
21845 #endif
21846 
21847 #ifdef __LITTLE_ENDIAN__
21848 #define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
21849   uint8x8_t __s0 = __p0; \
21850   uint8x8_t __s1 = __p1; \
21851   uint8x8_t __ret; \
21852   __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
21853   __ret; \
21854 })
21855 #else
21856 #define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
21857   uint8x8_t __s0 = __p0; \
21858   uint8x8_t __s1 = __p1; \
21859   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
21860   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
21861   uint8x8_t __ret; \
21862   __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
21863   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
21864   __ret; \
21865 })
21866 #endif
21867 
21868 #ifdef __LITTLE_ENDIAN__
21869 #define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
21870   uint32x2_t __s0 = __p0; \
21871   uint32x2_t __s1 = __p1; \
21872   uint32x2_t __ret; \
21873   __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
21874   __ret; \
21875 })
21876 #else
21877 #define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
21878   uint32x2_t __s0 = __p0; \
21879   uint32x2_t __s1 = __p1; \
21880   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
21881   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
21882   uint32x2_t __ret; \
21883   __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
21884   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
21885   __ret; \
21886 })
21887 #endif
21888 
21889 #ifdef __LITTLE_ENDIAN__
21890 #define vrsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
21891   uint64x1_t __s0 = __p0; \
21892   uint64x1_t __s1 = __p1; \
21893   uint64x1_t __ret; \
21894   __ret = (uint64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
21895   __ret; \
21896 })
21897 #else
21898 #define vrsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
21899   uint64x1_t __s0 = __p0; \
21900   uint64x1_t __s1 = __p1; \
21901   uint64x1_t __ret; \
21902   __ret = (uint64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
21903   __ret; \
21904 })
21905 #endif
21906 
21907 #ifdef __LITTLE_ENDIAN__
21908 #define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
21909   uint16x4_t __s0 = __p0; \
21910   uint16x4_t __s1 = __p1; \
21911   uint16x4_t __ret; \
21912   __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
21913   __ret; \
21914 })
21915 #else
21916 #define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
21917   uint16x4_t __s0 = __p0; \
21918   uint16x4_t __s1 = __p1; \
21919   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
21920   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
21921   uint16x4_t __ret; \
21922   __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
21923   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
21924   __ret; \
21925 })
21926 #endif
21927 
21928 #ifdef __LITTLE_ENDIAN__
21929 #define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
21930   int8x8_t __s0 = __p0; \
21931   int8x8_t __s1 = __p1; \
21932   int8x8_t __ret; \
21933   __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
21934   __ret; \
21935 })
21936 #else
21937 #define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
21938   int8x8_t __s0 = __p0; \
21939   int8x8_t __s1 = __p1; \
21940   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
21941   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
21942   int8x8_t __ret; \
21943   __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
21944   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
21945   __ret; \
21946 })
21947 #endif
21948 
21949 #ifdef __LITTLE_ENDIAN__
21950 #define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
21951   int32x2_t __s0 = __p0; \
21952   int32x2_t __s1 = __p1; \
21953   int32x2_t __ret; \
21954   __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
21955   __ret; \
21956 })
21957 #else
21958 #define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
21959   int32x2_t __s0 = __p0; \
21960   int32x2_t __s1 = __p1; \
21961   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
21962   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
21963   int32x2_t __ret; \
21964   __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
21965   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
21966   __ret; \
21967 })
21968 #endif
21969 
21970 #ifdef __LITTLE_ENDIAN__
21971 #define vrsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
21972   int64x1_t __s0 = __p0; \
21973   int64x1_t __s1 = __p1; \
21974   int64x1_t __ret; \
21975   __ret = (int64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
21976   __ret; \
21977 })
21978 #else
21979 #define vrsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
21980   int64x1_t __s0 = __p0; \
21981   int64x1_t __s1 = __p1; \
21982   int64x1_t __ret; \
21983   __ret = (int64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
21984   __ret; \
21985 })
21986 #endif
21987 
21988 #ifdef __LITTLE_ENDIAN__
21989 #define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
21990   int16x4_t __s0 = __p0; \
21991   int16x4_t __s1 = __p1; \
21992   int16x4_t __ret; \
21993   __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
21994   __ret; \
21995 })
21996 #else
21997 #define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
21998   int16x4_t __s0 = __p0; \
21999   int16x4_t __s1 = __p1; \
22000   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
22001   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
22002   int16x4_t __ret; \
22003   __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
22004   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
22005   __ret; \
22006 })
22007 #endif
22008 
22009 #ifdef __LITTLE_ENDIAN__
22010 __ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
22011   uint16x4_t __ret;
22012   __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
22013   return __ret;
22014 }
22015 #else
22016 __ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
22017   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
22018   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
22019   uint16x4_t __ret;
22020   __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
22021   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
22022   return __ret;
22023 }
22024 __ai uint16x4_t __noswap_vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
22025   uint16x4_t __ret;
22026   __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
22027   return __ret;
22028 }
22029 #endif
22030 
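/* Note: vrsubhn_* returns the rounded high half of (__p0 - __p1), narrowed to
 * half-width lanes.  The extra __noswap_* definitions, emitted only in the
 * big-endian branch, call the builtin without any lane reversal; they appear
 * to exist so that other big-endian wrappers, whose operands are already in
 * reversed order, can reuse the operation without swapping twice. */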
22031 #ifdef __LITTLE_ENDIAN__
22032 __ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
22033   uint32x2_t __ret;
22034   __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
22035   return __ret;
22036 }
22037 #else
22038 __ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
22039   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
22040   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
22041   uint32x2_t __ret;
22042   __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
22043   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
22044   return __ret;
22045 }
22046 __ai uint32x2_t __noswap_vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
22047   uint32x2_t __ret;
22048   __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
22049   return __ret;
22050 }
22051 #endif
22052 
22053 #ifdef __LITTLE_ENDIAN__
22054 __ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
22055   uint8x8_t __ret;
22056   __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
22057   return __ret;
22058 }
22059 #else
22060 __ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
22061   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
22062   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
22063   uint8x8_t __ret;
22064   __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
22065   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
22066   return __ret;
22067 }
22068 __ai uint8x8_t __noswap_vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
22069   uint8x8_t __ret;
22070   __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
22071   return __ret;
22072 }
22073 #endif
22074 
22075 #ifdef __LITTLE_ENDIAN__
22076 __ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
22077   int16x4_t __ret;
22078   __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
22079   return __ret;
22080 }
22081 #else
22082 __ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
22083   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
22084   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
22085   int16x4_t __ret;
22086   __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
22087   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
22088   return __ret;
22089 }
22090 __ai int16x4_t __noswap_vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
22091   int16x4_t __ret;
22092   __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
22093   return __ret;
22094 }
22095 #endif
22096 
22097 #ifdef __LITTLE_ENDIAN__
22098 __ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
22099   int32x2_t __ret;
22100   __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
22101   return __ret;
22102 }
22103 #else
22104 __ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
22105   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
22106   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
22107   int32x2_t __ret;
22108   __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
22109   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
22110   return __ret;
22111 }
22112 __ai int32x2_t __noswap_vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
22113   int32x2_t __ret;
22114   __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
22115   return __ret;
22116 }
22117 #endif
22118 
22119 #ifdef __LITTLE_ENDIAN__
22120 __ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
22121   int8x8_t __ret;
22122   __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
22123   return __ret;
22124 }
22125 #else
22126 __ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
22127   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
22128   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
22129   int8x8_t __ret;
22130   __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
22131   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
22132   return __ret;
22133 }
22134 __ai int8x8_t __noswap_vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
22135   int8x8_t __ret;
22136   __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
22137   return __ret;
22138 }
22139 #endif
22140 
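/* Note: vset_lane_* / vsetq_lane_* replace a single lane of a vector with a
 * scalar.  They are macros because the lane index must be a compile-time
 * constant.  Minimal usage sketch (values are illustrative only):
 *
 *   float32x2_t v = vdup_n_f32(0.0f);
 *   v = vset_lane_f32(1.5f, v, 1);   // lane index 1 must be a constant
 */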
22141 #ifdef __LITTLE_ENDIAN__
22142 #define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
22143   poly8_t __s0 = __p0; \
22144   poly8x8_t __s1 = __p1; \
22145   poly8x8_t __ret; \
22146   __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
22147   __ret; \
22148 })
22149 #else
22150 #define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
22151   poly8_t __s0 = __p0; \
22152   poly8x8_t __s1 = __p1; \
22153   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
22154   poly8x8_t __ret; \
22155   __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
22156   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
22157   __ret; \
22158 })
22159 #define __noswap_vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
22160   poly8_t __s0 = __p0; \
22161   poly8x8_t __s1 = __p1; \
22162   poly8x8_t __ret; \
22163   __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
22164   __ret; \
22165 })
22166 #endif
22167 
22168 #ifdef __LITTLE_ENDIAN__
22169 #define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
22170   poly16_t __s0 = __p0; \
22171   poly16x4_t __s1 = __p1; \
22172   poly16x4_t __ret; \
22173   __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
22174   __ret; \
22175 })
22176 #else
22177 #define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
22178   poly16_t __s0 = __p0; \
22179   poly16x4_t __s1 = __p1; \
22180   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
22181   poly16x4_t __ret; \
22182   __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__rev1, __p2); \
22183   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
22184   __ret; \
22185 })
22186 #define __noswap_vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
22187   poly16_t __s0 = __p0; \
22188   poly16x4_t __s1 = __p1; \
22189   poly16x4_t __ret; \
22190   __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
22191   __ret; \
22192 })
22193 #endif
22194 
22195 #ifdef __LITTLE_ENDIAN__
22196 #define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
22197   poly8_t __s0 = __p0; \
22198   poly8x16_t __s1 = __p1; \
22199   poly8x16_t __ret; \
22200   __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
22201   __ret; \
22202 })
22203 #else
22204 #define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
22205   poly8_t __s0 = __p0; \
22206   poly8x16_t __s1 = __p1; \
22207   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
22208   poly8x16_t __ret; \
22209   __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
22210   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
22211   __ret; \
22212 })
22213 #define __noswap_vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
22214   poly8_t __s0 = __p0; \
22215   poly8x16_t __s1 = __p1; \
22216   poly8x16_t __ret; \
22217   __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
22218   __ret; \
22219 })
22220 #endif
22221 
22222 #ifdef __LITTLE_ENDIAN__
22223 #define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
22224   poly16_t __s0 = __p0; \
22225   poly16x8_t __s1 = __p1; \
22226   poly16x8_t __ret; \
22227   __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
22228   __ret; \
22229 })
22230 #else
22231 #define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
22232   poly16_t __s0 = __p0; \
22233   poly16x8_t __s1 = __p1; \
22234   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
22235   poly16x8_t __ret; \
22236   __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__rev1, __p2); \
22237   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
22238   __ret; \
22239 })
22240 #define __noswap_vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
22241   poly16_t __s0 = __p0; \
22242   poly16x8_t __s1 = __p1; \
22243   poly16x8_t __ret; \
22244   __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
22245   __ret; \
22246 })
22247 #endif
22248 
22249 #ifdef __LITTLE_ENDIAN__
22250 #define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
22251   uint8_t __s0 = __p0; \
22252   uint8x16_t __s1 = __p1; \
22253   uint8x16_t __ret; \
22254   __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
22255   __ret; \
22256 })
22257 #else
22258 #define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
22259   uint8_t __s0 = __p0; \
22260   uint8x16_t __s1 = __p1; \
22261   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
22262   uint8x16_t __ret; \
22263   __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
22264   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
22265   __ret; \
22266 })
22267 #define __noswap_vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
22268   uint8_t __s0 = __p0; \
22269   uint8x16_t __s1 = __p1; \
22270   uint8x16_t __ret; \
22271   __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
22272   __ret; \
22273 })
22274 #endif
22275 
22276 #ifdef __LITTLE_ENDIAN__
22277 #define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
22278   uint32_t __s0 = __p0; \
22279   uint32x4_t __s1 = __p1; \
22280   uint32x4_t __ret; \
22281   __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
22282   __ret; \
22283 })
22284 #else
22285 #define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
22286   uint32_t __s0 = __p0; \
22287   uint32x4_t __s1 = __p1; \
22288   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
22289   uint32x4_t __ret; \
22290   __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__rev1, __p2); \
22291   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
22292   __ret; \
22293 })
22294 #define __noswap_vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
22295   uint32_t __s0 = __p0; \
22296   uint32x4_t __s1 = __p1; \
22297   uint32x4_t __ret; \
22298   __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
22299   __ret; \
22300 })
22301 #endif
22302 
22303 #ifdef __LITTLE_ENDIAN__
22304 #define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
22305   uint64_t __s0 = __p0; \
22306   uint64x2_t __s1 = __p1; \
22307   uint64x2_t __ret; \
22308   __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
22309   __ret; \
22310 })
22311 #else
22312 #define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
22313   uint64_t __s0 = __p0; \
22314   uint64x2_t __s1 = __p1; \
22315   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
22316   uint64x2_t __ret; \
22317   __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__rev1, __p2); \
22318   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
22319   __ret; \
22320 })
22321 #define __noswap_vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
22322   uint64_t __s0 = __p0; \
22323   uint64x2_t __s1 = __p1; \
22324   uint64x2_t __ret; \
22325   __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
22326   __ret; \
22327 })
22328 #endif
22329 
22330 #ifdef __LITTLE_ENDIAN__
22331 #define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
22332   uint16_t __s0 = __p0; \
22333   uint16x8_t __s1 = __p1; \
22334   uint16x8_t __ret; \
22335   __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
22336   __ret; \
22337 })
22338 #else
22339 #define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
22340   uint16_t __s0 = __p0; \
22341   uint16x8_t __s1 = __p1; \
22342   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
22343   uint16x8_t __ret; \
22344   __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__rev1, __p2); \
22345   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
22346   __ret; \
22347 })
22348 #define __noswap_vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
22349   uint16_t __s0 = __p0; \
22350   uint16x8_t __s1 = __p1; \
22351   uint16x8_t __ret; \
22352   __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
22353   __ret; \
22354 })
22355 #endif
22356 
22357 #ifdef __LITTLE_ENDIAN__
22358 #define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
22359   int8_t __s0 = __p0; \
22360   int8x16_t __s1 = __p1; \
22361   int8x16_t __ret; \
22362   __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
22363   __ret; \
22364 })
22365 #else
22366 #define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
22367   int8_t __s0 = __p0; \
22368   int8x16_t __s1 = __p1; \
22369   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
22370   int8x16_t __ret; \
22371   __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
22372   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
22373   __ret; \
22374 })
22375 #define __noswap_vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
22376   int8_t __s0 = __p0; \
22377   int8x16_t __s1 = __p1; \
22378   int8x16_t __ret; \
22379   __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
22380   __ret; \
22381 })
22382 #endif
22383 
22384 #ifdef __LITTLE_ENDIAN__
22385 #define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
22386   float32_t __s0 = __p0; \
22387   float32x4_t __s1 = __p1; \
22388   float32x4_t __ret; \
22389   __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (int8x16_t)__s1, __p2); \
22390   __ret; \
22391 })
22392 #else
22393 #define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
22394   float32_t __s0 = __p0; \
22395   float32x4_t __s1 = __p1; \
22396   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
22397   float32x4_t __ret; \
22398   __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (int8x16_t)__rev1, __p2); \
22399   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
22400   __ret; \
22401 })
22402 #define __noswap_vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
22403   float32_t __s0 = __p0; \
22404   float32x4_t __s1 = __p1; \
22405   float32x4_t __ret; \
22406   __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (int8x16_t)__s1, __p2); \
22407   __ret; \
22408 })
22409 #endif
22410 
22411 #ifdef __LITTLE_ENDIAN__
22412 #define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
22413   int32_t __s0 = __p0; \
22414   int32x4_t __s1 = __p1; \
22415   int32x4_t __ret; \
22416   __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
22417   __ret; \
22418 })
22419 #else
22420 #define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
22421   int32_t __s0 = __p0; \
22422   int32x4_t __s1 = __p1; \
22423   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
22424   int32x4_t __ret; \
22425   __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__rev1, __p2); \
22426   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
22427   __ret; \
22428 })
22429 #define __noswap_vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
22430   int32_t __s0 = __p0; \
22431   int32x4_t __s1 = __p1; \
22432   int32x4_t __ret; \
22433   __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
22434   __ret; \
22435 })
22436 #endif
22437 
22438 #ifdef __LITTLE_ENDIAN__
22439 #define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
22440   int64_t __s0 = __p0; \
22441   int64x2_t __s1 = __p1; \
22442   int64x2_t __ret; \
22443   __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
22444   __ret; \
22445 })
22446 #else
22447 #define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
22448   int64_t __s0 = __p0; \
22449   int64x2_t __s1 = __p1; \
22450   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
22451   int64x2_t __ret; \
22452   __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__rev1, __p2); \
22453   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
22454   __ret; \
22455 })
22456 #define __noswap_vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
22457   int64_t __s0 = __p0; \
22458   int64x2_t __s1 = __p1; \
22459   int64x2_t __ret; \
22460   __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
22461   __ret; \
22462 })
22463 #endif
22464 
22465 #ifdef __LITTLE_ENDIAN__
22466 #define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
22467   int16_t __s0 = __p0; \
22468   int16x8_t __s1 = __p1; \
22469   int16x8_t __ret; \
22470   __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
22471   __ret; \
22472 })
22473 #else
22474 #define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
22475   int16_t __s0 = __p0; \
22476   int16x8_t __s1 = __p1; \
22477   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
22478   int16x8_t __ret; \
22479   __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__rev1, __p2); \
22480   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
22481   __ret; \
22482 })
22483 #define __noswap_vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
22484   int16_t __s0 = __p0; \
22485   int16x8_t __s1 = __p1; \
22486   int16x8_t __ret; \
22487   __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
22488   __ret; \
22489 })
22490 #endif
22491 
22492 #ifdef __LITTLE_ENDIAN__
22493 #define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
22494   uint8_t __s0 = __p0; \
22495   uint8x8_t __s1 = __p1; \
22496   uint8x8_t __ret; \
22497   __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
22498   __ret; \
22499 })
22500 #else
22501 #define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
22502   uint8_t __s0 = __p0; \
22503   uint8x8_t __s1 = __p1; \
22504   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
22505   uint8x8_t __ret; \
22506   __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
22507   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
22508   __ret; \
22509 })
22510 #define __noswap_vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
22511   uint8_t __s0 = __p0; \
22512   uint8x8_t __s1 = __p1; \
22513   uint8x8_t __ret; \
22514   __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
22515   __ret; \
22516 })
22517 #endif
22518 
22519 #ifdef __LITTLE_ENDIAN__
22520 #define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
22521   uint32_t __s0 = __p0; \
22522   uint32x2_t __s1 = __p1; \
22523   uint32x2_t __ret; \
22524   __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
22525   __ret; \
22526 })
22527 #else
22528 #define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
22529   uint32_t __s0 = __p0; \
22530   uint32x2_t __s1 = __p1; \
22531   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
22532   uint32x2_t __ret; \
22533   __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__rev1, __p2); \
22534   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
22535   __ret; \
22536 })
22537 #define __noswap_vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
22538   uint32_t __s0 = __p0; \
22539   uint32x2_t __s1 = __p1; \
22540   uint32x2_t __ret; \
22541   __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
22542   __ret; \
22543 })
22544 #endif
22545 
22546 #ifdef __LITTLE_ENDIAN__
22547 #define vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \
22548   uint64_t __s0 = __p0; \
22549   uint64x1_t __s1 = __p1; \
22550   uint64x1_t __ret; \
22551   __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
22552   __ret; \
22553 })
22554 #else
22555 #define vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \
22556   uint64_t __s0 = __p0; \
22557   uint64x1_t __s1 = __p1; \
22558   uint64x1_t __ret; \
22559   __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
22560   __ret; \
22561 })
22562 #define __noswap_vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \
22563   uint64_t __s0 = __p0; \
22564   uint64x1_t __s1 = __p1; \
22565   uint64x1_t __ret; \
22566   __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
22567   __ret; \
22568 })
22569 #endif
22570 
22571 #ifdef __LITTLE_ENDIAN__
22572 #define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
22573   uint16_t __s0 = __p0; \
22574   uint16x4_t __s1 = __p1; \
22575   uint16x4_t __ret; \
22576   __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
22577   __ret; \
22578 })
22579 #else
22580 #define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
22581   uint16_t __s0 = __p0; \
22582   uint16x4_t __s1 = __p1; \
22583   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
22584   uint16x4_t __ret; \
22585   __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__rev1, __p2); \
22586   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
22587   __ret; \
22588 })
22589 #define __noswap_vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
22590   uint16_t __s0 = __p0; \
22591   uint16x4_t __s1 = __p1; \
22592   uint16x4_t __ret; \
22593   __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
22594   __ret; \
22595 })
22596 #endif
22597 
22598 #ifdef __LITTLE_ENDIAN__
22599 #define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
22600   int8_t __s0 = __p0; \
22601   int8x8_t __s1 = __p1; \
22602   int8x8_t __ret; \
22603   __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
22604   __ret; \
22605 })
22606 #else
22607 #define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
22608   int8_t __s0 = __p0; \
22609   int8x8_t __s1 = __p1; \
22610   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
22611   int8x8_t __ret; \
22612   __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
22613   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
22614   __ret; \
22615 })
22616 #define __noswap_vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
22617   int8_t __s0 = __p0; \
22618   int8x8_t __s1 = __p1; \
22619   int8x8_t __ret; \
22620   __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
22621   __ret; \
22622 })
22623 #endif
22624 
22625 #ifdef __LITTLE_ENDIAN__
22626 #define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
22627   float32_t __s0 = __p0; \
22628   float32x2_t __s1 = __p1; \
22629   float32x2_t __ret; \
22630   __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (int8x8_t)__s1, __p2); \
22631   __ret; \
22632 })
22633 #else
22634 #define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
22635   float32_t __s0 = __p0; \
22636   float32x2_t __s1 = __p1; \
22637   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
22638   float32x2_t __ret; \
22639   __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (int8x8_t)__rev1, __p2); \
22640   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
22641   __ret; \
22642 })
22643 #define __noswap_vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
22644   float32_t __s0 = __p0; \
22645   float32x2_t __s1 = __p1; \
22646   float32x2_t __ret; \
22647   __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (int8x8_t)__s1, __p2); \
22648   __ret; \
22649 })
22650 #endif
22651 
22652 #ifdef __LITTLE_ENDIAN__
22653 #define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
22654   int32_t __s0 = __p0; \
22655   int32x2_t __s1 = __p1; \
22656   int32x2_t __ret; \
22657   __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
22658   __ret; \
22659 })
22660 #else
22661 #define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
22662   int32_t __s0 = __p0; \
22663   int32x2_t __s1 = __p1; \
22664   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
22665   int32x2_t __ret; \
22666   __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__rev1, __p2); \
22667   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
22668   __ret; \
22669 })
22670 #define __noswap_vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
22671   int32_t __s0 = __p0; \
22672   int32x2_t __s1 = __p1; \
22673   int32x2_t __ret; \
22674   __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
22675   __ret; \
22676 })
22677 #endif
22678 
22679 #ifdef __LITTLE_ENDIAN__
22680 #define vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \
22681   int64_t __s0 = __p0; \
22682   int64x1_t __s1 = __p1; \
22683   int64x1_t __ret; \
22684   __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
22685   __ret; \
22686 })
22687 #else
22688 #define vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \
22689   int64_t __s0 = __p0; \
22690   int64x1_t __s1 = __p1; \
22691   int64x1_t __ret; \
22692   __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
22693   __ret; \
22694 })
22695 #define __noswap_vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \
22696   int64_t __s0 = __p0; \
22697   int64x1_t __s1 = __p1; \
22698   int64x1_t __ret; \
22699   __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
22700   __ret; \
22701 })
22702 #endif
22703 
22704 #ifdef __LITTLE_ENDIAN__
22705 #define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
22706   int16_t __s0 = __p0; \
22707   int16x4_t __s1 = __p1; \
22708   int16x4_t __ret; \
22709   __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
22710   __ret; \
22711 })
22712 #else
22713 #define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
22714   int16_t __s0 = __p0; \
22715   int16x4_t __s1 = __p1; \
22716   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
22717   int16x4_t __ret; \
22718   __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__rev1, __p2); \
22719   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
22720   __ret; \
22721 })
22722 #define __noswap_vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
22723   int16_t __s0 = __p0; \
22724   int16x4_t __s1 = __p1; \
22725   int16x4_t __ret; \
22726   __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
22727   __ret; \
22728 })
22729 #endif
22730 
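/* Note: vshl_* / vshlq_* (the non-_n forms below) shift each lane of the first
 * operand by the signed per-lane count taken from the second operand; a
 * negative count shifts right.  The *_n forms further down take an immediate
 * shift amount instead and are therefore defined as macros. */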
22731 #ifdef __LITTLE_ENDIAN__
22732 __ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
22733   uint8x16_t __ret;
22734   __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
22735   return __ret;
22736 }
22737 #else
22738 __ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
22739   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
22740   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
22741   uint8x16_t __ret;
22742   __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
22743   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
22744   return __ret;
22745 }
22746 #endif
22747 
22748 #ifdef __LITTLE_ENDIAN__
22749 __ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
22750   uint32x4_t __ret;
22751   __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
22752   return __ret;
22753 }
22754 #else
22755 __ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
22756   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
22757   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
22758   uint32x4_t __ret;
22759   __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
22760   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
22761   return __ret;
22762 }
22763 #endif
22764 
22765 #ifdef __LITTLE_ENDIAN__
22766 __ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
22767   uint64x2_t __ret;
22768   __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
22769   return __ret;
22770 }
22771 #else
22772 __ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
22773   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
22774   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
22775   uint64x2_t __ret;
22776   __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
22777   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
22778   return __ret;
22779 }
22780 #endif
22781 
22782 #ifdef __LITTLE_ENDIAN__
22783 __ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
22784   uint16x8_t __ret;
22785   __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
22786   return __ret;
22787 }
22788 #else
22789 __ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
22790   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
22791   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
22792   uint16x8_t __ret;
22793   __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
22794   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
22795   return __ret;
22796 }
22797 #endif
22798 
22799 #ifdef __LITTLE_ENDIAN__
22800 __ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) {
22801   int8x16_t __ret;
22802   __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
22803   return __ret;
22804 }
22805 #else
22806 __ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) {
22807   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
22808   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
22809   int8x16_t __ret;
22810   __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
22811   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
22812   return __ret;
22813 }
22814 #endif
22815 
22816 #ifdef __LITTLE_ENDIAN__
22817 __ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) {
22818   int32x4_t __ret;
22819   __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
22820   return __ret;
22821 }
22822 #else
22823 __ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) {
22824   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
22825   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
22826   int32x4_t __ret;
22827   __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
22828   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
22829   return __ret;
22830 }
22831 #endif
22832 
22833 #ifdef __LITTLE_ENDIAN__
22834 __ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) {
22835   int64x2_t __ret;
22836   __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
22837   return __ret;
22838 }
22839 #else
22840 __ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) {
22841   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
22842   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
22843   int64x2_t __ret;
22844   __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
22845   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
22846   return __ret;
22847 }
22848 #endif
22849 
22850 #ifdef __LITTLE_ENDIAN__
22851 __ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) {
22852   int16x8_t __ret;
22853   __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
22854   return __ret;
22855 }
22856 #else
22857 __ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) {
22858   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
22859   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
22860   int16x8_t __ret;
22861   __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
22862   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
22863   return __ret;
22864 }
22865 #endif
22866 
22867 #ifdef __LITTLE_ENDIAN__
22868 __ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) {
22869   uint8x8_t __ret;
22870   __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
22871   return __ret;
22872 }
22873 #else
22874 __ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) {
22875   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
22876   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
22877   uint8x8_t __ret;
22878   __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
22879   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
22880   return __ret;
22881 }
22882 #endif
22883 
22884 #ifdef __LITTLE_ENDIAN__
22885 __ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) {
22886   uint32x2_t __ret;
22887   __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
22888   return __ret;
22889 }
22890 #else
22891 __ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) {
22892   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
22893   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
22894   uint32x2_t __ret;
22895   __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
22896   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
22897   return __ret;
22898 }
22899 #endif
22900 
22901 #ifdef __LITTLE_ENDIAN__
22902 __ai uint64x1_t vshl_u64(uint64x1_t __p0, int64x1_t __p1) {
22903   uint64x1_t __ret;
22904   __ret = (uint64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
22905   return __ret;
22906 }
22907 #else
22908 __ai uint64x1_t vshl_u64(uint64x1_t __p0, int64x1_t __p1) {
22909   uint64x1_t __ret;
22910   __ret = (uint64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
22911   return __ret;
22912 }
22913 #endif
22914 
22915 #ifdef __LITTLE_ENDIAN__
22916 __ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) {
22917   uint16x4_t __ret;
22918   __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
22919   return __ret;
22920 }
22921 #else
22922 __ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) {
22923   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
22924   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
22925   uint16x4_t __ret;
22926   __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
22927   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
22928   return __ret;
22929 }
22930 #endif
22931 
22932 #ifdef __LITTLE_ENDIAN__
22933 __ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) {
22934   int8x8_t __ret;
22935   __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
22936   return __ret;
22937 }
22938 #else
22939 __ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) {
22940   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
22941   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
22942   int8x8_t __ret;
22943   __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
22944   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
22945   return __ret;
22946 }
22947 #endif
22948 
22949 #ifdef __LITTLE_ENDIAN__
22950 __ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) {
22951   int32x2_t __ret;
22952   __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
22953   return __ret;
22954 }
22955 #else
22956 __ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) {
22957   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
22958   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
22959   int32x2_t __ret;
22960   __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
22961   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
22962   return __ret;
22963 }
22964 #endif
22965 
22966 #ifdef __LITTLE_ENDIAN__
22967 __ai int64x1_t vshl_s64(int64x1_t __p0, int64x1_t __p1) {
22968   int64x1_t __ret;
22969   __ret = (int64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
22970   return __ret;
22971 }
22972 #else
22973 __ai int64x1_t vshl_s64(int64x1_t __p0, int64x1_t __p1) {
22974   int64x1_t __ret;
22975   __ret = (int64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
22976   return __ret;
22977 }
22978 #endif
22979 
22980 #ifdef __LITTLE_ENDIAN__
22981 __ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) {
22982   int16x4_t __ret;
22983   __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
22984   return __ret;
22985 }
22986 #else
22987 __ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) {
22988   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
22989   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
22990   int16x4_t __ret;
22991   __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
22992   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
22993   return __ret;
22994 }
22995 #endif
22996 
22997 #ifdef __LITTLE_ENDIAN__
22998 #define vshlq_n_u8(__p0, __p1) __extension__ ({ \
22999   uint8x16_t __s0 = __p0; \
23000   uint8x16_t __ret; \
23001   __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 48); \
23002   __ret; \
23003 })
23004 #else
23005 #define vshlq_n_u8(__p0, __p1) __extension__ ({ \
23006   uint8x16_t __s0 = __p0; \
23007   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
23008   uint8x16_t __ret; \
23009   __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 48); \
23010   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
23011   __ret; \
23012 })
23013 #endif
23014 
23015 #ifdef __LITTLE_ENDIAN__
23016 #define vshlq_n_u32(__p0, __p1) __extension__ ({ \
23017   uint32x4_t __s0 = __p0; \
23018   uint32x4_t __ret; \
23019   __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 50); \
23020   __ret; \
23021 })
23022 #else
23023 #define vshlq_n_u32(__p0, __p1) __extension__ ({ \
23024   uint32x4_t __s0 = __p0; \
23025   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
23026   uint32x4_t __ret; \
23027   __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 50); \
23028   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
23029   __ret; \
23030 })
23031 #endif
23032 
23033 #ifdef __LITTLE_ENDIAN__
23034 #define vshlq_n_u64(__p0, __p1) __extension__ ({ \
23035   uint64x2_t __s0 = __p0; \
23036   uint64x2_t __ret; \
23037   __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 51); \
23038   __ret; \
23039 })
23040 #else
23041 #define vshlq_n_u64(__p0, __p1) __extension__ ({ \
23042   uint64x2_t __s0 = __p0; \
23043   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
23044   uint64x2_t __ret; \
23045   __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 51); \
23046   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
23047   __ret; \
23048 })
23049 #endif
23050 
23051 #ifdef __LITTLE_ENDIAN__
23052 #define vshlq_n_u16(__p0, __p1) __extension__ ({ \
23053   uint16x8_t __s0 = __p0; \
23054   uint16x8_t __ret; \
23055   __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 49); \
23056   __ret; \
23057 })
23058 #else
23059 #define vshlq_n_u16(__p0, __p1) __extension__ ({ \
23060   uint16x8_t __s0 = __p0; \
23061   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
23062   uint16x8_t __ret; \
23063   __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 49); \
23064   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
23065   __ret; \
23066 })
23067 #endif
23068 
23069 #ifdef __LITTLE_ENDIAN__
23070 #define vshlq_n_s8(__p0, __p1) __extension__ ({ \
23071   int8x16_t __s0 = __p0; \
23072   int8x16_t __ret; \
23073   __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 32); \
23074   __ret; \
23075 })
23076 #else
23077 #define vshlq_n_s8(__p0, __p1) __extension__ ({ \
23078   int8x16_t __s0 = __p0; \
23079   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
23080   int8x16_t __ret; \
23081   __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 32); \
23082   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
23083   __ret; \
23084 })
23085 #endif
23086 
23087 #ifdef __LITTLE_ENDIAN__
23088 #define vshlq_n_s32(__p0, __p1) __extension__ ({ \
23089   int32x4_t __s0 = __p0; \
23090   int32x4_t __ret; \
23091   __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 34); \
23092   __ret; \
23093 })
23094 #else
23095 #define vshlq_n_s32(__p0, __p1) __extension__ ({ \
23096   int32x4_t __s0 = __p0; \
23097   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
23098   int32x4_t __ret; \
23099   __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 34); \
23100   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
23101   __ret; \
23102 })
23103 #endif
23104 
23105 #ifdef __LITTLE_ENDIAN__
23106 #define vshlq_n_s64(__p0, __p1) __extension__ ({ \
23107   int64x2_t __s0 = __p0; \
23108   int64x2_t __ret; \
23109   __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 35); \
23110   __ret; \
23111 })
23112 #else
23113 #define vshlq_n_s64(__p0, __p1) __extension__ ({ \
23114   int64x2_t __s0 = __p0; \
23115   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
23116   int64x2_t __ret; \
23117   __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 35); \
23118   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
23119   __ret; \
23120 })
23121 #endif
23122 
23123 #ifdef __LITTLE_ENDIAN__
23124 #define vshlq_n_s16(__p0, __p1) __extension__ ({ \
23125   int16x8_t __s0 = __p0; \
23126   int16x8_t __ret; \
23127   __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 33); \
23128   __ret; \
23129 })
23130 #else
23131 #define vshlq_n_s16(__p0, __p1) __extension__ ({ \
23132   int16x8_t __s0 = __p0; \
23133   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
23134   int16x8_t __ret; \
23135   __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 33); \
23136   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
23137   __ret; \
23138 })
23139 #endif
23140 
23141 #ifdef __LITTLE_ENDIAN__
23142 #define vshl_n_u8(__p0, __p1) __extension__ ({ \
23143   uint8x8_t __s0 = __p0; \
23144   uint8x8_t __ret; \
23145   __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 16); \
23146   __ret; \
23147 })
23148 #else
23149 #define vshl_n_u8(__p0, __p1) __extension__ ({ \
23150   uint8x8_t __s0 = __p0; \
23151   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
23152   uint8x8_t __ret; \
23153   __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 16); \
23154   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
23155   __ret; \
23156 })
23157 #endif
23158 
23159 #ifdef __LITTLE_ENDIAN__
23160 #define vshl_n_u32(__p0, __p1) __extension__ ({ \
23161   uint32x2_t __s0 = __p0; \
23162   uint32x2_t __ret; \
23163   __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 18); \
23164   __ret; \
23165 })
23166 #else
23167 #define vshl_n_u32(__p0, __p1) __extension__ ({ \
23168   uint32x2_t __s0 = __p0; \
23169   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
23170   uint32x2_t __ret; \
23171   __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 18); \
23172   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
23173   __ret; \
23174 })
23175 #endif
23176 
23177 #ifdef __LITTLE_ENDIAN__
23178 #define vshl_n_u64(__p0, __p1) __extension__ ({ \
23179   uint64x1_t __s0 = __p0; \
23180   uint64x1_t __ret; \
23181   __ret = (uint64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 19); \
23182   __ret; \
23183 })
23184 #else
23185 #define vshl_n_u64(__p0, __p1) __extension__ ({ \
23186   uint64x1_t __s0 = __p0; \
23187   uint64x1_t __ret; \
23188   __ret = (uint64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 19); \
23189   __ret; \
23190 })
23191 #endif
23192 
23193 #ifdef __LITTLE_ENDIAN__
23194 #define vshl_n_u16(__p0, __p1) __extension__ ({ \
23195   uint16x4_t __s0 = __p0; \
23196   uint16x4_t __ret; \
23197   __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 17); \
23198   __ret; \
23199 })
23200 #else
23201 #define vshl_n_u16(__p0, __p1) __extension__ ({ \
23202   uint16x4_t __s0 = __p0; \
23203   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
23204   uint16x4_t __ret; \
23205   __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 17); \
23206   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
23207   __ret; \
23208 })
23209 #endif
23210 
23211 #ifdef __LITTLE_ENDIAN__
23212 #define vshl_n_s8(__p0, __p1) __extension__ ({ \
23213   int8x8_t __s0 = __p0; \
23214   int8x8_t __ret; \
23215   __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 0); \
23216   __ret; \
23217 })
23218 #else
23219 #define vshl_n_s8(__p0, __p1) __extension__ ({ \
23220   int8x8_t __s0 = __p0; \
23221   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
23222   int8x8_t __ret; \
23223   __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 0); \
23224   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
23225   __ret; \
23226 })
23227 #endif
23228 
23229 #ifdef __LITTLE_ENDIAN__
23230 #define vshl_n_s32(__p0, __p1) __extension__ ({ \
23231   int32x2_t __s0 = __p0; \
23232   int32x2_t __ret; \
23233   __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 2); \
23234   __ret; \
23235 })
23236 #else
23237 #define vshl_n_s32(__p0, __p1) __extension__ ({ \
23238   int32x2_t __s0 = __p0; \
23239   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
23240   int32x2_t __ret; \
23241   __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 2); \
23242   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
23243   __ret; \
23244 })
23245 #endif
23246 
23247 #ifdef __LITTLE_ENDIAN__
23248 #define vshl_n_s64(__p0, __p1) __extension__ ({ \
23249   int64x1_t __s0 = __p0; \
23250   int64x1_t __ret; \
23251   __ret = (int64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 3); \
23252   __ret; \
23253 })
23254 #else
23255 #define vshl_n_s64(__p0, __p1) __extension__ ({ \
23256   int64x1_t __s0 = __p0; \
23257   int64x1_t __ret; \
23258   __ret = (int64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 3); \
23259   __ret; \
23260 })
23261 #endif
23262 
23263 #ifdef __LITTLE_ENDIAN__
23264 #define vshl_n_s16(__p0, __p1) __extension__ ({ \
23265   int16x4_t __s0 = __p0; \
23266   int16x4_t __ret; \
23267   __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 1); \
23268   __ret; \
23269 })
23270 #else
23271 #define vshl_n_s16(__p0, __p1) __extension__ ({ \
23272   int16x4_t __s0 = __p0; \
23273   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
23274   int16x4_t __ret; \
23275   __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 1); \
23276   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
23277   __ret; \
23278 })
23279 #endif
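/* Usage sketch: vshl_n_* / vshlq_n_* shift every lane left by the same compile-time
 * constant, which must be a literal in range for the lane width. A minimal, hypothetical
 * example assuming a fixed scale factor of 16:
 *
 *   static inline uint32x4_t times_sixteen(uint32x4_t v) {
 *     return vshlq_n_u32(v, 4);  // each 32-bit lane shifted left by 4 (i.e. multiplied by 16)
 *   }
 */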
23280 
23281 #ifdef __LITTLE_ENDIAN__
23282 #define vshll_n_u8(__p0, __p1) __extension__ ({ \
23283   uint8x8_t __s0 = __p0; \
23284   uint16x8_t __ret; \
23285   __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \
23286   __ret; \
23287 })
23288 #else
23289 #define vshll_n_u8(__p0, __p1) __extension__ ({ \
23290   uint8x8_t __s0 = __p0; \
23291   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
23292   uint16x8_t __ret; \
23293   __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 49); \
23294   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
23295   __ret; \
23296 })
23297 #define __noswap_vshll_n_u8(__p0, __p1) __extension__ ({ \
23298   uint8x8_t __s0 = __p0; \
23299   uint16x8_t __ret; \
23300   __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \
23301   __ret; \
23302 })
23303 #endif
23304 
23305 #ifdef __LITTLE_ENDIAN__
23306 #define vshll_n_u32(__p0, __p1) __extension__ ({ \
23307   uint32x2_t __s0 = __p0; \
23308   uint64x2_t __ret; \
23309   __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \
23310   __ret; \
23311 })
23312 #else
23313 #define vshll_n_u32(__p0, __p1) __extension__ ({ \
23314   uint32x2_t __s0 = __p0; \
23315   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
23316   uint64x2_t __ret; \
23317   __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 51); \
23318   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
23319   __ret; \
23320 })
23321 #define __noswap_vshll_n_u32(__p0, __p1) __extension__ ({ \
23322   uint32x2_t __s0 = __p0; \
23323   uint64x2_t __ret; \
23324   __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \
23325   __ret; \
23326 })
23327 #endif
23328 
23329 #ifdef __LITTLE_ENDIAN__
23330 #define vshll_n_u16(__p0, __p1) __extension__ ({ \
23331   uint16x4_t __s0 = __p0; \
23332   uint32x4_t __ret; \
23333   __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \
23334   __ret; \
23335 })
23336 #else
23337 #define vshll_n_u16(__p0, __p1) __extension__ ({ \
23338   uint16x4_t __s0 = __p0; \
23339   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
23340   uint32x4_t __ret; \
23341   __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 50); \
23342   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
23343   __ret; \
23344 })
23345 #define __noswap_vshll_n_u16(__p0, __p1) __extension__ ({ \
23346   uint16x4_t __s0 = __p0; \
23347   uint32x4_t __ret; \
23348   __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \
23349   __ret; \
23350 })
23351 #endif
23352 
23353 #ifdef __LITTLE_ENDIAN__
23354 #define vshll_n_s8(__p0, __p1) __extension__ ({ \
23355   int8x8_t __s0 = __p0; \
23356   int16x8_t __ret; \
23357   __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \
23358   __ret; \
23359 })
23360 #else
23361 #define vshll_n_s8(__p0, __p1) __extension__ ({ \
23362   int8x8_t __s0 = __p0; \
23363   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
23364   int16x8_t __ret; \
23365   __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 33); \
23366   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
23367   __ret; \
23368 })
23369 #define __noswap_vshll_n_s8(__p0, __p1) __extension__ ({ \
23370   int8x8_t __s0 = __p0; \
23371   int16x8_t __ret; \
23372   __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \
23373   __ret; \
23374 })
23375 #endif
23376 
23377 #ifdef __LITTLE_ENDIAN__
23378 #define vshll_n_s32(__p0, __p1) __extension__ ({ \
23379   int32x2_t __s0 = __p0; \
23380   int64x2_t __ret; \
23381   __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \
23382   __ret; \
23383 })
23384 #else
23385 #define vshll_n_s32(__p0, __p1) __extension__ ({ \
23386   int32x2_t __s0 = __p0; \
23387   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
23388   int64x2_t __ret; \
23389   __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 35); \
23390   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
23391   __ret; \
23392 })
23393 #define __noswap_vshll_n_s32(__p0, __p1) __extension__ ({ \
23394   int32x2_t __s0 = __p0; \
23395   int64x2_t __ret; \
23396   __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \
23397   __ret; \
23398 })
23399 #endif
23400 
23401 #ifdef __LITTLE_ENDIAN__
23402 #define vshll_n_s16(__p0, __p1) __extension__ ({ \
23403   int16x4_t __s0 = __p0; \
23404   int32x4_t __ret; \
23405   __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \
23406   __ret; \
23407 })
23408 #else
23409 #define vshll_n_s16(__p0, __p1) __extension__ ({ \
23410   int16x4_t __s0 = __p0; \
23411   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
23412   int32x4_t __ret; \
23413   __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 34); \
23414   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
23415   __ret; \
23416 })
23417 #define __noswap_vshll_n_s16(__p0, __p1) __extension__ ({ \
23418   int16x4_t __s0 = __p0; \
23419   int32x4_t __ret; \
23420   __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \
23421   __ret; \
23422 })
23423 #endif
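/* Usage sketch: vshll_n_* widens each lane to twice its width and then shifts left by the
 * immediate, which is useful for promoting 8-bit data to 16-bit with a scale applied.
 * A minimal, hypothetical example:
 *
 *   static inline uint16x8_t widen_and_scale(uint8x8_t bytes) {
 *     return vshll_n_u8(bytes, 3);  // uint8x8_t -> uint16x8_t, each widened lane shifted left by 3
 *   }
 */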
23424 
23425 #ifdef __LITTLE_ENDIAN__
23426 #define vshrq_n_u8(__p0, __p1) __extension__ ({ \
23427   uint8x16_t __s0 = __p0; \
23428   uint8x16_t __ret; \
23429   __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 48); \
23430   __ret; \
23431 })
23432 #else
23433 #define vshrq_n_u8(__p0, __p1) __extension__ ({ \
23434   uint8x16_t __s0 = __p0; \
23435   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
23436   uint8x16_t __ret; \
23437   __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 48); \
23438   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
23439   __ret; \
23440 })
23441 #endif
23442 
23443 #ifdef __LITTLE_ENDIAN__
23444 #define vshrq_n_u32(__p0, __p1) __extension__ ({ \
23445   uint32x4_t __s0 = __p0; \
23446   uint32x4_t __ret; \
23447   __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 50); \
23448   __ret; \
23449 })
23450 #else
23451 #define vshrq_n_u32(__p0, __p1) __extension__ ({ \
23452   uint32x4_t __s0 = __p0; \
23453   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
23454   uint32x4_t __ret; \
23455   __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 50); \
23456   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
23457   __ret; \
23458 })
23459 #endif
23460 
23461 #ifdef __LITTLE_ENDIAN__
23462 #define vshrq_n_u64(__p0, __p1) __extension__ ({ \
23463   uint64x2_t __s0 = __p0; \
23464   uint64x2_t __ret; \
23465   __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 51); \
23466   __ret; \
23467 })
23468 #else
23469 #define vshrq_n_u64(__p0, __p1) __extension__ ({ \
23470   uint64x2_t __s0 = __p0; \
23471   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
23472   uint64x2_t __ret; \
23473   __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 51); \
23474   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
23475   __ret; \
23476 })
23477 #endif
23478 
23479 #ifdef __LITTLE_ENDIAN__
23480 #define vshrq_n_u16(__p0, __p1) __extension__ ({ \
23481   uint16x8_t __s0 = __p0; \
23482   uint16x8_t __ret; \
23483   __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 49); \
23484   __ret; \
23485 })
23486 #else
23487 #define vshrq_n_u16(__p0, __p1) __extension__ ({ \
23488   uint16x8_t __s0 = __p0; \
23489   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
23490   uint16x8_t __ret; \
23491   __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 49); \
23492   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
23493   __ret; \
23494 })
23495 #endif
23496 
23497 #ifdef __LITTLE_ENDIAN__
23498 #define vshrq_n_s8(__p0, __p1) __extension__ ({ \
23499   int8x16_t __s0 = __p0; \
23500   int8x16_t __ret; \
23501   __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 32); \
23502   __ret; \
23503 })
23504 #else
23505 #define vshrq_n_s8(__p0, __p1) __extension__ ({ \
23506   int8x16_t __s0 = __p0; \
23507   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
23508   int8x16_t __ret; \
23509   __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 32); \
23510   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
23511   __ret; \
23512 })
23513 #endif
23514 
23515 #ifdef __LITTLE_ENDIAN__
23516 #define vshrq_n_s32(__p0, __p1) __extension__ ({ \
23517   int32x4_t __s0 = __p0; \
23518   int32x4_t __ret; \
23519   __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 34); \
23520   __ret; \
23521 })
23522 #else
23523 #define vshrq_n_s32(__p0, __p1) __extension__ ({ \
23524   int32x4_t __s0 = __p0; \
23525   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
23526   int32x4_t __ret; \
23527   __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 34); \
23528   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
23529   __ret; \
23530 })
23531 #endif
23532 
23533 #ifdef __LITTLE_ENDIAN__
23534 #define vshrq_n_s64(__p0, __p1) __extension__ ({ \
23535   int64x2_t __s0 = __p0; \
23536   int64x2_t __ret; \
23537   __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 35); \
23538   __ret; \
23539 })
23540 #else
23541 #define vshrq_n_s64(__p0, __p1) __extension__ ({ \
23542   int64x2_t __s0 = __p0; \
23543   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
23544   int64x2_t __ret; \
23545   __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 35); \
23546   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
23547   __ret; \
23548 })
23549 #endif
23550 
23551 #ifdef __LITTLE_ENDIAN__
23552 #define vshrq_n_s16(__p0, __p1) __extension__ ({ \
23553   int16x8_t __s0 = __p0; \
23554   int16x8_t __ret; \
23555   __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 33); \
23556   __ret; \
23557 })
23558 #else
23559 #define vshrq_n_s16(__p0, __p1) __extension__ ({ \
23560   int16x8_t __s0 = __p0; \
23561   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
23562   int16x8_t __ret; \
23563   __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 33); \
23564   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
23565   __ret; \
23566 })
23567 #endif
23568 
23569 #ifdef __LITTLE_ENDIAN__
23570 #define vshr_n_u8(__p0, __p1) __extension__ ({ \
23571   uint8x8_t __s0 = __p0; \
23572   uint8x8_t __ret; \
23573   __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 16); \
23574   __ret; \
23575 })
23576 #else
23577 #define vshr_n_u8(__p0, __p1) __extension__ ({ \
23578   uint8x8_t __s0 = __p0; \
23579   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
23580   uint8x8_t __ret; \
23581   __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 16); \
23582   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
23583   __ret; \
23584 })
23585 #endif
23586 
23587 #ifdef __LITTLE_ENDIAN__
23588 #define vshr_n_u32(__p0, __p1) __extension__ ({ \
23589   uint32x2_t __s0 = __p0; \
23590   uint32x2_t __ret; \
23591   __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 18); \
23592   __ret; \
23593 })
23594 #else
23595 #define vshr_n_u32(__p0, __p1) __extension__ ({ \
23596   uint32x2_t __s0 = __p0; \
23597   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
23598   uint32x2_t __ret; \
23599   __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 18); \
23600   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
23601   __ret; \
23602 })
23603 #endif
23604 
23605 #ifdef __LITTLE_ENDIAN__
23606 #define vshr_n_u64(__p0, __p1) __extension__ ({ \
23607   uint64x1_t __s0 = __p0; \
23608   uint64x1_t __ret; \
23609   __ret = (uint64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 19); \
23610   __ret; \
23611 })
23612 #else
23613 #define vshr_n_u64(__p0, __p1) __extension__ ({ \
23614   uint64x1_t __s0 = __p0; \
23615   uint64x1_t __ret; \
23616   __ret = (uint64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 19); \
23617   __ret; \
23618 })
23619 #endif
23620 
23621 #ifdef __LITTLE_ENDIAN__
23622 #define vshr_n_u16(__p0, __p1) __extension__ ({ \
23623   uint16x4_t __s0 = __p0; \
23624   uint16x4_t __ret; \
23625   __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 17); \
23626   __ret; \
23627 })
23628 #else
23629 #define vshr_n_u16(__p0, __p1) __extension__ ({ \
23630   uint16x4_t __s0 = __p0; \
23631   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
23632   uint16x4_t __ret; \
23633   __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 17); \
23634   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
23635   __ret; \
23636 })
23637 #endif
23638 
23639 #ifdef __LITTLE_ENDIAN__
23640 #define vshr_n_s8(__p0, __p1) __extension__ ({ \
23641   int8x8_t __s0 = __p0; \
23642   int8x8_t __ret; \
23643   __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 0); \
23644   __ret; \
23645 })
23646 #else
23647 #define vshr_n_s8(__p0, __p1) __extension__ ({ \
23648   int8x8_t __s0 = __p0; \
23649   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
23650   int8x8_t __ret; \
23651   __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 0); \
23652   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
23653   __ret; \
23654 })
23655 #endif
23656 
23657 #ifdef __LITTLE_ENDIAN__
23658 #define vshr_n_s32(__p0, __p1) __extension__ ({ \
23659   int32x2_t __s0 = __p0; \
23660   int32x2_t __ret; \
23661   __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 2); \
23662   __ret; \
23663 })
23664 #else
23665 #define vshr_n_s32(__p0, __p1) __extension__ ({ \
23666   int32x2_t __s0 = __p0; \
23667   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
23668   int32x2_t __ret; \
23669   __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 2); \
23670   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
23671   __ret; \
23672 })
23673 #endif
23674 
23675 #ifdef __LITTLE_ENDIAN__
23676 #define vshr_n_s64(__p0, __p1) __extension__ ({ \
23677   int64x1_t __s0 = __p0; \
23678   int64x1_t __ret; \
23679   __ret = (int64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 3); \
23680   __ret; \
23681 })
23682 #else
23683 #define vshr_n_s64(__p0, __p1) __extension__ ({ \
23684   int64x1_t __s0 = __p0; \
23685   int64x1_t __ret; \
23686   __ret = (int64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 3); \
23687   __ret; \
23688 })
23689 #endif
23690 
23691 #ifdef __LITTLE_ENDIAN__
23692 #define vshr_n_s16(__p0, __p1) __extension__ ({ \
23693   int16x4_t __s0 = __p0; \
23694   int16x4_t __ret; \
23695   __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 1); \
23696   __ret; \
23697 })
23698 #else
23699 #define vshr_n_s16(__p0, __p1) __extension__ ({ \
23700   int16x4_t __s0 = __p0; \
23701   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
23702   int16x4_t __ret; \
23703   __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 1); \
23704   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
23705   __ret; \
23706 })
23707 #endif
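/* Usage sketch: vshr_n_* / vshrq_n_* shift every lane right by a compile-time constant;
 * signed variants use an arithmetic shift, unsigned variants a logical shift. A minimal,
 * hypothetical example halving each signed 16-bit lane:
 *
 *   static inline int16x8_t halve_lanes(int16x8_t v) {
 *     return vshrq_n_s16(v, 1);  // arithmetic shift right by 1 (rounds toward negative infinity)
 *   }
 */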
23708 
23709 #ifdef __LITTLE_ENDIAN__
23710 #define vshrn_n_u32(__p0, __p1) __extension__ ({ \
23711   uint32x4_t __s0 = __p0; \
23712   uint16x4_t __ret; \
23713   __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \
23714   __ret; \
23715 })
23716 #else
23717 #define vshrn_n_u32(__p0, __p1) __extension__ ({ \
23718   uint32x4_t __s0 = __p0; \
23719   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
23720   uint16x4_t __ret; \
23721   __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 17); \
23722   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
23723   __ret; \
23724 })
23725 #define __noswap_vshrn_n_u32(__p0, __p1) __extension__ ({ \
23726   uint32x4_t __s0 = __p0; \
23727   uint16x4_t __ret; \
23728   __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \
23729   __ret; \
23730 })
23731 #endif
23732 
23733 #ifdef __LITTLE_ENDIAN__
23734 #define vshrn_n_u64(__p0, __p1) __extension__ ({ \
23735   uint64x2_t __s0 = __p0; \
23736   uint32x2_t __ret; \
23737   __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \
23738   __ret; \
23739 })
23740 #else
23741 #define vshrn_n_u64(__p0, __p1) __extension__ ({ \
23742   uint64x2_t __s0 = __p0; \
23743   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
23744   uint32x2_t __ret; \
23745   __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 18); \
23746   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
23747   __ret; \
23748 })
23749 #define __noswap_vshrn_n_u64(__p0, __p1) __extension__ ({ \
23750   uint64x2_t __s0 = __p0; \
23751   uint32x2_t __ret; \
23752   __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \
23753   __ret; \
23754 })
23755 #endif
23756 
23757 #ifdef __LITTLE_ENDIAN__
23758 #define vshrn_n_u16(__p0, __p1) __extension__ ({ \
23759   uint16x8_t __s0 = __p0; \
23760   uint8x8_t __ret; \
23761   __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \
23762   __ret; \
23763 })
23764 #else
23765 #define vshrn_n_u16(__p0, __p1) __extension__ ({ \
23766   uint16x8_t __s0 = __p0; \
23767   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
23768   uint8x8_t __ret; \
23769   __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 16); \
23770   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
23771   __ret; \
23772 })
23773 #define __noswap_vshrn_n_u16(__p0, __p1) __extension__ ({ \
23774   uint16x8_t __s0 = __p0; \
23775   uint8x8_t __ret; \
23776   __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \
23777   __ret; \
23778 })
23779 #endif
23780 
23781 #ifdef __LITTLE_ENDIAN__
23782 #define vshrn_n_s32(__p0, __p1) __extension__ ({ \
23783   int32x4_t __s0 = __p0; \
23784   int16x4_t __ret; \
23785   __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \
23786   __ret; \
23787 })
23788 #else
23789 #define vshrn_n_s32(__p0, __p1) __extension__ ({ \
23790   int32x4_t __s0 = __p0; \
23791   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
23792   int16x4_t __ret; \
23793   __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 1); \
23794   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
23795   __ret; \
23796 })
23797 #define __noswap_vshrn_n_s32(__p0, __p1) __extension__ ({ \
23798   int32x4_t __s0 = __p0; \
23799   int16x4_t __ret; \
23800   __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \
23801   __ret; \
23802 })
23803 #endif
23804 
23805 #ifdef __LITTLE_ENDIAN__
23806 #define vshrn_n_s64(__p0, __p1) __extension__ ({ \
23807   int64x2_t __s0 = __p0; \
23808   int32x2_t __ret; \
23809   __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \
23810   __ret; \
23811 })
23812 #else
23813 #define vshrn_n_s64(__p0, __p1) __extension__ ({ \
23814   int64x2_t __s0 = __p0; \
23815   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
23816   int32x2_t __ret; \
23817   __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 2); \
23818   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
23819   __ret; \
23820 })
23821 #define __noswap_vshrn_n_s64(__p0, __p1) __extension__ ({ \
23822   int64x2_t __s0 = __p0; \
23823   int32x2_t __ret; \
23824   __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \
23825   __ret; \
23826 })
23827 #endif
23828 
23829 #ifdef __LITTLE_ENDIAN__
23830 #define vshrn_n_s16(__p0, __p1) __extension__ ({ \
23831   int16x8_t __s0 = __p0; \
23832   int8x8_t __ret; \
23833   __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \
23834   __ret; \
23835 })
23836 #else
23837 #define vshrn_n_s16(__p0, __p1) __extension__ ({ \
23838   int16x8_t __s0 = __p0; \
23839   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
23840   int8x8_t __ret; \
23841   __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 0); \
23842   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
23843   __ret; \
23844 })
23845 #define __noswap_vshrn_n_s16(__p0, __p1) __extension__ ({ \
23846   int16x8_t __s0 = __p0; \
23847   int8x8_t __ret; \
23848   __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \
23849   __ret; \
23850 })
23851 #endif
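/* Usage sketch: vshrn_n_* shifts each lane right by the immediate and narrows it to half
 * the width, truncating the upper bits. A common pattern is descaling a widened product
 * back to 8 bits; a minimal, hypothetical example:
 *
 *   static inline uint8x8_t descale_to_u8(uint16x8_t wide) {
 *     return vshrn_n_u16(wide, 8);  // uint16x8_t -> uint8x8_t, keeping bits 15:8 of each lane
 *   }
 */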
23852 
23853 #ifdef __LITTLE_ENDIAN__
23854 #define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \
23855   poly8x8_t __s0 = __p0; \
23856   poly8x8_t __s1 = __p1; \
23857   poly8x8_t __ret; \
23858   __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
23859   __ret; \
23860 })
23861 #else
23862 #define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \
23863   poly8x8_t __s0 = __p0; \
23864   poly8x8_t __s1 = __p1; \
23865   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
23866   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
23867   poly8x8_t __ret; \
23868   __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
23869   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
23870   __ret; \
23871 })
23872 #endif
23873 
23874 #ifdef __LITTLE_ENDIAN__
23875 #define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \
23876   poly16x4_t __s0 = __p0; \
23877   poly16x4_t __s1 = __p1; \
23878   poly16x4_t __ret; \
23879   __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
23880   __ret; \
23881 })
23882 #else
23883 #define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \
23884   poly16x4_t __s0 = __p0; \
23885   poly16x4_t __s1 = __p1; \
23886   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
23887   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
23888   poly16x4_t __ret; \
23889   __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
23890   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
23891   __ret; \
23892 })
23893 #endif
23894 
23895 #ifdef __LITTLE_ENDIAN__
23896 #define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \
23897   poly8x16_t __s0 = __p0; \
23898   poly8x16_t __s1 = __p1; \
23899   poly8x16_t __ret; \
23900   __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
23901   __ret; \
23902 })
23903 #else
23904 #define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \
23905   poly8x16_t __s0 = __p0; \
23906   poly8x16_t __s1 = __p1; \
23907   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
23908   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
23909   poly8x16_t __ret; \
23910   __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
23911   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
23912   __ret; \
23913 })
23914 #endif
23915 
23916 #ifdef __LITTLE_ENDIAN__
23917 #define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \
23918   poly16x8_t __s0 = __p0; \
23919   poly16x8_t __s1 = __p1; \
23920   poly16x8_t __ret; \
23921   __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
23922   __ret; \
23923 })
23924 #else
23925 #define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \
23926   poly16x8_t __s0 = __p0; \
23927   poly16x8_t __s1 = __p1; \
23928   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
23929   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
23930   poly16x8_t __ret; \
23931   __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
23932   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
23933   __ret; \
23934 })
23935 #endif
23936 
23937 #ifdef __LITTLE_ENDIAN__
23938 #define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \
23939   uint8x16_t __s0 = __p0; \
23940   uint8x16_t __s1 = __p1; \
23941   uint8x16_t __ret; \
23942   __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
23943   __ret; \
23944 })
23945 #else
23946 #define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \
23947   uint8x16_t __s0 = __p0; \
23948   uint8x16_t __s1 = __p1; \
23949   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
23950   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
23951   uint8x16_t __ret; \
23952   __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
23953   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
23954   __ret; \
23955 })
23956 #endif
23957 
23958 #ifdef __LITTLE_ENDIAN__
23959 #define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \
23960   uint32x4_t __s0 = __p0; \
23961   uint32x4_t __s1 = __p1; \
23962   uint32x4_t __ret; \
23963   __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
23964   __ret; \
23965 })
23966 #else
23967 #define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \
23968   uint32x4_t __s0 = __p0; \
23969   uint32x4_t __s1 = __p1; \
23970   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
23971   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
23972   uint32x4_t __ret; \
23973   __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
23974   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
23975   __ret; \
23976 })
23977 #endif
23978 
23979 #ifdef __LITTLE_ENDIAN__
23980 #define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \
23981   uint64x2_t __s0 = __p0; \
23982   uint64x2_t __s1 = __p1; \
23983   uint64x2_t __ret; \
23984   __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
23985   __ret; \
23986 })
23987 #else
23988 #define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \
23989   uint64x2_t __s0 = __p0; \
23990   uint64x2_t __s1 = __p1; \
23991   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
23992   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
23993   uint64x2_t __ret; \
23994   __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
23995   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
23996   __ret; \
23997 })
23998 #endif
23999 
24000 #ifdef __LITTLE_ENDIAN__
24001 #define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \
24002   uint16x8_t __s0 = __p0; \
24003   uint16x8_t __s1 = __p1; \
24004   uint16x8_t __ret; \
24005   __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
24006   __ret; \
24007 })
24008 #else
24009 #define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \
24010   uint16x8_t __s0 = __p0; \
24011   uint16x8_t __s1 = __p1; \
24012   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
24013   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
24014   uint16x8_t __ret; \
24015   __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
24016   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
24017   __ret; \
24018 })
24019 #endif
24020 
24021 #ifdef __LITTLE_ENDIAN__
24022 #define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \
24023   int8x16_t __s0 = __p0; \
24024   int8x16_t __s1 = __p1; \
24025   int8x16_t __ret; \
24026   __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
24027   __ret; \
24028 })
24029 #else
24030 #define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \
24031   int8x16_t __s0 = __p0; \
24032   int8x16_t __s1 = __p1; \
24033   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
24034   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
24035   int8x16_t __ret; \
24036   __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
24037   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
24038   __ret; \
24039 })
24040 #endif
24041 
24042 #ifdef __LITTLE_ENDIAN__
24043 #define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \
24044   int32x4_t __s0 = __p0; \
24045   int32x4_t __s1 = __p1; \
24046   int32x4_t __ret; \
24047   __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
24048   __ret; \
24049 })
24050 #else
24051 #define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \
24052   int32x4_t __s0 = __p0; \
24053   int32x4_t __s1 = __p1; \
24054   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
24055   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
24056   int32x4_t __ret; \
24057   __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
24058   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
24059   __ret; \
24060 })
24061 #endif
24062 
24063 #ifdef __LITTLE_ENDIAN__
24064 #define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \
24065   int64x2_t __s0 = __p0; \
24066   int64x2_t __s1 = __p1; \
24067   int64x2_t __ret; \
24068   __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
24069   __ret; \
24070 })
24071 #else
24072 #define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \
24073   int64x2_t __s0 = __p0; \
24074   int64x2_t __s1 = __p1; \
24075   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
24076   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
24077   int64x2_t __ret; \
24078   __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
24079   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
24080   __ret; \
24081 })
24082 #endif
24083 
24084 #ifdef __LITTLE_ENDIAN__
24085 #define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \
24086   int16x8_t __s0 = __p0; \
24087   int16x8_t __s1 = __p1; \
24088   int16x8_t __ret; \
24089   __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
24090   __ret; \
24091 })
24092 #else
24093 #define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \
24094   int16x8_t __s0 = __p0; \
24095   int16x8_t __s1 = __p1; \
24096   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
24097   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
24098   int16x8_t __ret; \
24099   __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
24100   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
24101   __ret; \
24102 })
24103 #endif
24104 
24105 #ifdef __LITTLE_ENDIAN__
24106 #define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \
24107   uint8x8_t __s0 = __p0; \
24108   uint8x8_t __s1 = __p1; \
24109   uint8x8_t __ret; \
24110   __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
24111   __ret; \
24112 })
24113 #else
24114 #define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \
24115   uint8x8_t __s0 = __p0; \
24116   uint8x8_t __s1 = __p1; \
24117   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
24118   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
24119   uint8x8_t __ret; \
24120   __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
24121   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
24122   __ret; \
24123 })
24124 #endif
24125 
24126 #ifdef __LITTLE_ENDIAN__
24127 #define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \
24128   uint32x2_t __s0 = __p0; \
24129   uint32x2_t __s1 = __p1; \
24130   uint32x2_t __ret; \
24131   __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
24132   __ret; \
24133 })
24134 #else
24135 #define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \
24136   uint32x2_t __s0 = __p0; \
24137   uint32x2_t __s1 = __p1; \
24138   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
24139   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
24140   uint32x2_t __ret; \
24141   __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
24142   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
24143   __ret; \
24144 })
24145 #endif
24146 
24147 #ifdef __LITTLE_ENDIAN__
24148 #define vsli_n_u64(__p0, __p1, __p2) __extension__ ({ \
24149   uint64x1_t __s0 = __p0; \
24150   uint64x1_t __s1 = __p1; \
24151   uint64x1_t __ret; \
24152   __ret = (uint64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
24153   __ret; \
24154 })
24155 #else
24156 #define vsli_n_u64(__p0, __p1, __p2) __extension__ ({ \
24157   uint64x1_t __s0 = __p0; \
24158   uint64x1_t __s1 = __p1; \
24159   uint64x1_t __ret; \
24160   __ret = (uint64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
24161   __ret; \
24162 })
24163 #endif
24164 
24165 #ifdef __LITTLE_ENDIAN__
24166 #define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \
24167   uint16x4_t __s0 = __p0; \
24168   uint16x4_t __s1 = __p1; \
24169   uint16x4_t __ret; \
24170   __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
24171   __ret; \
24172 })
24173 #else
24174 #define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \
24175   uint16x4_t __s0 = __p0; \
24176   uint16x4_t __s1 = __p1; \
24177   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
24178   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
24179   uint16x4_t __ret; \
24180   __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
24181   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
24182   __ret; \
24183 })
24184 #endif
24185 
24186 #ifdef __LITTLE_ENDIAN__
24187 #define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \
24188   int8x8_t __s0 = __p0; \
24189   int8x8_t __s1 = __p1; \
24190   int8x8_t __ret; \
24191   __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
24192   __ret; \
24193 })
24194 #else
24195 #define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \
24196   int8x8_t __s0 = __p0; \
24197   int8x8_t __s1 = __p1; \
24198   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
24199   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
24200   int8x8_t __ret; \
24201   __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
24202   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
24203   __ret; \
24204 })
24205 #endif
24206 
24207 #ifdef __LITTLE_ENDIAN__
24208 #define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \
24209   int32x2_t __s0 = __p0; \
24210   int32x2_t __s1 = __p1; \
24211   int32x2_t __ret; \
24212   __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
24213   __ret; \
24214 })
24215 #else
24216 #define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \
24217   int32x2_t __s0 = __p0; \
24218   int32x2_t __s1 = __p1; \
24219   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
24220   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
24221   int32x2_t __ret; \
24222   __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
24223   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
24224   __ret; \
24225 })
24226 #endif
24227 
24228 #ifdef __LITTLE_ENDIAN__
24229 #define vsli_n_s64(__p0, __p1, __p2) __extension__ ({ \
24230   int64x1_t __s0 = __p0; \
24231   int64x1_t __s1 = __p1; \
24232   int64x1_t __ret; \
24233   __ret = (int64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
24234   __ret; \
24235 })
24236 #else
24237 #define vsli_n_s64(__p0, __p1, __p2) __extension__ ({ \
24238   int64x1_t __s0 = __p0; \
24239   int64x1_t __s1 = __p1; \
24240   int64x1_t __ret; \
24241   __ret = (int64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
24242   __ret; \
24243 })
24244 #endif
24245 
24246 #ifdef __LITTLE_ENDIAN__
24247 #define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \
24248   int16x4_t __s0 = __p0; \
24249   int16x4_t __s1 = __p1; \
24250   int16x4_t __ret; \
24251   __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
24252   __ret; \
24253 })
24254 #else
24255 #define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \
24256   int16x4_t __s0 = __p0; \
24257   int16x4_t __s1 = __p1; \
24258   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
24259   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
24260   int16x4_t __ret; \
24261   __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
24262   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
24263   __ret; \
24264 })
24265 #endif
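/* Usage sketch: vsli_n_* shifts each lane of the second operand left by the immediate and
 * inserts it into the first operand, leaving the low n bits of the destination lane
 * untouched (per lane: (b << n) | (a & ((1 << n) - 1))). A minimal, hypothetical example
 * packing a 4-bit field above an existing low nibble:
 *
 *   static inline uint8x8_t pack_nibbles(uint8x8_t low, uint8x8_t high) {
 *     return vsli_n_u8(low, high, 4);  // per lane: (high << 4) | (low & 0x0F)
 *   }
 */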
24266 
24267 #ifdef __LITTLE_ENDIAN__
24268 #define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
24269   uint8x16_t __s0 = __p0; \
24270   uint8x16_t __s1 = __p1; \
24271   uint8x16_t __ret; \
24272   __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
24273   __ret; \
24274 })
24275 #else
24276 #define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
24277   uint8x16_t __s0 = __p0; \
24278   uint8x16_t __s1 = __p1; \
24279   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
24280   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
24281   uint8x16_t __ret; \
24282   __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
24283   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
24284   __ret; \
24285 })
24286 #endif
24287 
24288 #ifdef __LITTLE_ENDIAN__
24289 #define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
24290   uint32x4_t __s0 = __p0; \
24291   uint32x4_t __s1 = __p1; \
24292   uint32x4_t __ret; \
24293   __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
24294   __ret; \
24295 })
24296 #else
24297 #define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
24298   uint32x4_t __s0 = __p0; \
24299   uint32x4_t __s1 = __p1; \
24300   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
24301   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
24302   uint32x4_t __ret; \
24303   __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
24304   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
24305   __ret; \
24306 })
24307 #endif
24308 
24309 #ifdef __LITTLE_ENDIAN__
24310 #define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
24311   uint64x2_t __s0 = __p0; \
24312   uint64x2_t __s1 = __p1; \
24313   uint64x2_t __ret; \
24314   __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
24315   __ret; \
24316 })
24317 #else
24318 #define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
24319   uint64x2_t __s0 = __p0; \
24320   uint64x2_t __s1 = __p1; \
24321   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
24322   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
24323   uint64x2_t __ret; \
24324   __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
24325   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
24326   __ret; \
24327 })
24328 #endif
24329 
24330 #ifdef __LITTLE_ENDIAN__
24331 #define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
24332   uint16x8_t __s0 = __p0; \
24333   uint16x8_t __s1 = __p1; \
24334   uint16x8_t __ret; \
24335   __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
24336   __ret; \
24337 })
24338 #else
24339 #define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
24340   uint16x8_t __s0 = __p0; \
24341   uint16x8_t __s1 = __p1; \
24342   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
24343   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
24344   uint16x8_t __ret; \
24345   __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
24346   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
24347   __ret; \
24348 })
24349 #endif
24350 
24351 #ifdef __LITTLE_ENDIAN__
24352 #define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
24353   int8x16_t __s0 = __p0; \
24354   int8x16_t __s1 = __p1; \
24355   int8x16_t __ret; \
24356   __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
24357   __ret; \
24358 })
24359 #else
24360 #define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
24361   int8x16_t __s0 = __p0; \
24362   int8x16_t __s1 = __p1; \
24363   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
24364   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
24365   int8x16_t __ret; \
24366   __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
24367   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
24368   __ret; \
24369 })
24370 #endif
24371 
24372 #ifdef __LITTLE_ENDIAN__
24373 #define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
24374   int32x4_t __s0 = __p0; \
24375   int32x4_t __s1 = __p1; \
24376   int32x4_t __ret; \
24377   __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
24378   __ret; \
24379 })
24380 #else
24381 #define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
24382   int32x4_t __s0 = __p0; \
24383   int32x4_t __s1 = __p1; \
24384   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
24385   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
24386   int32x4_t __ret; \
24387   __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
24388   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
24389   __ret; \
24390 })
24391 #endif
24392 
24393 #ifdef __LITTLE_ENDIAN__
24394 #define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
24395   int64x2_t __s0 = __p0; \
24396   int64x2_t __s1 = __p1; \
24397   int64x2_t __ret; \
24398   __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
24399   __ret; \
24400 })
24401 #else
24402 #define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
24403   int64x2_t __s0 = __p0; \
24404   int64x2_t __s1 = __p1; \
24405   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
24406   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
24407   int64x2_t __ret; \
24408   __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
24409   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
24410   __ret; \
24411 })
24412 #endif
24413 
24414 #ifdef __LITTLE_ENDIAN__
24415 #define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
24416   int16x8_t __s0 = __p0; \
24417   int16x8_t __s1 = __p1; \
24418   int16x8_t __ret; \
24419   __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
24420   __ret; \
24421 })
24422 #else
24423 #define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
24424   int16x8_t __s0 = __p0; \
24425   int16x8_t __s1 = __p1; \
24426   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
24427   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
24428   int16x8_t __ret; \
24429   __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
24430   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
24431   __ret; \
24432 })
24433 #endif
24434 
24435 #ifdef __LITTLE_ENDIAN__
24436 #define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
24437   uint8x8_t __s0 = __p0; \
24438   uint8x8_t __s1 = __p1; \
24439   uint8x8_t __ret; \
24440   __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
24441   __ret; \
24442 })
24443 #else
24444 #define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
24445   uint8x8_t __s0 = __p0; \
24446   uint8x8_t __s1 = __p1; \
24447   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
24448   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
24449   uint8x8_t __ret; \
24450   __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
24451   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
24452   __ret; \
24453 })
24454 #endif
24455 
24456 #ifdef __LITTLE_ENDIAN__
24457 #define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
24458   uint32x2_t __s0 = __p0; \
24459   uint32x2_t __s1 = __p1; \
24460   uint32x2_t __ret; \
24461   __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
24462   __ret; \
24463 })
24464 #else
24465 #define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
24466   uint32x2_t __s0 = __p0; \
24467   uint32x2_t __s1 = __p1; \
24468   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
24469   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
24470   uint32x2_t __ret; \
24471   __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
24472   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
24473   __ret; \
24474 })
24475 #endif
24476 
24477 #ifdef __LITTLE_ENDIAN__
24478 #define vsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
24479   uint64x1_t __s0 = __p0; \
24480   uint64x1_t __s1 = __p1; \
24481   uint64x1_t __ret; \
24482   __ret = (uint64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
24483   __ret; \
24484 })
24485 #else
24486 #define vsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
24487   uint64x1_t __s0 = __p0; \
24488   uint64x1_t __s1 = __p1; \
24489   uint64x1_t __ret; \
24490   __ret = (uint64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
24491   __ret; \
24492 })
24493 #endif
24494 
24495 #ifdef __LITTLE_ENDIAN__
24496 #define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
24497   uint16x4_t __s0 = __p0; \
24498   uint16x4_t __s1 = __p1; \
24499   uint16x4_t __ret; \
24500   __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
24501   __ret; \
24502 })
24503 #else
24504 #define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
24505   uint16x4_t __s0 = __p0; \
24506   uint16x4_t __s1 = __p1; \
24507   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
24508   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
24509   uint16x4_t __ret; \
24510   __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
24511   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
24512   __ret; \
24513 })
24514 #endif
24515 
24516 #ifdef __LITTLE_ENDIAN__
24517 #define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
24518   int8x8_t __s0 = __p0; \
24519   int8x8_t __s1 = __p1; \
24520   int8x8_t __ret; \
24521   __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
24522   __ret; \
24523 })
24524 #else
24525 #define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
24526   int8x8_t __s0 = __p0; \
24527   int8x8_t __s1 = __p1; \
24528   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
24529   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
24530   int8x8_t __ret; \
24531   __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
24532   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
24533   __ret; \
24534 })
24535 #endif
24536 
24537 #ifdef __LITTLE_ENDIAN__
24538 #define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
24539   int32x2_t __s0 = __p0; \
24540   int32x2_t __s1 = __p1; \
24541   int32x2_t __ret; \
24542   __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
24543   __ret; \
24544 })
24545 #else
24546 #define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
24547   int32x2_t __s0 = __p0; \
24548   int32x2_t __s1 = __p1; \
24549   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
24550   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
24551   int32x2_t __ret; \
24552   __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
24553   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
24554   __ret; \
24555 })
24556 #endif
24557 
24558 #ifdef __LITTLE_ENDIAN__
24559 #define vsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
24560   int64x1_t __s0 = __p0; \
24561   int64x1_t __s1 = __p1; \
24562   int64x1_t __ret; \
24563   __ret = (int64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
24564   __ret; \
24565 })
24566 #else
24567 #define vsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
24568   int64x1_t __s0 = __p0; \
24569   int64x1_t __s1 = __p1; \
24570   int64x1_t __ret; \
24571   __ret = (int64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
24572   __ret; \
24573 })
24574 #endif
24575 
24576 #ifdef __LITTLE_ENDIAN__
24577 #define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
24578   int16x4_t __s0 = __p0; \
24579   int16x4_t __s1 = __p1; \
24580   int16x4_t __ret; \
24581   __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
24582   __ret; \
24583 })
24584 #else
24585 #define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
24586   int16x4_t __s0 = __p0; \
24587   int16x4_t __s1 = __p1; \
24588   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
24589   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
24590   int16x4_t __ret; \
24591   __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
24592   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
24593   __ret; \
24594 })
24595 #endif
24596 
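/* vsri_n_<type> / vsriq_n_<type>: shift right and insert.  Each lane of the
 * second operand is shifted right by the immediate and inserted into the
 * matching lane of the first operand, leaving the top immediate bits of the
 * destination lane untouched.
 * Illustrative use (variable names are examples only):
 *   uint32x4_t packed = vsriq_n_u32(hi, lo, 16);  // keep hi's top 16 bits, insert lo >> 16
 */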
24597 #ifdef __LITTLE_ENDIAN__
24598 #define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \
24599   poly8x8_t __s0 = __p0; \
24600   poly8x8_t __s1 = __p1; \
24601   poly8x8_t __ret; \
24602   __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
24603   __ret; \
24604 })
24605 #else
24606 #define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \
24607   poly8x8_t __s0 = __p0; \
24608   poly8x8_t __s1 = __p1; \
24609   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
24610   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
24611   poly8x8_t __ret; \
24612   __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
24613   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
24614   __ret; \
24615 })
24616 #endif
24617 
24618 #ifdef __LITTLE_ENDIAN__
24619 #define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \
24620   poly16x4_t __s0 = __p0; \
24621   poly16x4_t __s1 = __p1; \
24622   poly16x4_t __ret; \
24623   __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
24624   __ret; \
24625 })
24626 #else
24627 #define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \
24628   poly16x4_t __s0 = __p0; \
24629   poly16x4_t __s1 = __p1; \
24630   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
24631   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
24632   poly16x4_t __ret; \
24633   __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
24634   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
24635   __ret; \
24636 })
24637 #endif
24638 
24639 #ifdef __LITTLE_ENDIAN__
24640 #define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \
24641   poly8x16_t __s0 = __p0; \
24642   poly8x16_t __s1 = __p1; \
24643   poly8x16_t __ret; \
24644   __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
24645   __ret; \
24646 })
24647 #else
24648 #define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \
24649   poly8x16_t __s0 = __p0; \
24650   poly8x16_t __s1 = __p1; \
24651   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
24652   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
24653   poly8x16_t __ret; \
24654   __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
24655   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
24656   __ret; \
24657 })
24658 #endif
24659 
24660 #ifdef __LITTLE_ENDIAN__
24661 #define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \
24662   poly16x8_t __s0 = __p0; \
24663   poly16x8_t __s1 = __p1; \
24664   poly16x8_t __ret; \
24665   __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
24666   __ret; \
24667 })
24668 #else
24669 #define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \
24670   poly16x8_t __s0 = __p0; \
24671   poly16x8_t __s1 = __p1; \
24672   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
24673   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
24674   poly16x8_t __ret; \
24675   __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
24676   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
24677   __ret; \
24678 })
24679 #endif
24680 
24681 #ifdef __LITTLE_ENDIAN__
24682 #define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \
24683   uint8x16_t __s0 = __p0; \
24684   uint8x16_t __s1 = __p1; \
24685   uint8x16_t __ret; \
24686   __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
24687   __ret; \
24688 })
24689 #else
24690 #define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \
24691   uint8x16_t __s0 = __p0; \
24692   uint8x16_t __s1 = __p1; \
24693   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
24694   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
24695   uint8x16_t __ret; \
24696   __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
24697   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
24698   __ret; \
24699 })
24700 #endif
24701 
24702 #ifdef __LITTLE_ENDIAN__
24703 #define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \
24704   uint32x4_t __s0 = __p0; \
24705   uint32x4_t __s1 = __p1; \
24706   uint32x4_t __ret; \
24707   __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
24708   __ret; \
24709 })
24710 #else
24711 #define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \
24712   uint32x4_t __s0 = __p0; \
24713   uint32x4_t __s1 = __p1; \
24714   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
24715   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
24716   uint32x4_t __ret; \
24717   __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
24718   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
24719   __ret; \
24720 })
24721 #endif
24722 
24723 #ifdef __LITTLE_ENDIAN__
24724 #define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \
24725   uint64x2_t __s0 = __p0; \
24726   uint64x2_t __s1 = __p1; \
24727   uint64x2_t __ret; \
24728   __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
24729   __ret; \
24730 })
24731 #else
24732 #define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \
24733   uint64x2_t __s0 = __p0; \
24734   uint64x2_t __s1 = __p1; \
24735   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
24736   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
24737   uint64x2_t __ret; \
24738   __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
24739   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
24740   __ret; \
24741 })
24742 #endif
24743 
24744 #ifdef __LITTLE_ENDIAN__
24745 #define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \
24746   uint16x8_t __s0 = __p0; \
24747   uint16x8_t __s1 = __p1; \
24748   uint16x8_t __ret; \
24749   __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
24750   __ret; \
24751 })
24752 #else
24753 #define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \
24754   uint16x8_t __s0 = __p0; \
24755   uint16x8_t __s1 = __p1; \
24756   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
24757   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
24758   uint16x8_t __ret; \
24759   __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
24760   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
24761   __ret; \
24762 })
24763 #endif
24764 
24765 #ifdef __LITTLE_ENDIAN__
24766 #define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \
24767   int8x16_t __s0 = __p0; \
24768   int8x16_t __s1 = __p1; \
24769   int8x16_t __ret; \
24770   __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
24771   __ret; \
24772 })
24773 #else
24774 #define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \
24775   int8x16_t __s0 = __p0; \
24776   int8x16_t __s1 = __p1; \
24777   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
24778   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
24779   int8x16_t __ret; \
24780   __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
24781   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
24782   __ret; \
24783 })
24784 #endif
24785 
24786 #ifdef __LITTLE_ENDIAN__
24787 #define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \
24788   int32x4_t __s0 = __p0; \
24789   int32x4_t __s1 = __p1; \
24790   int32x4_t __ret; \
24791   __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
24792   __ret; \
24793 })
24794 #else
24795 #define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \
24796   int32x4_t __s0 = __p0; \
24797   int32x4_t __s1 = __p1; \
24798   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
24799   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
24800   int32x4_t __ret; \
24801   __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
24802   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
24803   __ret; \
24804 })
24805 #endif
24806 
24807 #ifdef __LITTLE_ENDIAN__
24808 #define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \
24809   int64x2_t __s0 = __p0; \
24810   int64x2_t __s1 = __p1; \
24811   int64x2_t __ret; \
24812   __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
24813   __ret; \
24814 })
24815 #else
24816 #define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \
24817   int64x2_t __s0 = __p0; \
24818   int64x2_t __s1 = __p1; \
24819   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
24820   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
24821   int64x2_t __ret; \
24822   __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
24823   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
24824   __ret; \
24825 })
24826 #endif
24827 
24828 #ifdef __LITTLE_ENDIAN__
24829 #define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \
24830   int16x8_t __s0 = __p0; \
24831   int16x8_t __s1 = __p1; \
24832   int16x8_t __ret; \
24833   __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
24834   __ret; \
24835 })
24836 #else
24837 #define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \
24838   int16x8_t __s0 = __p0; \
24839   int16x8_t __s1 = __p1; \
24840   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
24841   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
24842   int16x8_t __ret; \
24843   __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
24844   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
24845   __ret; \
24846 })
24847 #endif
24848 
24849 #ifdef __LITTLE_ENDIAN__
24850 #define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \
24851   uint8x8_t __s0 = __p0; \
24852   uint8x8_t __s1 = __p1; \
24853   uint8x8_t __ret; \
24854   __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
24855   __ret; \
24856 })
24857 #else
24858 #define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \
24859   uint8x8_t __s0 = __p0; \
24860   uint8x8_t __s1 = __p1; \
24861   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
24862   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
24863   uint8x8_t __ret; \
24864   __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
24865   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
24866   __ret; \
24867 })
24868 #endif
24869 
24870 #ifdef __LITTLE_ENDIAN__
24871 #define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \
24872   uint32x2_t __s0 = __p0; \
24873   uint32x2_t __s1 = __p1; \
24874   uint32x2_t __ret; \
24875   __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
24876   __ret; \
24877 })
24878 #else
24879 #define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \
24880   uint32x2_t __s0 = __p0; \
24881   uint32x2_t __s1 = __p1; \
24882   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
24883   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
24884   uint32x2_t __ret; \
24885   __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
24886   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
24887   __ret; \
24888 })
24889 #endif
24890 
24891 #ifdef __LITTLE_ENDIAN__
24892 #define vsri_n_u64(__p0, __p1, __p2) __extension__ ({ \
24893   uint64x1_t __s0 = __p0; \
24894   uint64x1_t __s1 = __p1; \
24895   uint64x1_t __ret; \
24896   __ret = (uint64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
24897   __ret; \
24898 })
24899 #else
24900 #define vsri_n_u64(__p0, __p1, __p2) __extension__ ({ \
24901   uint64x1_t __s0 = __p0; \
24902   uint64x1_t __s1 = __p1; \
24903   uint64x1_t __ret; \
24904   __ret = (uint64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
24905   __ret; \
24906 })
24907 #endif
24908 
24909 #ifdef __LITTLE_ENDIAN__
24910 #define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \
24911   uint16x4_t __s0 = __p0; \
24912   uint16x4_t __s1 = __p1; \
24913   uint16x4_t __ret; \
24914   __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
24915   __ret; \
24916 })
24917 #else
24918 #define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \
24919   uint16x4_t __s0 = __p0; \
24920   uint16x4_t __s1 = __p1; \
24921   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
24922   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
24923   uint16x4_t __ret; \
24924   __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
24925   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
24926   __ret; \
24927 })
24928 #endif
24929 
24930 #ifdef __LITTLE_ENDIAN__
24931 #define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \
24932   int8x8_t __s0 = __p0; \
24933   int8x8_t __s1 = __p1; \
24934   int8x8_t __ret; \
24935   __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
24936   __ret; \
24937 })
24938 #else
24939 #define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \
24940   int8x8_t __s0 = __p0; \
24941   int8x8_t __s1 = __p1; \
24942   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
24943   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
24944   int8x8_t __ret; \
24945   __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
24946   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
24947   __ret; \
24948 })
24949 #endif
24950 
24951 #ifdef __LITTLE_ENDIAN__
24952 #define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \
24953   int32x2_t __s0 = __p0; \
24954   int32x2_t __s1 = __p1; \
24955   int32x2_t __ret; \
24956   __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
24957   __ret; \
24958 })
24959 #else
24960 #define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \
24961   int32x2_t __s0 = __p0; \
24962   int32x2_t __s1 = __p1; \
24963   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
24964   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
24965   int32x2_t __ret; \
24966   __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
24967   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
24968   __ret; \
24969 })
24970 #endif
24971 
24972 #ifdef __LITTLE_ENDIAN__
24973 #define vsri_n_s64(__p0, __p1, __p2) __extension__ ({ \
24974   int64x1_t __s0 = __p0; \
24975   int64x1_t __s1 = __p1; \
24976   int64x1_t __ret; \
24977   __ret = (int64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
24978   __ret; \
24979 })
24980 #else
24981 #define vsri_n_s64(__p0, __p1, __p2) __extension__ ({ \
24982   int64x1_t __s0 = __p0; \
24983   int64x1_t __s1 = __p1; \
24984   int64x1_t __ret; \
24985   __ret = (int64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
24986   __ret; \
24987 })
24988 #endif
24989 
24990 #ifdef __LITTLE_ENDIAN__
24991 #define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \
24992   int16x4_t __s0 = __p0; \
24993   int16x4_t __s1 = __p1; \
24994   int16x4_t __ret; \
24995   __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
24996   __ret; \
24997 })
24998 #else
24999 #define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \
25000   int16x4_t __s0 = __p0; \
25001   int16x4_t __s1 = __p1; \
25002   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
25003   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
25004   int16x4_t __ret; \
25005   __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
25006   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
25007   __ret; \
25008 })
25009 #endif
25010 
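/* vst1_<type> / vst1q_<type>: store one whole 64-bit or 128-bit vector to the
 * address given by the first argument.  As elsewhere in this header,
 * big-endian builds reverse the lanes before calling the builtin, which
 * expects little-endian lane order.
 * Illustrative use (variable names are examples only):
 *   vst1q_u8(dst, pixels);  // store all 16 bytes of `pixels` starting at dst
 */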
25011 #ifdef __LITTLE_ENDIAN__
25012 #define vst1_p8(__p0, __p1) __extension__ ({ \
25013   poly8x8_t __s1 = __p1; \
25014   __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 4); \
25015 })
25016 #else
25017 #define vst1_p8(__p0, __p1) __extension__ ({ \
25018   poly8x8_t __s1 = __p1; \
25019   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
25020   __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 4); \
25021 })
25022 #endif
25023 
25024 #ifdef __LITTLE_ENDIAN__
25025 #define vst1_p16(__p0, __p1) __extension__ ({ \
25026   poly16x4_t __s1 = __p1; \
25027   __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 5); \
25028 })
25029 #else
25030 #define vst1_p16(__p0, __p1) __extension__ ({ \
25031   poly16x4_t __s1 = __p1; \
25032   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
25033   __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 5); \
25034 })
25035 #endif
25036 
25037 #ifdef __LITTLE_ENDIAN__
25038 #define vst1q_p8(__p0, __p1) __extension__ ({ \
25039   poly8x16_t __s1 = __p1; \
25040   __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 36); \
25041 })
25042 #else
25043 #define vst1q_p8(__p0, __p1) __extension__ ({ \
25044   poly8x16_t __s1 = __p1; \
25045   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
25046   __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 36); \
25047 })
25048 #endif
25049 
25050 #ifdef __LITTLE_ENDIAN__
25051 #define vst1q_p16(__p0, __p1) __extension__ ({ \
25052   poly16x8_t __s1 = __p1; \
25053   __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 37); \
25054 })
25055 #else
25056 #define vst1q_p16(__p0, __p1) __extension__ ({ \
25057   poly16x8_t __s1 = __p1; \
25058   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
25059   __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 37); \
25060 })
25061 #endif
25062 
25063 #ifdef __LITTLE_ENDIAN__
25064 #define vst1q_u8(__p0, __p1) __extension__ ({ \
25065   uint8x16_t __s1 = __p1; \
25066   __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 48); \
25067 })
25068 #else
25069 #define vst1q_u8(__p0, __p1) __extension__ ({ \
25070   uint8x16_t __s1 = __p1; \
25071   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
25072   __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 48); \
25073 })
25074 #endif
25075 
25076 #ifdef __LITTLE_ENDIAN__
25077 #define vst1q_u32(__p0, __p1) __extension__ ({ \
25078   uint32x4_t __s1 = __p1; \
25079   __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 50); \
25080 })
25081 #else
25082 #define vst1q_u32(__p0, __p1) __extension__ ({ \
25083   uint32x4_t __s1 = __p1; \
25084   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
25085   __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 50); \
25086 })
25087 #endif
25088 
25089 #ifdef __LITTLE_ENDIAN__
25090 #define vst1q_u64(__p0, __p1) __extension__ ({ \
25091   uint64x2_t __s1 = __p1; \
25092   __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 51); \
25093 })
25094 #else
25095 #define vst1q_u64(__p0, __p1) __extension__ ({ \
25096   uint64x2_t __s1 = __p1; \
25097   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
25098   __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 51); \
25099 })
25100 #endif
25101 
25102 #ifdef __LITTLE_ENDIAN__
25103 #define vst1q_u16(__p0, __p1) __extension__ ({ \
25104   uint16x8_t __s1 = __p1; \
25105   __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 49); \
25106 })
25107 #else
25108 #define vst1q_u16(__p0, __p1) __extension__ ({ \
25109   uint16x8_t __s1 = __p1; \
25110   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
25111   __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 49); \
25112 })
25113 #endif
25114 
25115 #ifdef __LITTLE_ENDIAN__
25116 #define vst1q_s8(__p0, __p1) __extension__ ({ \
25117   int8x16_t __s1 = __p1; \
25118   __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 32); \
25119 })
25120 #else
25121 #define vst1q_s8(__p0, __p1) __extension__ ({ \
25122   int8x16_t __s1 = __p1; \
25123   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
25124   __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 32); \
25125 })
25126 #endif
25127 
25128 #ifdef __LITTLE_ENDIAN__
25129 #define vst1q_f32(__p0, __p1) __extension__ ({ \
25130   float32x4_t __s1 = __p1; \
25131   __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 41); \
25132 })
25133 #else
25134 #define vst1q_f32(__p0, __p1) __extension__ ({ \
25135   float32x4_t __s1 = __p1; \
25136   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
25137   __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 41); \
25138 })
25139 #endif
25140 
25141 #ifdef __LITTLE_ENDIAN__
25142 #define vst1q_f16(__p0, __p1) __extension__ ({ \
25143   float16x8_t __s1 = __p1; \
25144   __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 40); \
25145 })
25146 #else
25147 #define vst1q_f16(__p0, __p1) __extension__ ({ \
25148   float16x8_t __s1 = __p1; \
25149   float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
25150   __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 40); \
25151 })
25152 #endif
25153 
25154 #ifdef __LITTLE_ENDIAN__
25155 #define vst1q_s32(__p0, __p1) __extension__ ({ \
25156   int32x4_t __s1 = __p1; \
25157   __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 34); \
25158 })
25159 #else
25160 #define vst1q_s32(__p0, __p1) __extension__ ({ \
25161   int32x4_t __s1 = __p1; \
25162   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
25163   __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 34); \
25164 })
25165 #endif
25166 
25167 #ifdef __LITTLE_ENDIAN__
25168 #define vst1q_s64(__p0, __p1) __extension__ ({ \
25169   int64x2_t __s1 = __p1; \
25170   __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 35); \
25171 })
25172 #else
25173 #define vst1q_s64(__p0, __p1) __extension__ ({ \
25174   int64x2_t __s1 = __p1; \
25175   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
25176   __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 35); \
25177 })
25178 #endif
25179 
25180 #ifdef __LITTLE_ENDIAN__
25181 #define vst1q_s16(__p0, __p1) __extension__ ({ \
25182   int16x8_t __s1 = __p1; \
25183   __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 33); \
25184 })
25185 #else
25186 #define vst1q_s16(__p0, __p1) __extension__ ({ \
25187   int16x8_t __s1 = __p1; \
25188   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
25189   __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 33); \
25190 })
25191 #endif
25192 
25193 #ifdef __LITTLE_ENDIAN__
25194 #define vst1_u8(__p0, __p1) __extension__ ({ \
25195   uint8x8_t __s1 = __p1; \
25196   __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 16); \
25197 })
25198 #else
25199 #define vst1_u8(__p0, __p1) __extension__ ({ \
25200   uint8x8_t __s1 = __p1; \
25201   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
25202   __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 16); \
25203 })
25204 #endif
25205 
25206 #ifdef __LITTLE_ENDIAN__
25207 #define vst1_u32(__p0, __p1) __extension__ ({ \
25208   uint32x2_t __s1 = __p1; \
25209   __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 18); \
25210 })
25211 #else
25212 #define vst1_u32(__p0, __p1) __extension__ ({ \
25213   uint32x2_t __s1 = __p1; \
25214   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
25215   __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 18); \
25216 })
25217 #endif
25218 
25219 #ifdef __LITTLE_ENDIAN__
25220 #define vst1_u64(__p0, __p1) __extension__ ({ \
25221   uint64x1_t __s1 = __p1; \
25222   __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 19); \
25223 })
25224 #else
25225 #define vst1_u64(__p0, __p1) __extension__ ({ \
25226   uint64x1_t __s1 = __p1; \
25227   __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 19); \
25228 })
25229 #endif
25230 
25231 #ifdef __LITTLE_ENDIAN__
25232 #define vst1_u16(__p0, __p1) __extension__ ({ \
25233   uint16x4_t __s1 = __p1; \
25234   __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 17); \
25235 })
25236 #else
25237 #define vst1_u16(__p0, __p1) __extension__ ({ \
25238   uint16x4_t __s1 = __p1; \
25239   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
25240   __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 17); \
25241 })
25242 #endif
25243 
25244 #ifdef __LITTLE_ENDIAN__
25245 #define vst1_s8(__p0, __p1) __extension__ ({ \
25246   int8x8_t __s1 = __p1; \
25247   __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 0); \
25248 })
25249 #else
25250 #define vst1_s8(__p0, __p1) __extension__ ({ \
25251   int8x8_t __s1 = __p1; \
25252   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
25253   __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 0); \
25254 })
25255 #endif
25256 
25257 #ifdef __LITTLE_ENDIAN__
25258 #define vst1_f32(__p0, __p1) __extension__ ({ \
25259   float32x2_t __s1 = __p1; \
25260   __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 9); \
25261 })
25262 #else
25263 #define vst1_f32(__p0, __p1) __extension__ ({ \
25264   float32x2_t __s1 = __p1; \
25265   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
25266   __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 9); \
25267 })
25268 #endif
25269 
25270 #ifdef __LITTLE_ENDIAN__
25271 #define vst1_f16(__p0, __p1) __extension__ ({ \
25272   float16x4_t __s1 = __p1; \
25273   __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 8); \
25274 })
25275 #else
25276 #define vst1_f16(__p0, __p1) __extension__ ({ \
25277   float16x4_t __s1 = __p1; \
25278   float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
25279   __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 8); \
25280 })
25281 #endif
25282 
25283 #ifdef __LITTLE_ENDIAN__
25284 #define vst1_s32(__p0, __p1) __extension__ ({ \
25285   int32x2_t __s1 = __p1; \
25286   __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 2); \
25287 })
25288 #else
25289 #define vst1_s32(__p0, __p1) __extension__ ({ \
25290   int32x2_t __s1 = __p1; \
25291   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
25292   __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 2); \
25293 })
25294 #endif
25295 
25296 #ifdef __LITTLE_ENDIAN__
25297 #define vst1_s64(__p0, __p1) __extension__ ({ \
25298   int64x1_t __s1 = __p1; \
25299   __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 3); \
25300 })
25301 #else
25302 #define vst1_s64(__p0, __p1) __extension__ ({ \
25303   int64x1_t __s1 = __p1; \
25304   __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 3); \
25305 })
25306 #endif
25307 
25308 #ifdef __LITTLE_ENDIAN__
25309 #define vst1_s16(__p0, __p1) __extension__ ({ \
25310   int16x4_t __s1 = __p1; \
25311   __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 1); \
25312 })
25313 #else
25314 #define vst1_s16(__p0, __p1) __extension__ ({ \
25315   int16x4_t __s1 = __p1; \
25316   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
25317   __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 1); \
25318 })
25319 #endif
25320 
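/* vst1_lane_<type> / vst1q_lane_<type>: store a single lane of a vector to
 * memory; the last argument selects the lane and must be a compile-time
 * constant.
 * Illustrative use (variable names are examples only):
 *   vst1q_lane_f32(out, v, 3);  // store lane 3 of v (one float) at out
 */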
25321 #ifdef __LITTLE_ENDIAN__
25322 #define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
25323   poly8x8_t __s1 = __p1; \
25324   __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \
25325 })
25326 #else
25327 #define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
25328   poly8x8_t __s1 = __p1; \
25329   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
25330   __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \
25331 })
25332 #endif
25333 
25334 #ifdef __LITTLE_ENDIAN__
25335 #define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
25336   poly16x4_t __s1 = __p1; \
25337   __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \
25338 })
25339 #else
25340 #define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
25341   poly16x4_t __s1 = __p1; \
25342   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
25343   __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \
25344 })
25345 #endif
25346 
25347 #ifdef __LITTLE_ENDIAN__
25348 #define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
25349   poly8x16_t __s1 = __p1; \
25350   __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \
25351 })
25352 #else
25353 #define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
25354   poly8x16_t __s1 = __p1; \
25355   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
25356   __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \
25357 })
25358 #endif
25359 
25360 #ifdef __LITTLE_ENDIAN__
25361 #define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
25362   poly16x8_t __s1 = __p1; \
25363   __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \
25364 })
25365 #else
25366 #define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
25367   poly16x8_t __s1 = __p1; \
25368   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
25369   __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \
25370 })
25371 #endif
25372 
25373 #ifdef __LITTLE_ENDIAN__
25374 #define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
25375   uint8x16_t __s1 = __p1; \
25376   __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \
25377 })
25378 #else
25379 #define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
25380   uint8x16_t __s1 = __p1; \
25381   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
25382   __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \
25383 })
25384 #endif
25385 
25386 #ifdef __LITTLE_ENDIAN__
25387 #define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
25388   uint32x4_t __s1 = __p1; \
25389   __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \
25390 })
25391 #else
25392 #define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
25393   uint32x4_t __s1 = __p1; \
25394   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
25395   __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \
25396 })
25397 #endif
25398 
25399 #ifdef __LITTLE_ENDIAN__
25400 #define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
25401   uint64x2_t __s1 = __p1; \
25402   __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \
25403 })
25404 #else
25405 #define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
25406   uint64x2_t __s1 = __p1; \
25407   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
25408   __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \
25409 })
25410 #endif
25411 
25412 #ifdef __LITTLE_ENDIAN__
25413 #define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
25414   uint16x8_t __s1 = __p1; \
25415   __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \
25416 })
25417 #else
25418 #define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
25419   uint16x8_t __s1 = __p1; \
25420   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
25421   __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \
25422 })
25423 #endif
25424 
25425 #ifdef __LITTLE_ENDIAN__
25426 #define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
25427   int8x16_t __s1 = __p1; \
25428   __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \
25429 })
25430 #else
25431 #define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
25432   int8x16_t __s1 = __p1; \
25433   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
25434   __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \
25435 })
25436 #endif
25437 
25438 #ifdef __LITTLE_ENDIAN__
25439 #define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
25440   float32x4_t __s1 = __p1; \
25441   __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \
25442 })
25443 #else
25444 #define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
25445   float32x4_t __s1 = __p1; \
25446   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
25447   __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \
25448 })
25449 #endif
25450 
25451 #ifdef __LITTLE_ENDIAN__
25452 #define vst1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
25453   float16x8_t __s1 = __p1; \
25454   __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 40); \
25455 })
25456 #else
25457 #define vst1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
25458   float16x8_t __s1 = __p1; \
25459   float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
25460   __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \
25461 })
25462 #endif
25463 
25464 #ifdef __LITTLE_ENDIAN__
25465 #define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
25466   int32x4_t __s1 = __p1; \
25467   __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \
25468 })
25469 #else
25470 #define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
25471   int32x4_t __s1 = __p1; \
25472   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
25473   __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \
25474 })
25475 #endif
25476 
25477 #ifdef __LITTLE_ENDIAN__
25478 #define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
25479   int64x2_t __s1 = __p1; \
25480   __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \
25481 })
25482 #else
25483 #define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
25484   int64x2_t __s1 = __p1; \
25485   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
25486   __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \
25487 })
25488 #endif
25489 
25490 #ifdef __LITTLE_ENDIAN__
25491 #define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
25492   int16x8_t __s1 = __p1; \
25493   __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \
25494 })
25495 #else
25496 #define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
25497   int16x8_t __s1 = __p1; \
25498   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
25499   __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \
25500 })
25501 #endif
25502 
25503 #ifdef __LITTLE_ENDIAN__
25504 #define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
25505   uint8x8_t __s1 = __p1; \
25506   __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \
25507 })
25508 #else
25509 #define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
25510   uint8x8_t __s1 = __p1; \
25511   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
25512   __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \
25513 })
25514 #endif
25515 
25516 #ifdef __LITTLE_ENDIAN__
25517 #define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
25518   uint32x2_t __s1 = __p1; \
25519   __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \
25520 })
25521 #else
25522 #define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
25523   uint32x2_t __s1 = __p1; \
25524   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
25525   __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \
25526 })
25527 #endif
25528 
25529 #ifdef __LITTLE_ENDIAN__
25530 #define vst1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
25531   uint64x1_t __s1 = __p1; \
25532   __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
25533 })
25534 #else
25535 #define vst1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
25536   uint64x1_t __s1 = __p1; \
25537   __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
25538 })
25539 #endif
25540 
25541 #ifdef __LITTLE_ENDIAN__
25542 #define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
25543   uint16x4_t __s1 = __p1; \
25544   __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \
25545 })
25546 #else
25547 #define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
25548   uint16x4_t __s1 = __p1; \
25549   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
25550   __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \
25551 })
25552 #endif
25553 
25554 #ifdef __LITTLE_ENDIAN__
25555 #define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
25556   int8x8_t __s1 = __p1; \
25557   __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \
25558 })
25559 #else
25560 #define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
25561   int8x8_t __s1 = __p1; \
25562   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
25563   __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \
25564 })
25565 #endif
25566 
25567 #ifdef __LITTLE_ENDIAN__
25568 #define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
25569   float32x2_t __s1 = __p1; \
25570   __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \
25571 })
25572 #else
25573 #define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
25574   float32x2_t __s1 = __p1; \
25575   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
25576   __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \
25577 })
25578 #endif
25579 
25580 #ifdef __LITTLE_ENDIAN__
25581 #define vst1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
25582   float16x4_t __s1 = __p1; \
25583   __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 8); \
25584 })
25585 #else
25586 #define vst1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
25587   float16x4_t __s1 = __p1; \
25588   float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
25589   __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \
25590 })
25591 #endif
25592 
25593 #ifdef __LITTLE_ENDIAN__
25594 #define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
25595   int32x2_t __s1 = __p1; \
25596   __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \
25597 })
25598 #else
25599 #define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
25600   int32x2_t __s1 = __p1; \
25601   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
25602   __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \
25603 })
25604 #endif
25605 
25606 #ifdef __LITTLE_ENDIAN__
25607 #define vst1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
25608   int64x1_t __s1 = __p1; \
25609   __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
25610 })
25611 #else
25612 #define vst1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
25613   int64x1_t __s1 = __p1; \
25614   __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
25615 })
25616 #endif
25617 
25618 #ifdef __LITTLE_ENDIAN__
25619 #define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
25620   int16x4_t __s1 = __p1; \
25621   __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \
25622 })
25623 #else
25624 #define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
25625   int16x4_t __s1 = __p1; \
25626   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
25627   __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \
25628 })
25629 #endif
25630 
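/* vst2_<type> / vst2q_<type>: interleaving store of a two-vector structure.
 * The two vectors in the x2_t argument are written to memory with their lanes
 * interleaved (val[0][0], val[1][0], val[0][1], val[1][1], ...).
 * Illustrative use (variable names are examples only):
 *   uint8x8x2_t planes = { { even, odd } };
 *   vst2_u8(dst, planes);  // writes 16 interleaved bytes starting at dst
 */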
25631 #ifdef __LITTLE_ENDIAN__
25632 #define vst2_p8(__p0, __p1) __extension__ ({ \
25633   poly8x8x2_t __s1 = __p1; \
25634   __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \
25635 })
25636 #else
25637 #define vst2_p8(__p0, __p1) __extension__ ({ \
25638   poly8x8x2_t __s1 = __p1; \
25639   poly8x8x2_t __rev1; \
25640   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
25641   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
25642   __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \
25643 })
25644 #endif
25645 
25646 #ifdef __LITTLE_ENDIAN__
25647 #define vst2_p16(__p0, __p1) __extension__ ({ \
25648   poly16x4x2_t __s1 = __p1; \
25649   __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \
25650 })
25651 #else
25652 #define vst2_p16(__p0, __p1) __extension__ ({ \
25653   poly16x4x2_t __s1 = __p1; \
25654   poly16x4x2_t __rev1; \
25655   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
25656   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
25657   __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \
25658 })
25659 #endif
25660 
25661 #ifdef __LITTLE_ENDIAN__
25662 #define vst2q_p8(__p0, __p1) __extension__ ({ \
25663   poly8x16x2_t __s1 = __p1; \
25664   __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \
25665 })
25666 #else
25667 #define vst2q_p8(__p0, __p1) __extension__ ({ \
25668   poly8x16x2_t __s1 = __p1; \
25669   poly8x16x2_t __rev1; \
25670   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
25671   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
25672   __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \
25673 })
25674 #endif
25675 
25676 #ifdef __LITTLE_ENDIAN__
25677 #define vst2q_p16(__p0, __p1) __extension__ ({ \
25678   poly16x8x2_t __s1 = __p1; \
25679   __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \
25680 })
25681 #else
25682 #define vst2q_p16(__p0, __p1) __extension__ ({ \
25683   poly16x8x2_t __s1 = __p1; \
25684   poly16x8x2_t __rev1; \
25685   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
25686   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
25687   __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \
25688 })
25689 #endif
25690 
25691 #ifdef __LITTLE_ENDIAN__
25692 #define vst2q_u8(__p0, __p1) __extension__ ({ \
25693   uint8x16x2_t __s1 = __p1; \
25694   __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \
25695 })
25696 #else
25697 #define vst2q_u8(__p0, __p1) __extension__ ({ \
25698   uint8x16x2_t __s1 = __p1; \
25699   uint8x16x2_t __rev1; \
25700   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
25701   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
25702   __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \
25703 })
25704 #endif
25705 
25706 #ifdef __LITTLE_ENDIAN__
25707 #define vst2q_u32(__p0, __p1) __extension__ ({ \
25708   uint32x4x2_t __s1 = __p1; \
25709   __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \
25710 })
25711 #else
25712 #define vst2q_u32(__p0, __p1) __extension__ ({ \
25713   uint32x4x2_t __s1 = __p1; \
25714   uint32x4x2_t __rev1; \
25715   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
25716   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
25717   __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \
25718 })
25719 #endif
25720 
25721 #ifdef __LITTLE_ENDIAN__
25722 #define vst2q_u16(__p0, __p1) __extension__ ({ \
25723   uint16x8x2_t __s1 = __p1; \
25724   __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \
25725 })
25726 #else
25727 #define vst2q_u16(__p0, __p1) __extension__ ({ \
25728   uint16x8x2_t __s1 = __p1; \
25729   uint16x8x2_t __rev1; \
25730   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
25731   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
25732   __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \
25733 })
25734 #endif
25735 
25736 #ifdef __LITTLE_ENDIAN__
25737 #define vst2q_s8(__p0, __p1) __extension__ ({ \
25738   int8x16x2_t __s1 = __p1; \
25739   __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \
25740 })
25741 #else
25742 #define vst2q_s8(__p0, __p1) __extension__ ({ \
25743   int8x16x2_t __s1 = __p1; \
25744   int8x16x2_t __rev1; \
25745   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
25746   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
25747   __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \
25748 })
25749 #endif
25750 
25751 #ifdef __LITTLE_ENDIAN__
25752 #define vst2q_f32(__p0, __p1) __extension__ ({ \
25753   float32x4x2_t __s1 = __p1; \
25754   __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 41); \
25755 })
25756 #else
25757 #define vst2q_f32(__p0, __p1) __extension__ ({ \
25758   float32x4x2_t __s1 = __p1; \
25759   float32x4x2_t __rev1; \
25760   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
25761   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
25762   __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 41); \
25763 })
25764 #endif
25765 
25766 #ifdef __LITTLE_ENDIAN__
25767 #define vst2q_f16(__p0, __p1) __extension__ ({ \
25768   float16x8x2_t __s1 = __p1; \
25769   __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 40); \
25770 })
25771 #else
25772 #define vst2q_f16(__p0, __p1) __extension__ ({ \
25773   float16x8x2_t __s1 = __p1; \
25774   float16x8x2_t __rev1; \
25775   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
25776   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
25777   __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 40); \
25778 })
25779 #endif
25780 
25781 #ifdef __LITTLE_ENDIAN__
25782 #define vst2q_s32(__p0, __p1) __extension__ ({ \
25783   int32x4x2_t __s1 = __p1; \
25784   __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 34); \
25785 })
25786 #else
25787 #define vst2q_s32(__p0, __p1) __extension__ ({ \
25788   int32x4x2_t __s1 = __p1; \
25789   int32x4x2_t __rev1; \
25790   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
25791   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
25792   __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 34); \
25793 })
25794 #endif
25795 
25796 #ifdef __LITTLE_ENDIAN__
25797 #define vst2q_s16(__p0, __p1) __extension__ ({ \
25798   int16x8x2_t __s1 = __p1; \
25799   __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 33); \
25800 })
25801 #else
25802 #define vst2q_s16(__p0, __p1) __extension__ ({ \
25803   int16x8x2_t __s1 = __p1; \
25804   int16x8x2_t __rev1; \
25805   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
25806   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
25807   __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 33); \
25808 })
25809 #endif
25810 
25811 #ifdef __LITTLE_ENDIAN__
25812 #define vst2_u8(__p0, __p1) __extension__ ({ \
25813   uint8x8x2_t __s1 = __p1; \
25814   __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \
25815 })
25816 #else
25817 #define vst2_u8(__p0, __p1) __extension__ ({ \
25818   uint8x8x2_t __s1 = __p1; \
25819   uint8x8x2_t __rev1; \
25820   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
25821   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
25822   __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \
25823 })
25824 #endif
25825 
25826 #ifdef __LITTLE_ENDIAN__
25827 #define vst2_u32(__p0, __p1) __extension__ ({ \
25828   uint32x2x2_t __s1 = __p1; \
25829   __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \
25830 })
25831 #else
25832 #define vst2_u32(__p0, __p1) __extension__ ({ \
25833   uint32x2x2_t __s1 = __p1; \
25834   uint32x2x2_t __rev1; \
25835   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
25836   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
25837   __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \
25838 })
25839 #endif
25840 
25841 #ifdef __LITTLE_ENDIAN__
25842 #define vst2_u64(__p0, __p1) __extension__ ({ \
25843   uint64x1x2_t __s1 = __p1; \
25844   __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \
25845 })
25846 #else
25847 #define vst2_u64(__p0, __p1) __extension__ ({ \
25848   uint64x1x2_t __s1 = __p1; \
25849   __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \
25850 })
25851 #endif
25852 
25853 #ifdef __LITTLE_ENDIAN__
25854 #define vst2_u16(__p0, __p1) __extension__ ({ \
25855   uint16x4x2_t __s1 = __p1; \
25856   __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 17); \
25857 })
25858 #else
25859 #define vst2_u16(__p0, __p1) __extension__ ({ \
25860   uint16x4x2_t __s1 = __p1; \
25861   uint16x4x2_t __rev1; \
25862   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
25863   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
25864   __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \
25865 })
25866 #endif
25867 
25868 #ifdef __LITTLE_ENDIAN__
25869 #define vst2_s8(__p0, __p1) __extension__ ({ \
25870   int8x8x2_t __s1 = __p1; \
25871   __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \
25872 })
25873 #else
25874 #define vst2_s8(__p0, __p1) __extension__ ({ \
25875   int8x8x2_t __s1 = __p1; \
25876   int8x8x2_t __rev1; \
25877   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
25878   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
25879   __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \
25880 })
25881 #endif
25882 
25883 #ifdef __LITTLE_ENDIAN__
25884 #define vst2_f32(__p0, __p1) __extension__ ({ \
25885   float32x2x2_t __s1 = __p1; \
25886   __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 9); \
25887 })
25888 #else
25889 #define vst2_f32(__p0, __p1) __extension__ ({ \
25890   float32x2x2_t __s1 = __p1; \
25891   float32x2x2_t __rev1; \
25892   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
25893   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
25894   __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 9); \
25895 })
25896 #endif
25897 
25898 #ifdef __LITTLE_ENDIAN__
25899 #define vst2_f16(__p0, __p1) __extension__ ({ \
25900   float16x4x2_t __s1 = __p1; \
25901   __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 8); \
25902 })
25903 #else
25904 #define vst2_f16(__p0, __p1) __extension__ ({ \
25905   float16x4x2_t __s1 = __p1; \
25906   float16x4x2_t __rev1; \
25907   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
25908   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
25909   __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 8); \
25910 })
25911 #endif
25912 
25913 #ifdef __LITTLE_ENDIAN__
25914 #define vst2_s32(__p0, __p1) __extension__ ({ \
25915   int32x2x2_t __s1 = __p1; \
25916   __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 2); \
25917 })
25918 #else
25919 #define vst2_s32(__p0, __p1) __extension__ ({ \
25920   int32x2x2_t __s1 = __p1; \
25921   int32x2x2_t __rev1; \
25922   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
25923   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
25924   __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 2); \
25925 })
25926 #endif
25927 
25928 #ifdef __LITTLE_ENDIAN__
25929 #define vst2_s64(__p0, __p1) __extension__ ({ \
25930   int64x1x2_t __s1 = __p1; \
25931   __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 3); \
25932 })
25933 #else
25934 #define vst2_s64(__p0, __p1) __extension__ ({ \
25935   int64x1x2_t __s1 = __p1; \
25936   __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 3); \
25937 })
25938 #endif
25939 
25940 #ifdef __LITTLE_ENDIAN__
25941 #define vst2_s16(__p0, __p1) __extension__ ({ \
25942   int16x4x2_t __s1 = __p1; \
25943   __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 1); \
25944 })
25945 #else
25946 #define vst2_s16(__p0, __p1) __extension__ ({ \
25947   int16x4x2_t __s1 = __p1; \
25948   int16x4x2_t __rev1; \
25949   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
25950   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
25951   __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 1); \
25952 })
25953 #endif
25954 
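/*
 * Editorial note (not part of the original header): the *_lane variants
 * below store only lane __p2 of each member vector (two elements for
 * vst2_lane, three for vst3_lane, and so on); __p2 must be a constant
 * lane index valid for the vector type.  A minimal sketch for
 * vst2_lane_s32, with illustrative names:
 *
 *   int32_t dst[2];
 *   int32x2x2_t pair = { vdup_n_s32(7), vdup_n_s32(9) };
 *   vst2_lane_s32(dst, pair, 1);   // dst = {7, 9}, taken from lane 1
 */
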
25955 #ifdef __LITTLE_ENDIAN__
25956 #define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
25957   poly8x8x2_t __s1 = __p1; \
25958   __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \
25959 })
25960 #else
25961 #define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
25962   poly8x8x2_t __s1 = __p1; \
25963   poly8x8x2_t __rev1; \
25964   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
25965   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
25966   __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \
25967 })
25968 #endif
25969 
25970 #ifdef __LITTLE_ENDIAN__
25971 #define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
25972   poly16x4x2_t __s1 = __p1; \
25973   __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \
25974 })
25975 #else
25976 #define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
25977   poly16x4x2_t __s1 = __p1; \
25978   poly16x4x2_t __rev1; \
25979   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
25980   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
25981   __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \
25982 })
25983 #endif
25984 
25985 #ifdef __LITTLE_ENDIAN__
25986 #define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
25987   poly16x8x2_t __s1 = __p1; \
25988   __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \
25989 })
25990 #else
25991 #define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
25992   poly16x8x2_t __s1 = __p1; \
25993   poly16x8x2_t __rev1; \
25994   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
25995   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
25996   __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \
25997 })
25998 #endif
25999 
26000 #ifdef __LITTLE_ENDIAN__
26001 #define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
26002   uint32x4x2_t __s1 = __p1; \
26003   __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \
26004 })
26005 #else
26006 #define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
26007   uint32x4x2_t __s1 = __p1; \
26008   uint32x4x2_t __rev1; \
26009   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26010   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26011   __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \
26012 })
26013 #endif
26014 
26015 #ifdef __LITTLE_ENDIAN__
26016 #define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
26017   uint16x8x2_t __s1 = __p1; \
26018   __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \
26019 })
26020 #else
26021 #define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
26022   uint16x8x2_t __s1 = __p1; \
26023   uint16x8x2_t __rev1; \
26024   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26025   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26026   __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \
26027 })
26028 #endif
26029 
26030 #ifdef __LITTLE_ENDIAN__
26031 #define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
26032   float32x4x2_t __s1 = __p1; \
26033   __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 41); \
26034 })
26035 #else
26036 #define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
26037   float32x4x2_t __s1 = __p1; \
26038   float32x4x2_t __rev1; \
26039   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26040   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26041   __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 41); \
26042 })
26043 #endif
26044 
26045 #ifdef __LITTLE_ENDIAN__
26046 #define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
26047   float16x8x2_t __s1 = __p1; \
26048   __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 40); \
26049 })
26050 #else
26051 #define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
26052   float16x8x2_t __s1 = __p1; \
26053   float16x8x2_t __rev1; \
26054   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26055   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26056   __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 40); \
26057 })
26058 #endif
26059 
26060 #ifdef __LITTLE_ENDIAN__
26061 #define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
26062   int32x4x2_t __s1 = __p1; \
26063   __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 34); \
26064 })
26065 #else
26066 #define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
26067   int32x4x2_t __s1 = __p1; \
26068   int32x4x2_t __rev1; \
26069   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26070   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26071   __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 34); \
26072 })
26073 #endif
26074 
26075 #ifdef __LITTLE_ENDIAN__
26076 #define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
26077   int16x8x2_t __s1 = __p1; \
26078   __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 33); \
26079 })
26080 #else
26081 #define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
26082   int16x8x2_t __s1 = __p1; \
26083   int16x8x2_t __rev1; \
26084   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26085   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26086   __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 33); \
26087 })
26088 #endif
26089 
26090 #ifdef __LITTLE_ENDIAN__
26091 #define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
26092   uint8x8x2_t __s1 = __p1; \
26093   __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \
26094 })
26095 #else
26096 #define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
26097   uint8x8x2_t __s1 = __p1; \
26098   uint8x8x2_t __rev1; \
26099   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26100   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26101   __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \
26102 })
26103 #endif
26104 
26105 #ifdef __LITTLE_ENDIAN__
26106 #define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
26107   uint32x2x2_t __s1 = __p1; \
26108   __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \
26109 })
26110 #else
26111 #define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
26112   uint32x2x2_t __s1 = __p1; \
26113   uint32x2x2_t __rev1; \
26114   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
26115   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
26116   __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \
26117 })
26118 #endif
26119 
26120 #ifdef __LITTLE_ENDIAN__
26121 #define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
26122   uint16x4x2_t __s1 = __p1; \
26123   __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \
26124 })
26125 #else
26126 #define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
26127   uint16x4x2_t __s1 = __p1; \
26128   uint16x4x2_t __rev1; \
26129   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26130   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26131   __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \
26132 })
26133 #endif
26134 
26135 #ifdef __LITTLE_ENDIAN__
26136 #define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
26137   int8x8x2_t __s1 = __p1; \
26138   __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \
26139 })
26140 #else
26141 #define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
26142   int8x8x2_t __s1 = __p1; \
26143   int8x8x2_t __rev1; \
26144   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26145   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26146   __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \
26147 })
26148 #endif
26149 
26150 #ifdef __LITTLE_ENDIAN__
26151 #define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
26152   float32x2x2_t __s1 = __p1; \
26153   __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 9); \
26154 })
26155 #else
26156 #define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
26157   float32x2x2_t __s1 = __p1; \
26158   float32x2x2_t __rev1; \
26159   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
26160   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
26161   __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 9); \
26162 })
26163 #endif
26164 
26165 #ifdef __LITTLE_ENDIAN__
26166 #define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
26167   float16x4x2_t __s1 = __p1; \
26168   __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 8); \
26169 })
26170 #else
26171 #define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
26172   float16x4x2_t __s1 = __p1; \
26173   float16x4x2_t __rev1; \
26174   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26175   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26176   __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 8); \
26177 })
26178 #endif
26179 
26180 #ifdef __LITTLE_ENDIAN__
26181 #define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
26182   int32x2x2_t __s1 = __p1; \
26183   __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 2); \
26184 })
26185 #else
26186 #define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
26187   int32x2x2_t __s1 = __p1; \
26188   int32x2x2_t __rev1; \
26189   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
26190   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
26191   __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 2); \
26192 })
26193 #endif
26194 
26195 #ifdef __LITTLE_ENDIAN__
26196 #define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
26197   int16x4x2_t __s1 = __p1; \
26198   __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 1); \
26199 })
26200 #else
26201 #define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
26202   int16x4x2_t __s1 = __p1; \
26203   int16x4x2_t __rev1; \
26204   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26205   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26206   __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 1); \
26207 })
26208 #endif
26209 
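/*
 * Editorial note (not part of the original header): vst3 stores three
 * vectors with a three-way interleave, e.g. turning separate R, G and B
 * planes back into packed RGB bytes.  A minimal sketch for vst3_u8, with
 * illustrative names:
 *
 *   uint8_t rgb[24];
 *   uint8x8x3_t px = { vdup_n_u8(10), vdup_n_u8(20), vdup_n_u8(30) };
 *   vst3_u8(rgb, px);   // rgb = {10,20,30, 10,20,30, ...}
 */
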
26210 #ifdef __LITTLE_ENDIAN__
26211 #define vst3_p8(__p0, __p1) __extension__ ({ \
26212   poly8x8x3_t __s1 = __p1; \
26213   __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \
26214 })
26215 #else
26216 #define vst3_p8(__p0, __p1) __extension__ ({ \
26217   poly8x8x3_t __s1 = __p1; \
26218   poly8x8x3_t __rev1; \
26219   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26220   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26221   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
26222   __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \
26223 })
26224 #endif
26225 
26226 #ifdef __LITTLE_ENDIAN__
26227 #define vst3_p16(__p0, __p1) __extension__ ({ \
26228   poly16x4x3_t __s1 = __p1; \
26229   __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \
26230 })
26231 #else
26232 #define vst3_p16(__p0, __p1) __extension__ ({ \
26233   poly16x4x3_t __s1 = __p1; \
26234   poly16x4x3_t __rev1; \
26235   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26236   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26237   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
26238   __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \
26239 })
26240 #endif
26241 
26242 #ifdef __LITTLE_ENDIAN__
26243 #define vst3q_p8(__p0, __p1) __extension__ ({ \
26244   poly8x16x3_t __s1 = __p1; \
26245   __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \
26246 })
26247 #else
26248 #define vst3q_p8(__p0, __p1) __extension__ ({ \
26249   poly8x16x3_t __s1 = __p1; \
26250   poly8x16x3_t __rev1; \
26251   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26252   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26253   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26254   __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \
26255 })
26256 #endif
26257 
26258 #ifdef __LITTLE_ENDIAN__
26259 #define vst3q_p16(__p0, __p1) __extension__ ({ \
26260   poly16x8x3_t __s1 = __p1; \
26261   __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 37); \
26262 })
26263 #else
26264 #define vst3q_p16(__p0, __p1) __extension__ ({ \
26265   poly16x8x3_t __s1 = __p1; \
26266   poly16x8x3_t __rev1; \
26267   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26268   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26269   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
26270   __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \
26271 })
26272 #endif
26273 
26274 #ifdef __LITTLE_ENDIAN__
26275 #define vst3q_u8(__p0, __p1) __extension__ ({ \
26276   uint8x16x3_t __s1 = __p1; \
26277   __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \
26278 })
26279 #else
26280 #define vst3q_u8(__p0, __p1) __extension__ ({ \
26281   uint8x16x3_t __s1 = __p1; \
26282   uint8x16x3_t __rev1; \
26283   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26284   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26285   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26286   __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \
26287 })
26288 #endif
26289 
26290 #ifdef __LITTLE_ENDIAN__
26291 #define vst3q_u32(__p0, __p1) __extension__ ({ \
26292   uint32x4x3_t __s1 = __p1; \
26293   __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \
26294 })
26295 #else
26296 #define vst3q_u32(__p0, __p1) __extension__ ({ \
26297   uint32x4x3_t __s1 = __p1; \
26298   uint32x4x3_t __rev1; \
26299   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26300   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26301   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
26302   __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \
26303 })
26304 #endif
26305 
26306 #ifdef __LITTLE_ENDIAN__
26307 #define vst3q_u16(__p0, __p1) __extension__ ({ \
26308   uint16x8x3_t __s1 = __p1; \
26309   __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 49); \
26310 })
26311 #else
26312 #define vst3q_u16(__p0, __p1) __extension__ ({ \
26313   uint16x8x3_t __s1 = __p1; \
26314   uint16x8x3_t __rev1; \
26315   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26316   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26317   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
26318   __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \
26319 })
26320 #endif
26321 
26322 #ifdef __LITTLE_ENDIAN__
26323 #define vst3q_s8(__p0, __p1) __extension__ ({ \
26324   int8x16x3_t __s1 = __p1; \
26325   __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 32); \
26326 })
26327 #else
26328 #define vst3q_s8(__p0, __p1) __extension__ ({ \
26329   int8x16x3_t __s1 = __p1; \
26330   int8x16x3_t __rev1; \
26331   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26332   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26333   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26334   __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \
26335 })
26336 #endif
26337 
26338 #ifdef __LITTLE_ENDIAN__
26339 #define vst3q_f32(__p0, __p1) __extension__ ({ \
26340   float32x4x3_t __s1 = __p1; \
26341   __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 41); \
26342 })
26343 #else
26344 #define vst3q_f32(__p0, __p1) __extension__ ({ \
26345   float32x4x3_t __s1 = __p1; \
26346   float32x4x3_t __rev1; \
26347   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26348   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26349   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
26350   __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 41); \
26351 })
26352 #endif
26353 
26354 #ifdef __LITTLE_ENDIAN__
26355 #define vst3q_f16(__p0, __p1) __extension__ ({ \
26356   float16x8x3_t __s1 = __p1; \
26357   __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 40); \
26358 })
26359 #else
26360 #define vst3q_f16(__p0, __p1) __extension__ ({ \
26361   float16x8x3_t __s1 = __p1; \
26362   float16x8x3_t __rev1; \
26363   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26364   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26365   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
26366   __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 40); \
26367 })
26368 #endif
26369 
26370 #ifdef __LITTLE_ENDIAN__
26371 #define vst3q_s32(__p0, __p1) __extension__ ({ \
26372   int32x4x3_t __s1 = __p1; \
26373   __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 34); \
26374 })
26375 #else
26376 #define vst3q_s32(__p0, __p1) __extension__ ({ \
26377   int32x4x3_t __s1 = __p1; \
26378   int32x4x3_t __rev1; \
26379   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26380   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26381   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
26382   __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 34); \
26383 })
26384 #endif
26385 
26386 #ifdef __LITTLE_ENDIAN__
26387 #define vst3q_s16(__p0, __p1) __extension__ ({ \
26388   int16x8x3_t __s1 = __p1; \
26389   __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 33); \
26390 })
26391 #else
26392 #define vst3q_s16(__p0, __p1) __extension__ ({ \
26393   int16x8x3_t __s1 = __p1; \
26394   int16x8x3_t __rev1; \
26395   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26396   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26397   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
26398   __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 33); \
26399 })
26400 #endif
26401 
26402 #ifdef __LITTLE_ENDIAN__
26403 #define vst3_u8(__p0, __p1) __extension__ ({ \
26404   uint8x8x3_t __s1 = __p1; \
26405   __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \
26406 })
26407 #else
26408 #define vst3_u8(__p0, __p1) __extension__ ({ \
26409   uint8x8x3_t __s1 = __p1; \
26410   uint8x8x3_t __rev1; \
26411   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26412   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26413   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
26414   __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \
26415 })
26416 #endif
26417 
26418 #ifdef __LITTLE_ENDIAN__
26419 #define vst3_u32(__p0, __p1) __extension__ ({ \
26420   uint32x2x3_t __s1 = __p1; \
26421   __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \
26422 })
26423 #else
26424 #define vst3_u32(__p0, __p1) __extension__ ({ \
26425   uint32x2x3_t __s1 = __p1; \
26426   uint32x2x3_t __rev1; \
26427   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
26428   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
26429   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
26430   __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \
26431 })
26432 #endif
26433 
26434 #ifdef __LITTLE_ENDIAN__
26435 #define vst3_u64(__p0, __p1) __extension__ ({ \
26436   uint64x1x3_t __s1 = __p1; \
26437   __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \
26438 })
26439 #else
26440 #define vst3_u64(__p0, __p1) __extension__ ({ \
26441   uint64x1x3_t __s1 = __p1; \
26442   __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \
26443 })
26444 #endif
26445 
26446 #ifdef __LITTLE_ENDIAN__
26447 #define vst3_u16(__p0, __p1) __extension__ ({ \
26448   uint16x4x3_t __s1 = __p1; \
26449   __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \
26450 })
26451 #else
26452 #define vst3_u16(__p0, __p1) __extension__ ({ \
26453   uint16x4x3_t __s1 = __p1; \
26454   uint16x4x3_t __rev1; \
26455   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26456   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26457   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
26458   __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \
26459 })
26460 #endif
26461 
26462 #ifdef __LITTLE_ENDIAN__
26463 #define vst3_s8(__p0, __p1) __extension__ ({ \
26464   int8x8x3_t __s1 = __p1; \
26465   __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \
26466 })
26467 #else
26468 #define vst3_s8(__p0, __p1) __extension__ ({ \
26469   int8x8x3_t __s1 = __p1; \
26470   int8x8x3_t __rev1; \
26471   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26472   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26473   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
26474   __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \
26475 })
26476 #endif
26477 
26478 #ifdef __LITTLE_ENDIAN__
26479 #define vst3_f32(__p0, __p1) __extension__ ({ \
26480   float32x2x3_t __s1 = __p1; \
26481   __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 9); \
26482 })
26483 #else
26484 #define vst3_f32(__p0, __p1) __extension__ ({ \
26485   float32x2x3_t __s1 = __p1; \
26486   float32x2x3_t __rev1; \
26487   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
26488   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
26489   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
26490   __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 9); \
26491 })
26492 #endif
26493 
26494 #ifdef __LITTLE_ENDIAN__
26495 #define vst3_f16(__p0, __p1) __extension__ ({ \
26496   float16x4x3_t __s1 = __p1; \
26497   __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 8); \
26498 })
26499 #else
26500 #define vst3_f16(__p0, __p1) __extension__ ({ \
26501   float16x4x3_t __s1 = __p1; \
26502   float16x4x3_t __rev1; \
26503   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26504   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26505   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
26506   __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 8); \
26507 })
26508 #endif
26509 
26510 #ifdef __LITTLE_ENDIAN__
26511 #define vst3_s32(__p0, __p1) __extension__ ({ \
26512   int32x2x3_t __s1 = __p1; \
26513   __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 2); \
26514 })
26515 #else
26516 #define vst3_s32(__p0, __p1) __extension__ ({ \
26517   int32x2x3_t __s1 = __p1; \
26518   int32x2x3_t __rev1; \
26519   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
26520   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
26521   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
26522   __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 2); \
26523 })
26524 #endif
26525 
26526 #ifdef __LITTLE_ENDIAN__
26527 #define vst3_s64(__p0, __p1) __extension__ ({ \
26528   int64x1x3_t __s1 = __p1; \
26529   __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \
26530 })
26531 #else
26532 #define vst3_s64(__p0, __p1) __extension__ ({ \
26533   int64x1x3_t __s1 = __p1; \
26534   __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \
26535 })
26536 #endif
26537 
26538 #ifdef __LITTLE_ENDIAN__
26539 #define vst3_s16(__p0, __p1) __extension__ ({ \
26540   int16x4x3_t __s1 = __p1; \
26541   __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 1); \
26542 })
26543 #else
26544 #define vst3_s16(__p0, __p1) __extension__ ({ \
26545   int16x4x3_t __s1 = __p1; \
26546   int16x4x3_t __rev1; \
26547   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26548   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26549   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
26550   __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 1); \
26551 })
26552 #endif
26553 
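/*
 * Editorial note (not part of the original header): as with vst2_lane,
 * the vst3_lane/vst3q_lane macros below write only lane __p2 of each of
 * the three member vectors (three elements total), again reversing the
 * lanes first on big-endian targets.  A minimal sketch for vst3_lane_u8,
 * with illustrative names:
 *
 *   uint8_t out[3];
 *   uint8x8x3_t trio = { vdup_n_u8(1), vdup_n_u8(2), vdup_n_u8(3) };
 *   vst3_lane_u8(out, trio, 5);   // out = {1, 2, 3}, taken from lane 5
 */
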
26554 #ifdef __LITTLE_ENDIAN__
26555 #define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
26556   poly8x8x3_t __s1 = __p1; \
26557   __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \
26558 })
26559 #else
26560 #define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
26561   poly8x8x3_t __s1 = __p1; \
26562   poly8x8x3_t __rev1; \
26563   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26564   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26565   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
26566   __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \
26567 })
26568 #endif
26569 
26570 #ifdef __LITTLE_ENDIAN__
26571 #define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
26572   poly16x4x3_t __s1 = __p1; \
26573   __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \
26574 })
26575 #else
26576 #define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
26577   poly16x4x3_t __s1 = __p1; \
26578   poly16x4x3_t __rev1; \
26579   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26580   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26581   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
26582   __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \
26583 })
26584 #endif
26585 
26586 #ifdef __LITTLE_ENDIAN__
26587 #define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
26588   poly16x8x3_t __s1 = __p1; \
26589   __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \
26590 })
26591 #else
26592 #define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
26593   poly16x8x3_t __s1 = __p1; \
26594   poly16x8x3_t __rev1; \
26595   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26596   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26597   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
26598   __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \
26599 })
26600 #endif
26601 
26602 #ifdef __LITTLE_ENDIAN__
26603 #define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
26604   uint32x4x3_t __s1 = __p1; \
26605   __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \
26606 })
26607 #else
26608 #define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
26609   uint32x4x3_t __s1 = __p1; \
26610   uint32x4x3_t __rev1; \
26611   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26612   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26613   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
26614   __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \
26615 })
26616 #endif
26617 
26618 #ifdef __LITTLE_ENDIAN__
26619 #define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
26620   uint16x8x3_t __s1 = __p1; \
26621   __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \
26622 })
26623 #else
26624 #define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
26625   uint16x8x3_t __s1 = __p1; \
26626   uint16x8x3_t __rev1; \
26627   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26628   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26629   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
26630   __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \
26631 })
26632 #endif
26633 
26634 #ifdef __LITTLE_ENDIAN__
26635 #define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
26636   float32x4x3_t __s1 = __p1; \
26637   __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 41); \
26638 })
26639 #else
26640 #define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
26641   float32x4x3_t __s1 = __p1; \
26642   float32x4x3_t __rev1; \
26643   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26644   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26645   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
26646   __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 41); \
26647 })
26648 #endif
26649 
26650 #ifdef __LITTLE_ENDIAN__
26651 #define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
26652   float16x8x3_t __s1 = __p1; \
26653   __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 40); \
26654 })
26655 #else
26656 #define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
26657   float16x8x3_t __s1 = __p1; \
26658   float16x8x3_t __rev1; \
26659   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26660   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26661   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
26662   __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 40); \
26663 })
26664 #endif
26665 
26666 #ifdef __LITTLE_ENDIAN__
26667 #define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
26668   int32x4x3_t __s1 = __p1; \
26669   __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 34); \
26670 })
26671 #else
26672 #define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
26673   int32x4x3_t __s1 = __p1; \
26674   int32x4x3_t __rev1; \
26675   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26676   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26677   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
26678   __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 34); \
26679 })
26680 #endif
26681 
26682 #ifdef __LITTLE_ENDIAN__
26683 #define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
26684   int16x8x3_t __s1 = __p1; \
26685   __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 33); \
26686 })
26687 #else
26688 #define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
26689   int16x8x3_t __s1 = __p1; \
26690   int16x8x3_t __rev1; \
26691   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26692   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26693   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
26694   __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 33); \
26695 })
26696 #endif
26697 
26698 #ifdef __LITTLE_ENDIAN__
26699 #define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
26700   uint8x8x3_t __s1 = __p1; \
26701   __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \
26702 })
26703 #else
26704 #define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
26705   uint8x8x3_t __s1 = __p1; \
26706   uint8x8x3_t __rev1; \
26707   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26708   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26709   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
26710   __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \
26711 })
26712 #endif
26713 
26714 #ifdef __LITTLE_ENDIAN__
26715 #define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
26716   uint32x2x3_t __s1 = __p1; \
26717   __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \
26718 })
26719 #else
26720 #define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
26721   uint32x2x3_t __s1 = __p1; \
26722   uint32x2x3_t __rev1; \
26723   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
26724   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
26725   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
26726   __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \
26727 })
26728 #endif
26729 
26730 #ifdef __LITTLE_ENDIAN__
26731 #define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
26732   uint16x4x3_t __s1 = __p1; \
26733   __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \
26734 })
26735 #else
26736 #define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
26737   uint16x4x3_t __s1 = __p1; \
26738   uint16x4x3_t __rev1; \
26739   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26740   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26741   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
26742   __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \
26743 })
26744 #endif
26745 
26746 #ifdef __LITTLE_ENDIAN__
26747 #define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
26748   int8x8x3_t __s1 = __p1; \
26749   __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \
26750 })
26751 #else
26752 #define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
26753   int8x8x3_t __s1 = __p1; \
26754   int8x8x3_t __rev1; \
26755   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26756   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26757   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
26758   __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \
26759 })
26760 #endif
26761 
26762 #ifdef __LITTLE_ENDIAN__
26763 #define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
26764   float32x2x3_t __s1 = __p1; \
26765   __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 9); \
26766 })
26767 #else
26768 #define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
26769   float32x2x3_t __s1 = __p1; \
26770   float32x2x3_t __rev1; \
26771   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
26772   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
26773   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
26774   __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 9); \
26775 })
26776 #endif
26777 
26778 #ifdef __LITTLE_ENDIAN__
26779 #define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
26780   float16x4x3_t __s1 = __p1; \
26781   __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 8); \
26782 })
26783 #else
26784 #define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
26785   float16x4x3_t __s1 = __p1; \
26786   float16x4x3_t __rev1; \
26787   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26788   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26789   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
26790   __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 8); \
26791 })
26792 #endif
26793 
26794 #ifdef __LITTLE_ENDIAN__
26795 #define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
26796   int32x2x3_t __s1 = __p1; \
26797   __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 2); \
26798 })
26799 #else
26800 #define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
26801   int32x2x3_t __s1 = __p1; \
26802   int32x2x3_t __rev1; \
26803   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
26804   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
26805   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
26806   __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 2); \
26807 })
26808 #endif
26809 
26810 #ifdef __LITTLE_ENDIAN__
26811 #define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
26812   int16x4x3_t __s1 = __p1; \
26813   __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 1); \
26814 })
26815 #else
26816 #define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
26817   int16x4x3_t __s1 = __p1; \
26818   int16x4x3_t __rev1; \
26819   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26820   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26821   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
26822   __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 1); \
26823 })
26824 #endif
26825 
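/*
 * Editorial note (not part of the original header): vst4 stores four
 * vectors with a four-way interleave (e.g. packing separate R, G, B and A
 * planes into RGBA bytes).  A minimal sketch for vst4_u8, with
 * illustrative names:
 *
 *   uint8_t rgba[32];
 *   uint8x8x4_t px = { vdup_n_u8(1), vdup_n_u8(2), vdup_n_u8(3), vdup_n_u8(255) };
 *   vst4_u8(rgba, px);   // rgba = {1,2,3,255, 1,2,3,255, ...}
 */
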
26826 #ifdef __LITTLE_ENDIAN__
26827 #define vst4_p8(__p0, __p1) __extension__ ({ \
26828   poly8x8x4_t __s1 = __p1; \
26829   __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \
26830 })
26831 #else
26832 #define vst4_p8(__p0, __p1) __extension__ ({ \
26833   poly8x8x4_t __s1 = __p1; \
26834   poly8x8x4_t __rev1; \
26835   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26836   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26837   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
26838   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
26839   __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \
26840 })
26841 #endif
26842 
26843 #ifdef __LITTLE_ENDIAN__
26844 #define vst4_p16(__p0, __p1) __extension__ ({ \
26845   poly16x4x4_t __s1 = __p1; \
26846   __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \
26847 })
26848 #else
26849 #define vst4_p16(__p0, __p1) __extension__ ({ \
26850   poly16x4x4_t __s1 = __p1; \
26851   poly16x4x4_t __rev1; \
26852   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26853   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26854   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
26855   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
26856   __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \
26857 })
26858 #endif
26859 
26860 #ifdef __LITTLE_ENDIAN__
26861 #define vst4q_p8(__p0, __p1) __extension__ ({ \
26862   poly8x16x4_t __s1 = __p1; \
26863   __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 36); \
26864 })
26865 #else
26866 #define vst4q_p8(__p0, __p1) __extension__ ({ \
26867   poly8x16x4_t __s1 = __p1; \
26868   poly8x16x4_t __rev1; \
26869   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26870   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26871   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26872   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26873   __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \
26874 })
26875 #endif
26876 
26877 #ifdef __LITTLE_ENDIAN__
26878 #define vst4q_p16(__p0, __p1) __extension__ ({ \
26879   poly16x8x4_t __s1 = __p1; \
26880   __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 37); \
26881 })
26882 #else
26883 #define vst4q_p16(__p0, __p1) __extension__ ({ \
26884   poly16x8x4_t __s1 = __p1; \
26885   poly16x8x4_t __rev1; \
26886   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26887   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26888   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
26889   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
26890   __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \
26891 })
26892 #endif
26893 
26894 #ifdef __LITTLE_ENDIAN__
26895 #define vst4q_u8(__p0, __p1) __extension__ ({ \
26896   uint8x16x4_t __s1 = __p1; \
26897   __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \
26898 })
26899 #else
26900 #define vst4q_u8(__p0, __p1) __extension__ ({ \
26901   uint8x16x4_t __s1 = __p1; \
26902   uint8x16x4_t __rev1; \
26903   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26904   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26905   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26906   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26907   __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \
26908 })
26909 #endif
26910 
26911 #ifdef __LITTLE_ENDIAN__
26912 #define vst4q_u32(__p0, __p1) __extension__ ({ \
26913   uint32x4x4_t __s1 = __p1; \
26914   __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 50); \
26915 })
26916 #else
26917 #define vst4q_u32(__p0, __p1) __extension__ ({ \
26918   uint32x4x4_t __s1 = __p1; \
26919   uint32x4x4_t __rev1; \
26920   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26921   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26922   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
26923   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
26924   __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \
26925 })
26926 #endif
26927 
26928 #ifdef __LITTLE_ENDIAN__
26929 #define vst4q_u16(__p0, __p1) __extension__ ({ \
26930   uint16x8x4_t __s1 = __p1; \
26931   __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \
26932 })
26933 #else
26934 #define vst4q_u16(__p0, __p1) __extension__ ({ \
26935   uint16x8x4_t __s1 = __p1; \
26936   uint16x8x4_t __rev1; \
26937   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26938   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26939   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
26940   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
26941   __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \
26942 })
26943 #endif
26944 
26945 #ifdef __LITTLE_ENDIAN__
26946 #define vst4q_s8(__p0, __p1) __extension__ ({ \
26947   int8x16x4_t __s1 = __p1; \
26948   __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \
26949 })
26950 #else
26951 #define vst4q_s8(__p0, __p1) __extension__ ({ \
26952   int8x16x4_t __s1 = __p1; \
26953   int8x16x4_t __rev1; \
26954   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26955   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26956   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26957   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26958   __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \
26959 })
26960 #endif
26961 
26962 #ifdef __LITTLE_ENDIAN__
26963 #define vst4q_f32(__p0, __p1) __extension__ ({ \
26964   float32x4x4_t __s1 = __p1; \
26965   __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 41); \
26966 })
26967 #else
26968 #define vst4q_f32(__p0, __p1) __extension__ ({ \
26969   float32x4x4_t __s1 = __p1; \
26970   float32x4x4_t __rev1; \
26971   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26972   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26973   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
26974   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
26975   __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 41); \
26976 })
26977 #endif
26978 
26979 #ifdef __LITTLE_ENDIAN__
26980 #define vst4q_f16(__p0, __p1) __extension__ ({ \
26981   float16x8x4_t __s1 = __p1; \
26982   __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 40); \
26983 })
26984 #else
26985 #define vst4q_f16(__p0, __p1) __extension__ ({ \
26986   float16x8x4_t __s1 = __p1; \
26987   float16x8x4_t __rev1; \
26988   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26989   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26990   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
26991   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
26992   __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 40); \
26993 })
26994 #endif
26995 
26996 #ifdef __LITTLE_ENDIAN__
26997 #define vst4q_s32(__p0, __p1) __extension__ ({ \
26998   int32x4x4_t __s1 = __p1; \
26999   __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 34); \
27000 })
27001 #else
27002 #define vst4q_s32(__p0, __p1) __extension__ ({ \
27003   int32x4x4_t __s1 = __p1; \
27004   int32x4x4_t __rev1; \
27005   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
27006   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
27007   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
27008   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
27009   __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 34); \
27010 })
27011 #endif
27012 
27013 #ifdef __LITTLE_ENDIAN__
27014 #define vst4q_s16(__p0, __p1) __extension__ ({ \
27015   int16x8x4_t __s1 = __p1; \
27016   __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 33); \
27017 })
27018 #else
27019 #define vst4q_s16(__p0, __p1) __extension__ ({ \
27020   int16x8x4_t __s1 = __p1; \
27021   int16x8x4_t __rev1; \
27022   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
27023   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
27024   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
27025   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
27026   __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 33); \
27027 })
27028 #endif
27029 
27030 #ifdef __LITTLE_ENDIAN__
27031 #define vst4_u8(__p0, __p1) __extension__ ({ \
27032   uint8x8x4_t __s1 = __p1; \
27033   __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \
27034 })
27035 #else
27036 #define vst4_u8(__p0, __p1) __extension__ ({ \
27037   uint8x8x4_t __s1 = __p1; \
27038   uint8x8x4_t __rev1; \
27039   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
27040   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
27041   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
27042   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
27043   __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \
27044 })
27045 #endif
27046 
27047 #ifdef __LITTLE_ENDIAN__
27048 #define vst4_u32(__p0, __p1) __extension__ ({ \
27049   uint32x2x4_t __s1 = __p1; \
27050   __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \
27051 })
27052 #else
27053 #define vst4_u32(__p0, __p1) __extension__ ({ \
27054   uint32x2x4_t __s1 = __p1; \
27055   uint32x2x4_t __rev1; \
27056   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
27057   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
27058   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
27059   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
27060   __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \
27061 })
27062 #endif
27063 
27064 #ifdef __LITTLE_ENDIAN__
27065 #define vst4_u64(__p0, __p1) __extension__ ({ \
27066   uint64x1x4_t __s1 = __p1; \
27067   __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \
27068 })
27069 #else
27070 #define vst4_u64(__p0, __p1) __extension__ ({ \
27071   uint64x1x4_t __s1 = __p1; \
27072   __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \
27073 })
27074 #endif
27075 
27076 #ifdef __LITTLE_ENDIAN__
27077 #define vst4_u16(__p0, __p1) __extension__ ({ \
27078   uint16x4x4_t __s1 = __p1; \
27079   __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \
27080 })
27081 #else
27082 #define vst4_u16(__p0, __p1) __extension__ ({ \
27083   uint16x4x4_t __s1 = __p1; \
27084   uint16x4x4_t __rev1; \
27085   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
27086   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
27087   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
27088   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
27089   __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \
27090 })
27091 #endif
27092 
27093 #ifdef __LITTLE_ENDIAN__
27094 #define vst4_s8(__p0, __p1) __extension__ ({ \
27095   int8x8x4_t __s1 = __p1; \
27096   __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \
27097 })
27098 #else
27099 #define vst4_s8(__p0, __p1) __extension__ ({ \
27100   int8x8x4_t __s1 = __p1; \
27101   int8x8x4_t __rev1; \
27102   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
27103   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
27104   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
27105   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
27106   __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \
27107 })
27108 #endif
27109 
27110 #ifdef __LITTLE_ENDIAN__
27111 #define vst4_f32(__p0, __p1) __extension__ ({ \
27112   float32x2x4_t __s1 = __p1; \
27113   __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 9); \
27114 })
27115 #else
27116 #define vst4_f32(__p0, __p1) __extension__ ({ \
27117   float32x2x4_t __s1 = __p1; \
27118   float32x2x4_t __rev1; \
27119   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
27120   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
27121   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
27122   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
27123   __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 9); \
27124 })
27125 #endif
27126 
27127 #ifdef __LITTLE_ENDIAN__
27128 #define vst4_f16(__p0, __p1) __extension__ ({ \
27129   float16x4x4_t __s1 = __p1; \
27130   __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 8); \
27131 })
27132 #else
27133 #define vst4_f16(__p0, __p1) __extension__ ({ \
27134   float16x4x4_t __s1 = __p1; \
27135   float16x4x4_t __rev1; \
27136   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
27137   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
27138   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
27139   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
27140   __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 8); \
27141 })
27142 #endif
27143 
27144 #ifdef __LITTLE_ENDIAN__
27145 #define vst4_s32(__p0, __p1) __extension__ ({ \
27146   int32x2x4_t __s1 = __p1; \
27147   __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 2); \
27148 })
27149 #else
27150 #define vst4_s32(__p0, __p1) __extension__ ({ \
27151   int32x2x4_t __s1 = __p1; \
27152   int32x2x4_t __rev1; \
27153   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
27154   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
27155   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
27156   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
27157   __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 2); \
27158 })
27159 #endif
27160 
27161 #ifdef __LITTLE_ENDIAN__
27162 #define vst4_s64(__p0, __p1) __extension__ ({ \
27163   int64x1x4_t __s1 = __p1; \
27164   __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 3); \
27165 })
27166 #else
27167 #define vst4_s64(__p0, __p1) __extension__ ({ \
27168   int64x1x4_t __s1 = __p1; \
27169   __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 3); \
27170 })
27171 #endif
27172 
27173 #ifdef __LITTLE_ENDIAN__
27174 #define vst4_s16(__p0, __p1) __extension__ ({ \
27175   int16x4x4_t __s1 = __p1; \
27176   __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 1); \
27177 })
27178 #else
27179 #define vst4_s16(__p0, __p1) __extension__ ({ \
27180   int16x4x4_t __s1 = __p1; \
27181   int16x4x4_t __rev1; \
27182   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
27183   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
27184   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
27185   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
27186   __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 1); \
27187 })
27188 #endif
27189 
27190 #ifdef __LITTLE_ENDIAN__
27191 #define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
27192   poly8x8x4_t __s1 = __p1; \
27193   __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \
27194 })
27195 #else
27196 #define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
27197   poly8x8x4_t __s1 = __p1; \
27198   poly8x8x4_t __rev1; \
27199   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
27200   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
27201   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
27202   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
27203   __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \
27204 })
27205 #endif
27206 
27207 #ifdef __LITTLE_ENDIAN__
27208 #define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
27209   poly16x4x4_t __s1 = __p1; \
27210   __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \
27211 })
27212 #else
27213 #define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
27214   poly16x4x4_t __s1 = __p1; \
27215   poly16x4x4_t __rev1; \
27216   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
27217   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
27218   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
27219   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
27220   __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \
27221 })
27222 #endif
27223 
27224 #ifdef __LITTLE_ENDIAN__
27225 #define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
27226   poly16x8x4_t __s1 = __p1; \
27227   __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \
27228 })
27229 #else
27230 #define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
27231   poly16x8x4_t __s1 = __p1; \
27232   poly16x8x4_t __rev1; \
27233   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
27234   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
27235   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
27236   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
27237   __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \
27238 })
27239 #endif
27240 
27241 #ifdef __LITTLE_ENDIAN__
27242 #define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
27243   uint32x4x4_t __s1 = __p1; \
27244   __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \
27245 })
27246 #else
27247 #define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
27248   uint32x4x4_t __s1 = __p1; \
27249   uint32x4x4_t __rev1; \
27250   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
27251   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
27252   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
27253   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
27254   __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \
27255 })
27256 #endif
27257 
27258 #ifdef __LITTLE_ENDIAN__
27259 #define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
27260   uint16x8x4_t __s1 = __p1; \
27261   __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \
27262 })
27263 #else
27264 #define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
27265   uint16x8x4_t __s1 = __p1; \
27266   uint16x8x4_t __rev1; \
27267   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
27268   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
27269   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
27270   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
27271   __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \
27272 })
27273 #endif
27274 
27275 #ifdef __LITTLE_ENDIAN__
27276 #define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
27277   float32x4x4_t __s1 = __p1; \
27278   __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 41); \
27279 })
27280 #else
27281 #define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
27282   float32x4x4_t __s1 = __p1; \
27283   float32x4x4_t __rev1; \
27284   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
27285   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
27286   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
27287   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
27288   __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 41); \
27289 })
27290 #endif
27291 
27292 #ifdef __LITTLE_ENDIAN__
27293 #define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
27294   float16x8x4_t __s1 = __p1; \
27295   __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 40); \
27296 })
27297 #else
27298 #define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
27299   float16x8x4_t __s1 = __p1; \
27300   float16x8x4_t __rev1; \
27301   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
27302   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
27303   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
27304   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
27305   __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 40); \
27306 })
27307 #endif
27308 
27309 #ifdef __LITTLE_ENDIAN__
27310 #define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
27311   int32x4x4_t __s1 = __p1; \
27312   __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 34); \
27313 })
27314 #else
27315 #define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
27316   int32x4x4_t __s1 = __p1; \
27317   int32x4x4_t __rev1; \
27318   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
27319   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
27320   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
27321   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
27322   __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 34); \
27323 })
27324 #endif
27325 
27326 #ifdef __LITTLE_ENDIAN__
27327 #define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
27328   int16x8x4_t __s1 = __p1; \
27329   __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 33); \
27330 })
27331 #else
27332 #define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
27333   int16x8x4_t __s1 = __p1; \
27334   int16x8x4_t __rev1; \
27335   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
27336   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
27337   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
27338   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
27339   __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 33); \
27340 })
27341 #endif
27342 
27343 #ifdef __LITTLE_ENDIAN__
27344 #define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
27345   uint8x8x4_t __s1 = __p1; \
27346   __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \
27347 })
27348 #else
27349 #define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
27350   uint8x8x4_t __s1 = __p1; \
27351   uint8x8x4_t __rev1; \
27352   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
27353   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
27354   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
27355   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
27356   __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \
27357 })
27358 #endif
27359 
27360 #ifdef __LITTLE_ENDIAN__
27361 #define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
27362   uint32x2x4_t __s1 = __p1; \
27363   __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \
27364 })
27365 #else
27366 #define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
27367   uint32x2x4_t __s1 = __p1; \
27368   uint32x2x4_t __rev1; \
27369   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
27370   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
27371   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
27372   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
27373   __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \
27374 })
27375 #endif
27376 
27377 #ifdef __LITTLE_ENDIAN__
27378 #define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
27379   uint16x4x4_t __s1 = __p1; \
27380   __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \
27381 })
27382 #else
27383 #define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
27384   uint16x4x4_t __s1 = __p1; \
27385   uint16x4x4_t __rev1; \
27386   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
27387   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
27388   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
27389   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
27390   __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \
27391 })
27392 #endif
27393 
27394 #ifdef __LITTLE_ENDIAN__
27395 #define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
27396   int8x8x4_t __s1 = __p1; \
27397   __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \
27398 })
27399 #else
27400 #define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
27401   int8x8x4_t __s1 = __p1; \
27402   int8x8x4_t __rev1; \
27403   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
27404   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
27405   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
27406   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
27407   __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \
27408 })
27409 #endif
27410 
27411 #ifdef __LITTLE_ENDIAN__
27412 #define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
27413   float32x2x4_t __s1 = __p1; \
27414   __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 9); \
27415 })
27416 #else
27417 #define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
27418   float32x2x4_t __s1 = __p1; \
27419   float32x2x4_t __rev1; \
27420   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
27421   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
27422   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
27423   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
27424   __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 9); \
27425 })
27426 #endif
27427 
27428 #ifdef __LITTLE_ENDIAN__
27429 #define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
27430   float16x4x4_t __s1 = __p1; \
27431   __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 8); \
27432 })
27433 #else
27434 #define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
27435   float16x4x4_t __s1 = __p1; \
27436   float16x4x4_t __rev1; \
27437   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
27438   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
27439   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
27440   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
27441   __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 8); \
27442 })
27443 #endif
27444 
27445 #ifdef __LITTLE_ENDIAN__
27446 #define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
27447   int32x2x4_t __s1 = __p1; \
27448   __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 2); \
27449 })
27450 #else
27451 #define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
27452   int32x2x4_t __s1 = __p1; \
27453   int32x2x4_t __rev1; \
27454   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
27455   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
27456   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
27457   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
27458   __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 2); \
27459 })
27460 #endif
27461 
27462 #ifdef __LITTLE_ENDIAN__
27463 #define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
27464   int16x4x4_t __s1 = __p1; \
27465   __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 1); \
27466 })
27467 #else
27468 #define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
27469   int16x4x4_t __s1 = __p1; \
27470   int16x4x4_t __rev1; \
27471   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
27472   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
27473   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
27474   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
27475   __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 1); \
27476 })
27477 #endif
27478 
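/*
 * Usage sketch for the vst4/vst4q family above: these intrinsics store four
 * vectors to memory in interleaved order. The helper below (the name
 * pack_xyzw4 and its planar inputs are hypothetical) packs four float planes
 * into x0,y0,z0,w0, x1,y1,z1,w1, ...
 *
 *   static void pack_xyzw4(float *dst, const float *x, const float *y,
 *                          const float *z, const float *w) {
 *     float32x4x4_t v;
 *     v.val[0] = vld1q_f32(x);
 *     v.val[1] = vld1q_f32(y);
 *     v.val[2] = vld1q_f32(z);
 *     v.val[3] = vld1q_f32(w);
 *     vst4q_f32(dst, v);   // dst must have room for 16 floats
 *   }
 */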
27479 #ifdef __LITTLE_ENDIAN__
27480 __ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
27481   uint8x16_t __ret;
27482   __ret = __p0 - __p1;
27483   return __ret;
27484 }
27485 #else
27486 __ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
27487   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
27488   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
27489   uint8x16_t __ret;
27490   __ret = __rev0 - __rev1;
27491   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
27492   return __ret;
27493 }
27494 #endif
27495 
27496 #ifdef __LITTLE_ENDIAN__
27497 __ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
27498   uint32x4_t __ret;
27499   __ret = __p0 - __p1;
27500   return __ret;
27501 }
27502 #else
27503 __ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
27504   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
27505   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
27506   uint32x4_t __ret;
27507   __ret = __rev0 - __rev1;
27508   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
27509   return __ret;
27510 }
27511 #endif
27512 
27513 #ifdef __LITTLE_ENDIAN__
27514 __ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
27515   uint64x2_t __ret;
27516   __ret = __p0 - __p1;
27517   return __ret;
27518 }
27519 #else
27520 __ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
27521   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
27522   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
27523   uint64x2_t __ret;
27524   __ret = __rev0 - __rev1;
27525   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
27526   return __ret;
27527 }
27528 #endif
27529 
27530 #ifdef __LITTLE_ENDIAN__
27531 __ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
27532   uint16x8_t __ret;
27533   __ret = __p0 - __p1;
27534   return __ret;
27535 }
27536 #else
27537 __ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
27538   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
27539   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
27540   uint16x8_t __ret;
27541   __ret = __rev0 - __rev1;
27542   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
27543   return __ret;
27544 }
27545 #endif
27546 
27547 #ifdef __LITTLE_ENDIAN__
27548 __ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) {
27549   int8x16_t __ret;
27550   __ret = __p0 - __p1;
27551   return __ret;
27552 }
27553 #else
27554 __ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) {
27555   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
27556   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
27557   int8x16_t __ret;
27558   __ret = __rev0 - __rev1;
27559   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
27560   return __ret;
27561 }
27562 #endif
27563 
27564 #ifdef __LITTLE_ENDIAN__
27565 __ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) {
27566   float32x4_t __ret;
27567   __ret = __p0 - __p1;
27568   return __ret;
27569 }
27570 #else
27571 __ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) {
27572   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
27573   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
27574   float32x4_t __ret;
27575   __ret = __rev0 - __rev1;
27576   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
27577   return __ret;
27578 }
27579 #endif
27580 
27581 #ifdef __LITTLE_ENDIAN__
27582 __ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) {
27583   int32x4_t __ret;
27584   __ret = __p0 - __p1;
27585   return __ret;
27586 }
27587 #else
27588 __ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) {
27589   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
27590   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
27591   int32x4_t __ret;
27592   __ret = __rev0 - __rev1;
27593   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
27594   return __ret;
27595 }
27596 #endif
27597 
27598 #ifdef __LITTLE_ENDIAN__
27599 __ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) {
27600   int64x2_t __ret;
27601   __ret = __p0 - __p1;
27602   return __ret;
27603 }
27604 #else
27605 __ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) {
27606   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
27607   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
27608   int64x2_t __ret;
27609   __ret = __rev0 - __rev1;
27610   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
27611   return __ret;
27612 }
27613 #endif
27614 
27615 #ifdef __LITTLE_ENDIAN__
27616 __ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) {
27617   int16x8_t __ret;
27618   __ret = __p0 - __p1;
27619   return __ret;
27620 }
27621 #else
27622 __ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) {
27623   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
27624   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
27625   int16x8_t __ret;
27626   __ret = __rev0 - __rev1;
27627   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
27628   return __ret;
27629 }
27630 #endif
27631 
27632 #ifdef __LITTLE_ENDIAN__
27633 __ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
27634   uint8x8_t __ret;
27635   __ret = __p0 - __p1;
27636   return __ret;
27637 }
27638 #else
27639 __ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
27640   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
27641   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
27642   uint8x8_t __ret;
27643   __ret = __rev0 - __rev1;
27644   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
27645   return __ret;
27646 }
27647 #endif
27648 
27649 #ifdef __LITTLE_ENDIAN__
27650 __ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
27651   uint32x2_t __ret;
27652   __ret = __p0 - __p1;
27653   return __ret;
27654 }
27655 #else
27656 __ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
27657   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
27658   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
27659   uint32x2_t __ret;
27660   __ret = __rev0 - __rev1;
27661   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
27662   return __ret;
27663 }
27664 #endif
27665 
27666 #ifdef __LITTLE_ENDIAN__
27667 __ai uint64x1_t vsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
27668   uint64x1_t __ret;
27669   __ret = __p0 - __p1;
27670   return __ret;
27671 }
27672 #else
27673 __ai uint64x1_t vsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
27674   uint64x1_t __ret;
27675   __ret = __p0 - __p1;
27676   return __ret;
27677 }
27678 #endif
27679 
27680 #ifdef __LITTLE_ENDIAN__
27681 __ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
27682   uint16x4_t __ret;
27683   __ret = __p0 - __p1;
27684   return __ret;
27685 }
27686 #else
27687 __ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
27688   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
27689   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
27690   uint16x4_t __ret;
27691   __ret = __rev0 - __rev1;
27692   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
27693   return __ret;
27694 }
27695 #endif
27696 
27697 #ifdef __LITTLE_ENDIAN__
27698 __ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) {
27699   int8x8_t __ret;
27700   __ret = __p0 - __p1;
27701   return __ret;
27702 }
27703 #else
27704 __ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) {
27705   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
27706   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
27707   int8x8_t __ret;
27708   __ret = __rev0 - __rev1;
27709   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
27710   return __ret;
27711 }
27712 #endif
27713 
27714 #ifdef __LITTLE_ENDIAN__
27715 __ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) {
27716   float32x2_t __ret;
27717   __ret = __p0 - __p1;
27718   return __ret;
27719 }
27720 #else
27721 __ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) {
27722   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
27723   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
27724   float32x2_t __ret;
27725   __ret = __rev0 - __rev1;
27726   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
27727   return __ret;
27728 }
27729 #endif
27730 
27731 #ifdef __LITTLE_ENDIAN__
27732 __ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) {
27733   int32x2_t __ret;
27734   __ret = __p0 - __p1;
27735   return __ret;
27736 }
27737 #else
27738 __ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) {
27739   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
27740   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
27741   int32x2_t __ret;
27742   __ret = __rev0 - __rev1;
27743   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
27744   return __ret;
27745 }
27746 #endif
27747 
27748 #ifdef __LITTLE_ENDIAN__
27749 __ai int64x1_t vsub_s64(int64x1_t __p0, int64x1_t __p1) {
27750   int64x1_t __ret;
27751   __ret = __p0 - __p1;
27752   return __ret;
27753 }
27754 #else
27755 __ai int64x1_t vsub_s64(int64x1_t __p0, int64x1_t __p1) {
27756   int64x1_t __ret;
27757   __ret = __p0 - __p1;
27758   return __ret;
27759 }
27760 #endif
27761 
27762 #ifdef __LITTLE_ENDIAN__
27763 __ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) {
27764   int16x4_t __ret;
27765   __ret = __p0 - __p1;
27766   return __ret;
27767 }
27768 #else
27769 __ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) {
27770   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
27771   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
27772   int16x4_t __ret;
27773   __ret = __rev0 - __rev1;
27774   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
27775   return __ret;
27776 }
27777 #endif
27778 
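/*
 * Usage sketch for the vsub/vsubq family above: each intrinsic is plain
 * element-wise subtraction; the big-endian variants only reverse lane order
 * around the operation so results match the little-endian layout. The helper
 * name diff4 is hypothetical:
 *
 *   static float32x4_t diff4(float32x4_t a, float32x4_t b) {
 *     return vsubq_f32(a, b);   // { a0-b0, a1-b1, a2-b2, a3-b3 }
 *   }
 */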
27779 #ifdef __LITTLE_ENDIAN__
27780 __ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
27781   uint16x4_t __ret;
27782   __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
27783   return __ret;
27784 }
27785 #else
27786 __ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
27787   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
27788   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
27789   uint16x4_t __ret;
27790   __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
27791   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
27792   return __ret;
27793 }
27794 __ai uint16x4_t __noswap_vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
27795   uint16x4_t __ret;
27796   __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
27797   return __ret;
27798 }
27799 #endif
27800 
27801 #ifdef __LITTLE_ENDIAN__
27802 __ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
27803   uint32x2_t __ret;
27804   __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
27805   return __ret;
27806 }
27807 #else
27808 __ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
27809   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
27810   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
27811   uint32x2_t __ret;
27812   __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
27813   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
27814   return __ret;
27815 }
27816 __ai uint32x2_t __noswap_vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
27817   uint32x2_t __ret;
27818   __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
27819   return __ret;
27820 }
27821 #endif
27822 
27823 #ifdef __LITTLE_ENDIAN__
27824 __ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
27825   uint8x8_t __ret;
27826   __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
27827   return __ret;
27828 }
27829 #else
27830 __ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
27831   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
27832   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
27833   uint8x8_t __ret;
27834   __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
27835   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
27836   return __ret;
27837 }
27838 __ai uint8x8_t __noswap_vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
27839   uint8x8_t __ret;
27840   __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
27841   return __ret;
27842 }
27843 #endif
27844 
27845 #ifdef __LITTLE_ENDIAN__
27846 __ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
27847   int16x4_t __ret;
27848   __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
27849   return __ret;
27850 }
27851 #else
27852 __ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
27853   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
27854   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
27855   int16x4_t __ret;
27856   __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
27857   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
27858   return __ret;
27859 }
27860 __ai int16x4_t __noswap_vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
27861   int16x4_t __ret;
27862   __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
27863   return __ret;
27864 }
27865 #endif
27866 
27867 #ifdef __LITTLE_ENDIAN__
27868 __ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
27869   int32x2_t __ret;
27870   __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
27871   return __ret;
27872 }
27873 #else
27874 __ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
27875   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
27876   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
27877   int32x2_t __ret;
27878   __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
27879   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
27880   return __ret;
27881 }
27882 __ai int32x2_t __noswap_vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
27883   int32x2_t __ret;
27884   __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
27885   return __ret;
27886 }
27887 #endif
27888 
27889 #ifdef __LITTLE_ENDIAN__
27890 __ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
27891   int8x8_t __ret;
27892   __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
27893   return __ret;
27894 }
27895 #else
27896 __ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
27897   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
27898   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
27899   int8x8_t __ret;
27900   __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
27901   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
27902   return __ret;
27903 }
27904 __ai int8x8_t __noswap_vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
27905   int8x8_t __ret;
27906   __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
27907   return __ret;
27908 }
27909 #endif
27910 
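/*
 * Usage sketch for the vsubhn family above: subtract and keep only the high
 * half of each widened lane, narrowing the element type. The helper name
 * diff_hi16 is hypothetical:
 *
 *   static uint16x4_t diff_hi16(uint32x4_t a, uint32x4_t b) {
 *     return vsubhn_u32(a, b);  // lane i = (uint16_t)((a[i] - b[i]) >> 16)
 *   }
 */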
27911 #ifdef __LITTLE_ENDIAN__
27912 __ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) {
27913   uint16x8_t __ret;
27914   __ret = vmovl_u8(__p0) - vmovl_u8(__p1);
27915   return __ret;
27916 }
27917 #else
27918 __ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) {
27919   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
27920   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
27921   uint16x8_t __ret;
27922   __ret = __noswap_vmovl_u8(__rev0) - __noswap_vmovl_u8(__rev1);
27923   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
27924   return __ret;
27925 }
27926 #endif
27927 
27928 #ifdef __LITTLE_ENDIAN__
27929 __ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) {
27930   uint64x2_t __ret;
27931   __ret = vmovl_u32(__p0) - vmovl_u32(__p1);
27932   return __ret;
27933 }
27934 #else
27935 __ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) {
27936   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
27937   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
27938   uint64x2_t __ret;
27939   __ret = __noswap_vmovl_u32(__rev0) - __noswap_vmovl_u32(__rev1);
27940   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
27941   return __ret;
27942 }
27943 #endif
27944 
27945 #ifdef __LITTLE_ENDIAN__
27946 __ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) {
27947   uint32x4_t __ret;
27948   __ret = vmovl_u16(__p0) - vmovl_u16(__p1);
27949   return __ret;
27950 }
27951 #else
27952 __ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) {
27953   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
27954   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
27955   uint32x4_t __ret;
27956   __ret = __noswap_vmovl_u16(__rev0) - __noswap_vmovl_u16(__rev1);
27957   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
27958   return __ret;
27959 }
27960 #endif
27961 
27962 #ifdef __LITTLE_ENDIAN__
27963 __ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) {
27964   int16x8_t __ret;
27965   __ret = vmovl_s8(__p0) - vmovl_s8(__p1);
27966   return __ret;
27967 }
27968 #else
27969 __ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) {
27970   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
27971   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
27972   int16x8_t __ret;
27973   __ret = __noswap_vmovl_s8(__rev0) - __noswap_vmovl_s8(__rev1);
27974   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
27975   return __ret;
27976 }
27977 #endif
27978 
27979 #ifdef __LITTLE_ENDIAN__
27980 __ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) {
27981   int64x2_t __ret;
27982   __ret = vmovl_s32(__p0) - vmovl_s32(__p1);
27983   return __ret;
27984 }
27985 #else
27986 __ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) {
27987   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
27988   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
27989   int64x2_t __ret;
27990   __ret = __noswap_vmovl_s32(__rev0) - __noswap_vmovl_s32(__rev1);
27991   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
27992   return __ret;
27993 }
27994 #endif
27995 
27996 #ifdef __LITTLE_ENDIAN__
27997 __ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) {
27998   int32x4_t __ret;
27999   __ret = vmovl_s16(__p0) - vmovl_s16(__p1);
28000   return __ret;
28001 }
28002 #else
28003 __ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) {
28004   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
28005   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
28006   int32x4_t __ret;
28007   __ret = __noswap_vmovl_s16(__rev0) - __noswap_vmovl_s16(__rev1);
28008   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
28009   return __ret;
28010 }
28011 #endif
28012 
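/*
 * Usage sketch for the vsubl family above: both operands are widened before
 * the subtraction, so the difference is computed in the wider type. The
 * helper name wide_diff8 is hypothetical:
 *
 *   static uint16x8_t wide_diff8(uint8x8_t a, uint8x8_t b) {
 *     return vsubl_u8(a, b);    // lane i = (uint16_t)a[i] - (uint16_t)b[i]
 *   }
 */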
28013 #ifdef __LITTLE_ENDIAN__
28014 __ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) {
28015   uint16x8_t __ret;
28016   __ret = __p0 - vmovl_u8(__p1);
28017   return __ret;
28018 }
28019 #else
28020 __ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) {
28021   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28022   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28023   uint16x8_t __ret;
28024   __ret = __rev0 - __noswap_vmovl_u8(__rev1);
28025   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28026   return __ret;
28027 }
28028 #endif
28029 
28030 #ifdef __LITTLE_ENDIAN__
28031 __ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) {
28032   uint64x2_t __ret;
28033   __ret = __p0 - vmovl_u32(__p1);
28034   return __ret;
28035 }
28036 #else
28037 __ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) {
28038   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
28039   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
28040   uint64x2_t __ret;
28041   __ret = __rev0 - __noswap_vmovl_u32(__rev1);
28042   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
28043   return __ret;
28044 }
28045 #endif
28046 
28047 #ifdef __LITTLE_ENDIAN__
28048 __ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) {
28049   uint32x4_t __ret;
28050   __ret = __p0 - vmovl_u16(__p1);
28051   return __ret;
28052 }
28053 #else
28054 __ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) {
28055   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
28056   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
28057   uint32x4_t __ret;
28058   __ret = __rev0 - __noswap_vmovl_u16(__rev1);
28059   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
28060   return __ret;
28061 }
28062 #endif
28063 
28064 #ifdef __LITTLE_ENDIAN__
28065 __ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) {
28066   int16x8_t __ret;
28067   __ret = __p0 - vmovl_s8(__p1);
28068   return __ret;
28069 }
28070 #else
28071 __ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) {
28072   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28073   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28074   int16x8_t __ret;
28075   __ret = __rev0 - __noswap_vmovl_s8(__rev1);
28076   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28077   return __ret;
28078 }
28079 #endif
28080 
28081 #ifdef __LITTLE_ENDIAN__
28082 __ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) {
28083   int64x2_t __ret;
28084   __ret = __p0 - vmovl_s32(__p1);
28085   return __ret;
28086 }
28087 #else
28088 __ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) {
28089   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
28090   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
28091   int64x2_t __ret;
28092   __ret = __rev0 - __noswap_vmovl_s32(__rev1);
28093   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
28094   return __ret;
28095 }
28096 #endif
28097 
28098 #ifdef __LITTLE_ENDIAN__
28099 __ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) {
28100   int32x4_t __ret;
28101   __ret = __p0 - vmovl_s16(__p1);
28102   return __ret;
28103 }
28104 #else
28105 __ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) {
28106   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
28107   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
28108   int32x4_t __ret;
28109   __ret = __rev0 - __noswap_vmovl_s16(__rev1);
28110   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
28111   return __ret;
28112 }
28113 #endif
28114 
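/*
 * Usage sketch for the vsubw family above: only the second operand is
 * widened, which suits subtracting narrow samples from a wide accumulator.
 * The helper name drop_samples is hypothetical:
 *
 *   static uint16x8_t drop_samples(uint16x8_t acc, uint8x8_t s) {
 *     return vsubw_u8(acc, s);  // lane i = acc[i] - (uint16_t)s[i]
 *   }
 */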
28115 #ifdef __LITTLE_ENDIAN__
28116 __ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) {
28117   poly8x8_t __ret;
28118   __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 4);
28119   return __ret;
28120 }
28121 #else
28122 __ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) {
28123   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28124   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28125   poly8x8_t __ret;
28126   __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4);
28127   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28128   return __ret;
28129 }
28130 #endif
28131 
28132 #ifdef __LITTLE_ENDIAN__
28133 __ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) {
28134   uint8x8_t __ret;
28135   __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
28136   return __ret;
28137 }
28138 #else
28139 __ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) {
28140   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28141   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28142   uint8x8_t __ret;
28143   __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
28144   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28145   return __ret;
28146 }
28147 #endif
28148 
28149 #ifdef __LITTLE_ENDIAN__
28150 __ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) {
28151   int8x8_t __ret;
28152   __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
28153   return __ret;
28154 }
28155 #else
28156 __ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) {
28157   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28158   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28159   int8x8_t __ret;
28160   __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
28161   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28162   return __ret;
28163 }
28164 #endif
28165 
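/*
 * Usage sketch for the vtbl1 intrinsics above: each index byte selects an
 * element of the 8-byte table, and indices of 8 or more yield 0. The helper
 * name reverse8 is hypothetical:
 *
 *   static uint8x8_t reverse8(uint8x8_t v) {
 *     const uint8_t idx[8] = {7, 6, 5, 4, 3, 2, 1, 0};
 *     return vtbl1_u8(v, vld1_u8(idx));   // lane i = v[7 - i]
 *   }
 */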
28166 #ifdef __LITTLE_ENDIAN__
28167 __ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) {
28168   poly8x8_t __ret;
28169   __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 4);
28170   return __ret;
28171 }
28172 #else
28173 __ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) {
28174   poly8x8x2_t __rev0;
28175   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28176   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28177   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28178   poly8x8_t __ret;
28179   __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 4);
28180   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28181   return __ret;
28182 }
28183 #endif
28184 
28185 #ifdef __LITTLE_ENDIAN__
28186 __ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) {
28187   uint8x8_t __ret;
28188   __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 16);
28189   return __ret;
28190 }
28191 #else
28192 __ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) {
28193   uint8x8x2_t __rev0;
28194   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28195   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28196   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28197   uint8x8_t __ret;
28198   __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 16);
28199   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28200   return __ret;
28201 }
28202 #endif
28203 
28204 #ifdef __LITTLE_ENDIAN__
28205 __ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) {
28206   int8x8_t __ret;
28207   __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 0);
28208   return __ret;
28209 }
28210 #else
28211 __ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) {
28212   int8x8x2_t __rev0;
28213   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28214   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28215   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28216   int8x8_t __ret;
28217   __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 0);
28218   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28219   return __ret;
28220 }
28221 #endif
28222 
28223 #ifdef __LITTLE_ENDIAN__
28224 __ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) {
28225   poly8x8_t __ret;
28226   __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 4);
28227   return __ret;
28228 }
28229 #else
28230 __ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) {
28231   poly8x8x3_t __rev0;
28232   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28233   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28234   __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
28235   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28236   poly8x8_t __ret;
28237   __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 4);
28238   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28239   return __ret;
28240 }
28241 #endif
28242 
28243 #ifdef __LITTLE_ENDIAN__
28244 __ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) {
28245   uint8x8_t __ret;
28246   __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 16);
28247   return __ret;
28248 }
28249 #else
28250 __ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) {
28251   uint8x8x3_t __rev0;
28252   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28253   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28254   __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
28255   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28256   uint8x8_t __ret;
28257   __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 16);
28258   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28259   return __ret;
28260 }
28261 #endif
28262 
28263 #ifdef __LITTLE_ENDIAN__
28264 __ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) {
28265   int8x8_t __ret;
28266   __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 0);
28267   return __ret;
28268 }
28269 #else
28270 __ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) {
28271   int8x8x3_t __rev0;
28272   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28273   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28274   __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
28275   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28276   int8x8_t __ret;
28277   __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 0);
28278   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28279   return __ret;
28280 }
28281 #endif
28282 
28283 #ifdef __LITTLE_ENDIAN__
28284 __ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) {
28285   poly8x8_t __ret;
28286   __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 4);
28287   return __ret;
28288 }
28289 #else
28290 __ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) {
28291   poly8x8x4_t __rev0;
28292   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28293   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28294   __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
28295   __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
28296   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28297   poly8x8_t __ret;
28298   __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 4);
28299   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28300   return __ret;
28301 }
28302 #endif
28303 
28304 #ifdef __LITTLE_ENDIAN__
28305 __ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) {
28306   uint8x8_t __ret;
28307   __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 16);
28308   return __ret;
28309 }
28310 #else
28311 __ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) {
28312   uint8x8x4_t __rev0;
28313   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28314   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28315   __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
28316   __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
28317   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28318   uint8x8_t __ret;
28319   __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 16);
28320   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28321   return __ret;
28322 }
28323 #endif
28324 
28325 #ifdef __LITTLE_ENDIAN__
28326 __ai int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) {
28327   int8x8_t __ret;
28328   __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 0);
28329   return __ret;
28330 }
28331 #else
28332 __ai int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) {
28333   int8x8x4_t __rev0;
28334   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28335   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28336   __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
28337   __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
28338   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28339   int8x8_t __ret;
28340   __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 0);
28341   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28342   return __ret;
28343 }
28344 #endif
28345 
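/* vtbx1..vtbx4: table lookup with extension. Like vtbl, but lanes whose index is out of
 * range keep the corresponding byte of the first operand instead of being zeroed. */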
28346 #ifdef __LITTLE_ENDIAN__
28347 __ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) {
28348   poly8x8_t __ret;
28349   __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4);
28350   return __ret;
28351 }
28352 #else
28353 __ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) {
28354   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28355   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28356   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
28357   poly8x8_t __ret;
28358   __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4);
28359   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28360   return __ret;
28361 }
28362 #endif
28363 
28364 #ifdef __LITTLE_ENDIAN__
28365 __ai uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
28366   uint8x8_t __ret;
28367   __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16);
28368   return __ret;
28369 }
28370 #else
28371 __ai uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
28372   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28373   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28374   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
28375   uint8x8_t __ret;
28376   __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16);
28377   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28378   return __ret;
28379 }
28380 #endif
28381 
28382 #ifdef __LITTLE_ENDIAN__
28383 __ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
28384   int8x8_t __ret;
28385   __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0);
28386   return __ret;
28387 }
28388 #else
28389 __ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
28390   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28391   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28392   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
28393   int8x8_t __ret;
28394   __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0);
28395   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28396   return __ret;
28397 }
28398 #endif
28399 
28400 #ifdef __LITTLE_ENDIAN__
28401 __ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) {
28402   poly8x8_t __ret;
28403   __ret = (poly8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 4);
28404   return __ret;
28405 }
28406 #else
28407 __ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) {
28408   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28409   poly8x8x2_t __rev1;
28410   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28411   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28412   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
28413   poly8x8_t __ret;
28414   __ret = (poly8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 4);
28415   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28416   return __ret;
28417 }
28418 #endif
28419 
28420 #ifdef __LITTLE_ENDIAN__
28421 __ai uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) {
28422   uint8x8_t __ret;
28423   __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 16);
28424   return __ret;
28425 }
28426 #else
28427 __ai uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) {
28428   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28429   uint8x8x2_t __rev1;
28430   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28431   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28432   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
28433   uint8x8_t __ret;
28434   __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 16);
28435   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28436   return __ret;
28437 }
28438 #endif
28439 
28440 #ifdef __LITTLE_ENDIAN__
28441 __ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) {
28442   int8x8_t __ret;
28443   __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 0);
28444   return __ret;
28445 }
28446 #else
28447 __ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) {
28448   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28449   int8x8x2_t __rev1;
28450   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28451   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28452   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
28453   int8x8_t __ret;
28454   __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 0);
28455   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28456   return __ret;
28457 }
28458 #endif
28459 
28460 #ifdef __LITTLE_ENDIAN__
28461 __ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) {
28462   poly8x8_t __ret;
28463   __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 4);
28464   return __ret;
28465 }
28466 #else
28467 __ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) {
28468   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28469   poly8x8x3_t __rev1;
28470   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28471   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28472   __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
28473   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
28474   poly8x8_t __ret;
28475   __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 4);
28476   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28477   return __ret;
28478 }
28479 #endif
28480 
28481 #ifdef __LITTLE_ENDIAN__
28482 __ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) {
28483   uint8x8_t __ret;
28484   __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 16);
28485   return __ret;
28486 }
28487 #else
28488 __ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) {
28489   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28490   uint8x8x3_t __rev1;
28491   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28492   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28493   __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
28494   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
28495   uint8x8_t __ret;
28496   __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 16);
28497   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28498   return __ret;
28499 }
28500 #endif
28501 
28502 #ifdef __LITTLE_ENDIAN__
28503 __ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) {
28504   int8x8_t __ret;
28505   __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 0);
28506   return __ret;
28507 }
28508 #else
28509 __ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) {
28510   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28511   int8x8x3_t __rev1;
28512   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28513   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28514   __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
28515   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
28516   int8x8_t __ret;
28517   __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 0);
28518   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28519   return __ret;
28520 }
28521 #endif
28522 
28523 #ifdef __LITTLE_ENDIAN__
28524 __ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) {
28525   poly8x8_t __ret;
28526   __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 4);
28527   return __ret;
28528 }
28529 #else
28530 __ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) {
28531   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28532   poly8x8x4_t __rev1;
28533   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28534   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28535   __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
28536   __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
28537   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
28538   poly8x8_t __ret;
28539   __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 4);
28540   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28541   return __ret;
28542 }
28543 #endif
28544 
28545 #ifdef __LITTLE_ENDIAN__
28546 __ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) {
28547   uint8x8_t __ret;
28548   __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 16);
28549   return __ret;
28550 }
28551 #else
28552 __ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) {
28553   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28554   uint8x8x4_t __rev1;
28555   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28556   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28557   __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
28558   __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
28559   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
28560   uint8x8_t __ret;
28561   __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 16);
28562   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28563   return __ret;
28564 }
28565 #endif
28566 
28567 #ifdef __LITTLE_ENDIAN__
28568 __ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) {
28569   int8x8_t __ret;
28570   __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 0);
28571   return __ret;
28572 }
28573 #else
28574 __ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) {
28575   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28576   int8x8x4_t __rev1;
28577   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28578   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28579   __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
28580   __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
28581   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
28582   int8x8_t __ret;
28583   __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 0);
28584   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28585   return __ret;
28586 }
28587 #endif
28588 
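/* vtrn/vtrnq: transpose. Treats the two inputs as rows of 2x2 element matrices; val[0]
 * interleaves the even-indexed lanes of both inputs, val[1] the odd-indexed lanes. */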
28589 #ifdef __LITTLE_ENDIAN__
28590 __ai poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) {
28591   poly8x8x2_t __ret;
28592   __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4);
28593   return __ret;
28594 }
28595 #else
28596 __ai poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) {
28597   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28598   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28599   poly8x8x2_t __ret;
28600   __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
28601 
28602   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28603   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28604   return __ret;
28605 }
28606 #endif
28607 
28608 #ifdef __LITTLE_ENDIAN__
28609 __ai poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) {
28610   poly16x4x2_t __ret;
28611   __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5);
28612   return __ret;
28613 }
28614 #else
28615 __ai poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) {
28616   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
28617   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
28618   poly16x4x2_t __ret;
28619   __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
28620 
28621   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
28622   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
28623   return __ret;
28624 }
28625 #endif
28626 
28627 #ifdef __LITTLE_ENDIAN__
28628 __ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) {
28629   poly8x16x2_t __ret;
28630   __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36);
28631   return __ret;
28632 }
28633 #else
28634 __ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) {
28635   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
28636   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
28637   poly8x16x2_t __ret;
28638   __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36);
28639 
28640   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
28641   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
28642   return __ret;
28643 }
28644 #endif
28645 
28646 #ifdef __LITTLE_ENDIAN__
28647 __ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) {
28648   poly16x8x2_t __ret;
28649   __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37);
28650   return __ret;
28651 }
28652 #else
28653 __ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) {
28654   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28655   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28656   poly16x8x2_t __ret;
28657   __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37);
28658 
28659   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28660   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28661   return __ret;
28662 }
28663 #endif
28664 
28665 #ifdef __LITTLE_ENDIAN__
28666 __ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) {
28667   uint8x16x2_t __ret;
28668   __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48);
28669   return __ret;
28670 }
28671 #else
28672 __ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) {
28673   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
28674   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
28675   uint8x16x2_t __ret;
28676   __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48);
28677 
28678   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
28679   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
28680   return __ret;
28681 }
28682 #endif
28683 
28684 #ifdef __LITTLE_ENDIAN__
28685 __ai uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) {
28686   uint32x4x2_t __ret;
28687   __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50);
28688   return __ret;
28689 }
28690 #else
28691 __ai uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) {
28692   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
28693   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
28694   uint32x4x2_t __ret;
28695   __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50);
28696 
28697   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
28698   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
28699   return __ret;
28700 }
28701 #endif
28702 
28703 #ifdef __LITTLE_ENDIAN__
28704 __ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) {
28705   uint16x8x2_t __ret;
28706   __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49);
28707   return __ret;
28708 }
28709 #else
28710 __ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) {
28711   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28712   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28713   uint16x8x2_t __ret;
28714   __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49);
28715 
28716   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28717   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28718   return __ret;
28719 }
28720 #endif
28721 
28722 #ifdef __LITTLE_ENDIAN__
28723 __ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) {
28724   int8x16x2_t __ret;
28725   __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32);
28726   return __ret;
28727 }
28728 #else
28729 __ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) {
28730   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
28731   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
28732   int8x16x2_t __ret;
28733   __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32);
28734 
28735   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
28736   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
28737   return __ret;
28738 }
28739 #endif
28740 
28741 #ifdef __LITTLE_ENDIAN__
28742 __ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) {
28743   float32x4x2_t __ret;
28744   __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41);
28745   return __ret;
28746 }
28747 #else
28748 __ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) {
28749   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
28750   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
28751   float32x4x2_t __ret;
28752   __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41);
28753 
28754   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
28755   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
28756   return __ret;
28757 }
28758 #endif
28759 
28760 #ifdef __LITTLE_ENDIAN__
28761 __ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) {
28762   int32x4x2_t __ret;
28763   __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34);
28764   return __ret;
28765 }
28766 #else
28767 __ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) {
28768   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
28769   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
28770   int32x4x2_t __ret;
28771   __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34);
28772 
28773   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
28774   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
28775   return __ret;
28776 }
28777 #endif
28778 
28779 #ifdef __LITTLE_ENDIAN__
28780 __ai int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) {
28781   int16x8x2_t __ret;
28782   __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33);
28783   return __ret;
28784 }
28785 #else
28786 __ai int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) {
28787   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28788   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28789   int16x8x2_t __ret;
28790   __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33);
28791 
28792   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28793   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28794   return __ret;
28795 }
28796 #endif
28797 
28798 #ifdef __LITTLE_ENDIAN__
28799 __ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) {
28800   uint8x8x2_t __ret;
28801   __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16);
28802   return __ret;
28803 }
28804 #else
28805 __ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) {
28806   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28807   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28808   uint8x8x2_t __ret;
28809   __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16);
28810 
28811   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28812   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28813   return __ret;
28814 }
28815 #endif
28816 
28817 #ifdef __LITTLE_ENDIAN__
28818 __ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) {
28819   uint32x2x2_t __ret;
28820   __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18);
28821   return __ret;
28822 }
28823 #else
28824 __ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) {
28825   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
28826   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
28827   uint32x2x2_t __ret;
28828   __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18);
28829 
28830   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
28831   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
28832   return __ret;
28833 }
28834 #endif
28835 
28836 #ifdef __LITTLE_ENDIAN__
28837 __ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) {
28838   uint16x4x2_t __ret;
28839   __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17);
28840   return __ret;
28841 }
28842 #else
28843 __ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) {
28844   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
28845   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
28846   uint16x4x2_t __ret;
28847   __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17);
28848 
28849   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
28850   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
28851   return __ret;
28852 }
28853 #endif
28854 
28855 #ifdef __LITTLE_ENDIAN__
28856 __ai int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) {
28857   int8x8x2_t __ret;
28858   __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0);
28859   return __ret;
28860 }
28861 #else
28862 __ai int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) {
28863   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28864   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28865   int8x8x2_t __ret;
28866   __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0);
28867 
28868   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28869   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28870   return __ret;
28871 }
28872 #endif
28873 
28874 #ifdef __LITTLE_ENDIAN__
28875 __ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) {
28876   float32x2x2_t __ret;
28877   __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9);
28878   return __ret;
28879 }
28880 #else
28881 __ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) {
28882   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
28883   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
28884   float32x2x2_t __ret;
28885   __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9);
28886 
28887   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
28888   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
28889   return __ret;
28890 }
28891 #endif
28892 
28893 #ifdef __LITTLE_ENDIAN__
28894 __ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) {
28895   int32x2x2_t __ret;
28896   __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2);
28897   return __ret;
28898 }
28899 #else
28900 __ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) {
28901   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
28902   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
28903   int32x2x2_t __ret;
28904   __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2);
28905 
28906   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
28907   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
28908   return __ret;
28909 }
28910 #endif
28911 
28912 #ifdef __LITTLE_ENDIAN__
28913 __ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) {
28914   int16x4x2_t __ret;
28915   __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1);
28916   return __ret;
28917 }
28918 #else
28919 __ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) {
28920   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
28921   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
28922   int16x4x2_t __ret;
28923   __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1);
28924 
28925   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
28926   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
28927   return __ret;
28928 }
28929 #endif
28930 
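/* vtst/vtstq: bitwise test. Each result lane is all ones if the bitwise AND of the
 * corresponding input lanes is non-zero, and all zeros otherwise. */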
28931 #ifdef __LITTLE_ENDIAN__
28932 __ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) {
28933   uint8x8_t __ret;
28934   __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
28935   return __ret;
28936 }
28937 #else
28938 __ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) {
28939   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28940   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28941   uint8x8_t __ret;
28942   __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
28943   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28944   return __ret;
28945 }
28946 #endif
28947 
28948 #ifdef __LITTLE_ENDIAN__
28949 __ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) {
28950   uint16x4_t __ret;
28951   __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
28952   return __ret;
28953 }
28954 #else
28955 __ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) {
28956   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
28957   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
28958   uint16x4_t __ret;
28959   __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
28960   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
28961   return __ret;
28962 }
28963 #endif
28964 
28965 #ifdef __LITTLE_ENDIAN__
28966 __ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) {
28967   uint8x16_t __ret;
28968   __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
28969   return __ret;
28970 }
28971 #else
28972 __ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) {
28973   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
28974   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
28975   uint8x16_t __ret;
28976   __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
28977   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
28978   return __ret;
28979 }
28980 #endif
28981 
28982 #ifdef __LITTLE_ENDIAN__
28983 __ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) {
28984   uint16x8_t __ret;
28985   __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
28986   return __ret;
28987 }
28988 #else
28989 __ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) {
28990   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28991   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28992   uint16x8_t __ret;
28993   __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
28994   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28995   return __ret;
28996 }
28997 #endif
28998 
28999 #ifdef __LITTLE_ENDIAN__
29000 __ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) {
29001   uint8x16_t __ret;
29002   __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
29003   return __ret;
29004 }
29005 #else
29006 __ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) {
29007   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29008   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29009   uint8x16_t __ret;
29010   __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
29011   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29012   return __ret;
29013 }
29014 #endif
29015 
29016 #ifdef __LITTLE_ENDIAN__
29017 __ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) {
29018   uint32x4_t __ret;
29019   __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
29020   return __ret;
29021 }
29022 #else
29023 __ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) {
29024   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
29025   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
29026   uint32x4_t __ret;
29027   __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
29028   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
29029   return __ret;
29030 }
29031 #endif
29032 
29033 #ifdef __LITTLE_ENDIAN__
29034 __ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) {
29035   uint16x8_t __ret;
29036   __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
29037   return __ret;
29038 }
29039 #else
29040 __ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) {
29041   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
29042   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
29043   uint16x8_t __ret;
29044   __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
29045   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
29046   return __ret;
29047 }
29048 #endif
29049 
29050 #ifdef __LITTLE_ENDIAN__
29051 __ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) {
29052   uint8x16_t __ret;
29053   __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
29054   return __ret;
29055 }
29056 #else
29057 __ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) {
29058   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29059   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29060   uint8x16_t __ret;
29061   __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
29062   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29063   return __ret;
29064 }
29065 #endif
29066 
29067 #ifdef __LITTLE_ENDIAN__
29068 __ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) {
29069   uint32x4_t __ret;
29070   __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
29071   return __ret;
29072 }
29073 #else
29074 __ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) {
29075   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
29076   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
29077   uint32x4_t __ret;
29078   __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
29079   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
29080   return __ret;
29081 }
29082 #endif
29083 
29084 #ifdef __LITTLE_ENDIAN__
29085 __ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) {
29086   uint16x8_t __ret;
29087   __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
29088   return __ret;
29089 }
29090 #else
29091 __ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) {
29092   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
29093   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
29094   uint16x8_t __ret;
29095   __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
29096   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
29097   return __ret;
29098 }
29099 #endif
29100 
29101 #ifdef __LITTLE_ENDIAN__
29102 __ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) {
29103   uint8x8_t __ret;
29104   __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
29105   return __ret;
29106 }
29107 #else
29108 __ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) {
29109   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
29110   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
29111   uint8x8_t __ret;
29112   __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
29113   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
29114   return __ret;
29115 }
29116 #endif
29117 
29118 #ifdef __LITTLE_ENDIAN__
29119 __ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) {
29120   uint32x2_t __ret;
29121   __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
29122   return __ret;
29123 }
29124 #else
29125 __ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) {
29126   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
29127   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
29128   uint32x2_t __ret;
29129   __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
29130   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
29131   return __ret;
29132 }
29133 #endif
29134 
29135 #ifdef __LITTLE_ENDIAN__
29136 __ai uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) {
29137   uint16x4_t __ret;
29138   __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
29139   return __ret;
29140 }
29141 #else
29142 __ai uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) {
29143   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
29144   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
29145   uint16x4_t __ret;
29146   __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
29147   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
29148   return __ret;
29149 }
29150 #endif
29151 
29152 #ifdef __LITTLE_ENDIAN__
29153 __ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) {
29154   uint8x8_t __ret;
29155   __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
29156   return __ret;
29157 }
29158 #else
29159 __ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) {
29160   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
29161   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
29162   uint8x8_t __ret;
29163   __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
29164   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
29165   return __ret;
29166 }
29167 #endif
29168 
29169 #ifdef __LITTLE_ENDIAN__
29170 __ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) {
29171   uint32x2_t __ret;
29172   __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
29173   return __ret;
29174 }
29175 #else
29176 __ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) {
29177   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
29178   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
29179   uint32x2_t __ret;
29180   __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
29181   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
29182   return __ret;
29183 }
29184 #endif
29185 
29186 #ifdef __LITTLE_ENDIAN__
29187 __ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) {
29188   uint16x4_t __ret;
29189   __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
29190   return __ret;
29191 }
29192 #else
29193 __ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) {
29194   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
29195   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
29196   uint16x4_t __ret;
29197   __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
29198   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
29199   return __ret;
29200 }
29201 #endif
29202 
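/* vuzp/vuzpq: unzip (de-interleave). val[0] gathers the even-indexed lanes of both
 * inputs and val[1] the odd-indexed lanes. */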
29203 #ifdef __LITTLE_ENDIAN__
29204 __ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) {
29205   poly8x8x2_t __ret;
29206   __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4);
29207   return __ret;
29208 }
29209 #else
29210 __ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) {
29211   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
29212   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
29213   poly8x8x2_t __ret;
29214   __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
29215 
29216   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
29217   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
29218   return __ret;
29219 }
29220 #endif
29221 
29222 #ifdef __LITTLE_ENDIAN__
29223 __ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) {
29224   poly16x4x2_t __ret;
29225   __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5);
29226   return __ret;
29227 }
29228 #else
29229 __ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) {
29230   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
29231   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
29232   poly16x4x2_t __ret;
29233   __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
29234 
29235   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
29236   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
29237   return __ret;
29238 }
29239 #endif
29240 
29241 #ifdef __LITTLE_ENDIAN__
29242 __ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) {
29243   poly8x16x2_t __ret;
29244   __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36);
29245   return __ret;
29246 }
29247 #else
29248 __ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) {
29249   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29250   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29251   poly8x16x2_t __ret;
29252   __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36);
29253 
29254   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29255   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29256   return __ret;
29257 }
29258 #endif
29259 
29260 #ifdef __LITTLE_ENDIAN__
29261 __ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) {
29262   poly16x8x2_t __ret;
29263   __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37);
29264   return __ret;
29265 }
29266 #else
29267 __ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) {
29268   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
29269   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
29270   poly16x8x2_t __ret;
29271   __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37);
29272 
29273   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
29274   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
29275   return __ret;
29276 }
29277 #endif
29278 
29279 #ifdef __LITTLE_ENDIAN__
29280 __ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) {
29281   uint8x16x2_t __ret;
29282   __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48);
29283   return __ret;
29284 }
29285 #else
29286 __ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) {
29287   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29288   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29289   uint8x16x2_t __ret;
29290   __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48);
29291 
29292   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29293   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29294   return __ret;
29295 }
29296 #endif
29297 
29298 #ifdef __LITTLE_ENDIAN__
29299 __ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) {
29300   uint32x4x2_t __ret;
29301   __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50);
29302   return __ret;
29303 }
29304 #else
29305 __ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) {
29306   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
29307   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
29308   uint32x4x2_t __ret;
29309   __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50);
29310 
29311   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
29312   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
29313   return __ret;
29314 }
29315 #endif
29316 
29317 #ifdef __LITTLE_ENDIAN__
29318 __ai uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) {
29319   uint16x8x2_t __ret;
29320   __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49);
29321   return __ret;
29322 }
29323 #else
29324 __ai uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) {
29325   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
29326   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
29327   uint16x8x2_t __ret;
29328   __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49);
29329 
29330   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
29331   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
29332   return __ret;
29333 }
29334 #endif
29335 
29336 #ifdef __LITTLE_ENDIAN__
29337 __ai int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) {
29338   int8x16x2_t __ret;
29339   __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32);
29340   return __ret;
29341 }
29342 #else
29343 __ai int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) {
29344   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29345   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29346   int8x16x2_t __ret;
29347   __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32);
29348 
29349   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29350   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29351   return __ret;
29352 }
29353 #endif
29354 
29355 #ifdef __LITTLE_ENDIAN__
29356 __ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) {
29357   float32x4x2_t __ret;
29358   __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41);
29359   return __ret;
29360 }
29361 #else
29362 __ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) {
29363   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
29364   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
29365   float32x4x2_t __ret;
29366   __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41);
29367 
29368   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
29369   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
29370   return __ret;
29371 }
29372 #endif
29373 
29374 #ifdef __LITTLE_ENDIAN__
29375 __ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) {
29376   int32x4x2_t __ret;
29377   __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34);
29378   return __ret;
29379 }
29380 #else
29381 __ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) {
29382   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
29383   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
29384   int32x4x2_t __ret;
29385   __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34);
29386 
29387   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
29388   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
29389   return __ret;
29390 }
29391 #endif
29392 
29393 #ifdef __LITTLE_ENDIAN__
29394 __ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) {
29395   int16x8x2_t __ret;
29396   __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33);
29397   return __ret;
29398 }
29399 #else
29400 __ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) {
29401   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
29402   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
29403   int16x8x2_t __ret;
29404   __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33);
29405 
29406   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
29407   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
29408   return __ret;
29409 }
29410 #endif
29411 
29412 #ifdef __LITTLE_ENDIAN__
29413 __ai uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) {
29414   uint8x8x2_t __ret;
29415   __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16);
29416   return __ret;
29417 }
29418 #else
29419 __ai uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) {
29420   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
29421   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
29422   uint8x8x2_t __ret;
29423   __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16);
29424 
29425   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
29426   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
29427   return __ret;
29428 }
29429 #endif
29430 
29431 #ifdef __LITTLE_ENDIAN__
29432 __ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) {
29433   uint32x2x2_t __ret;
29434   __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18);
29435   return __ret;
29436 }
29437 #else
29438 __ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) {
29439   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
29440   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
29441   uint32x2x2_t __ret;
29442   __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18);
29443 
29444   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
29445   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
29446   return __ret;
29447 }
29448 #endif
29449 
29450 #ifdef __LITTLE_ENDIAN__
29451 __ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) {
29452   uint16x4x2_t __ret;
29453   __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17);
29454   return __ret;
29455 }
29456 #else
29457 __ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) {
29458   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
29459   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
29460   uint16x4x2_t __ret;
29461   __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17);
29462 
29463   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
29464   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
29465   return __ret;
29466 }
29467 #endif
29468 
29469 #ifdef __LITTLE_ENDIAN__
29470 __ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) {
29471   int8x8x2_t __ret;
29472   __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0);
29473   return __ret;
29474 }
29475 #else
29476 __ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) {
29477   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
29478   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
29479   int8x8x2_t __ret;
29480   __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0);
29481 
29482   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
29483   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
29484   return __ret;
29485 }
29486 #endif
29487 
29488 #ifdef __LITTLE_ENDIAN__
29489 __ai float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) {
29490   float32x2x2_t __ret;
29491   __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9);
29492   return __ret;
29493 }
29494 #else
29495 __ai float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) {
29496   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
29497   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
29498   float32x2x2_t __ret;
29499   __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9);
29500 
29501   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
29502   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
29503   return __ret;
29504 }
29505 #endif
29506 
29507 #ifdef __LITTLE_ENDIAN__
29508 __ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) {
29509   int32x2x2_t __ret;
29510   __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2);
29511   return __ret;
29512 }
29513 #else
29514 __ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) {
29515   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
29516   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
29517   int32x2x2_t __ret;
29518   __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2);
29519 
29520   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
29521   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
29522   return __ret;
29523 }
29524 #endif
29525 
29526 #ifdef __LITTLE_ENDIAN__
29527 __ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) {
29528   int16x4x2_t __ret;
29529   __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1);
29530   return __ret;
29531 }
29532 #else
29533 __ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) {
29534   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
29535   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
29536   int16x4x2_t __ret;
29537   __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1);
29538 
29539   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
29540   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
29541   return __ret;
29542 }
29543 #endif
29544 
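/* vzip/vzipq: interleave ("zip") a pair of vectors. val[0] holds the
 * interleaving of the low halves of the two inputs and val[1] the
 * interleaving of the high halves; the big-endian variants follow the
 * same lane-reversal pattern as the other permute intrinsics above. */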
29545 #ifdef __LITTLE_ENDIAN__
29546 __ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) {
29547   poly8x8x2_t __ret;
29548   __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4);
29549   return __ret;
29550 }
29551 #else
29552 __ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) {
29553   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
29554   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
29555   poly8x8x2_t __ret;
29556   __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
29557 
29558   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
29559   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
29560   return __ret;
29561 }
29562 #endif
29563 
29564 #ifdef __LITTLE_ENDIAN__
29565 __ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) {
29566   poly16x4x2_t __ret;
29567   __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5);
29568   return __ret;
29569 }
29570 #else
29571 __ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) {
29572   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
29573   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
29574   poly16x4x2_t __ret;
29575   __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
29576 
29577   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
29578   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
29579   return __ret;
29580 }
29581 #endif
29582 
29583 #ifdef __LITTLE_ENDIAN__
29584 __ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) {
29585   poly8x16x2_t __ret;
29586   __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36);
29587   return __ret;
29588 }
29589 #else
29590 __ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) {
29591   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29592   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29593   poly8x16x2_t __ret;
29594   __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36);
29595 
29596   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29597   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29598   return __ret;
29599 }
29600 #endif
29601 
29602 #ifdef __LITTLE_ENDIAN__
29603 __ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) {
29604   poly16x8x2_t __ret;
29605   __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37);
29606   return __ret;
29607 }
29608 #else
29609 __ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) {
29610   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
29611   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
29612   poly16x8x2_t __ret;
29613   __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37);
29614 
29615   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
29616   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
29617   return __ret;
29618 }
29619 #endif
29620 
29621 #ifdef __LITTLE_ENDIAN__
29622 __ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) {
29623   uint8x16x2_t __ret;
29624   __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48);
29625   return __ret;
29626 }
29627 #else
29628 __ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) {
29629   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29630   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29631   uint8x16x2_t __ret;
29632   __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48);
29633 
29634   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29635   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29636   return __ret;
29637 }
29638 #endif
29639 
29640 #ifdef __LITTLE_ENDIAN__
29641 __ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) {
29642   uint32x4x2_t __ret;
29643   __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50);
29644   return __ret;
29645 }
29646 #else
29647 __ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) {
29648   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
29649   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
29650   uint32x4x2_t __ret;
29651   __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50);
29652 
29653   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
29654   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
29655   return __ret;
29656 }
29657 #endif
29658 
29659 #ifdef __LITTLE_ENDIAN__
29660 __ai uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) {
29661   uint16x8x2_t __ret;
29662   __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49);
29663   return __ret;
29664 }
29665 #else
29666 __ai uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) {
29667   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
29668   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
29669   uint16x8x2_t __ret;
29670   __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49);
29671 
29672   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
29673   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
29674   return __ret;
29675 }
29676 #endif
29677 
29678 #ifdef __LITTLE_ENDIAN__
29679 __ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) {
29680   int8x16x2_t __ret;
29681   __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32);
29682   return __ret;
29683 }
29684 #else
29685 __ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) {
29686   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29687   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29688   int8x16x2_t __ret;
29689   __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32);
29690 
29691   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29692   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29693   return __ret;
29694 }
29695 #endif
29696 
29697 #ifdef __LITTLE_ENDIAN__
29698 __ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) {
29699   float32x4x2_t __ret;
29700   __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41);
29701   return __ret;
29702 }
29703 #else
29704 __ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) {
29705   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
29706   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
29707   float32x4x2_t __ret;
29708   __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41);
29709 
29710   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
29711   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
29712   return __ret;
29713 }
29714 #endif
29715 
29716 #ifdef __LITTLE_ENDIAN__
29717 __ai int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) {
29718   int32x4x2_t __ret;
29719   __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34);
29720   return __ret;
29721 }
29722 #else
29723 __ai int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) {
29724   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
29725   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
29726   int32x4x2_t __ret;
29727   __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34);
29728 
29729   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
29730   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
29731   return __ret;
29732 }
29733 #endif
29734 
29735 #ifdef __LITTLE_ENDIAN__
29736 __ai int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) {
29737   int16x8x2_t __ret;
29738   __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33);
29739   return __ret;
29740 }
29741 #else
29742 __ai int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) {
29743   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
29744   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
29745   int16x8x2_t __ret;
29746   __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33);
29747 
29748   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
29749   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
29750   return __ret;
29751 }
29752 #endif
29753 
29754 #ifdef __LITTLE_ENDIAN__
29755 __ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) {
29756   uint8x8x2_t __ret;
29757   __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16);
29758   return __ret;
29759 }
29760 #else
29761 __ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) {
29762   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
29763   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
29764   uint8x8x2_t __ret;
29765   __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16);
29766 
29767   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
29768   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
29769   return __ret;
29770 }
29771 #endif
29772 
29773 #ifdef __LITTLE_ENDIAN__
29774 __ai uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) {
29775   uint32x2x2_t __ret;
29776   __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18);
29777   return __ret;
29778 }
29779 #else
29780 __ai uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) {
29781   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
29782   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
29783   uint32x2x2_t __ret;
29784   __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18);
29785 
29786   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
29787   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
29788   return __ret;
29789 }
29790 #endif
29791 
29792 #ifdef __LITTLE_ENDIAN__
29793 __ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) {
29794   uint16x4x2_t __ret;
29795   __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17);
29796   return __ret;
29797 }
29798 #else
29799 __ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) {
29800   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
29801   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
29802   uint16x4x2_t __ret;
29803   __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17);
29804 
29805   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
29806   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
29807   return __ret;
29808 }
29809 #endif
29810 
29811 #ifdef __LITTLE_ENDIAN__
29812 __ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) {
29813   int8x8x2_t __ret;
29814   __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0);
29815   return __ret;
29816 }
29817 #else
29818 __ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) {
29819   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
29820   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
29821   int8x8x2_t __ret;
29822   __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0);
29823 
29824   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
29825   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
29826   return __ret;
29827 }
29828 #endif
29829 
29830 #ifdef __LITTLE_ENDIAN__
29831 __ai float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) {
29832   float32x2x2_t __ret;
29833   __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9);
29834   return __ret;
29835 }
29836 #else
29837 __ai float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) {
29838   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
29839   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
29840   float32x2x2_t __ret;
29841   __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9);
29842 
29843   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
29844   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
29845   return __ret;
29846 }
29847 #endif
29848 
29849 #ifdef __LITTLE_ENDIAN__
29850 __ai int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) {
29851   int32x2x2_t __ret;
29852   __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2);
29853   return __ret;
29854 }
29855 #else
29856 __ai int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) {
29857   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
29858   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
29859   int32x2x2_t __ret;
29860   __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2);
29861 
29862   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
29863   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
29864   return __ret;
29865 }
29866 #endif
29867 
29868 #ifdef __LITTLE_ENDIAN__
29869 __ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) {
29870   int16x4x2_t __ret;
29871   __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1);
29872   return __ret;
29873 }
29874 #else
29875 __ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) {
29876   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
29877   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
29878   int16x4x2_t __ret;
29879   __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1);
29880 
29881   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
29882   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
29883   return __ret;
29884 }
29885 #endif
29886 
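/* vreinterpret/vreinterpretq (the !defined(__aarch64__) block below):
 * reinterpret the bit pattern of one vector type as another of the same
 * width. These are plain casts; no lane conversion or data movement is
 * performed, so the little- and big-endian definitions are identical. */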
29887 #if !defined(__aarch64__)
29888 #ifdef __LITTLE_ENDIAN__
29889 __ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) {
29890   poly8x8_t __ret;
29891   __ret = (poly8x8_t)(__p0);
29892   return __ret;
29893 }
29894 #else
29895 __ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) {
29896   poly8x8_t __ret;
29897   __ret = (poly8x8_t)(__p0);
29898   return __ret;
29899 }
29900 #endif
29901 
29902 #ifdef __LITTLE_ENDIAN__
29903 __ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) {
29904   poly8x8_t __ret;
29905   __ret = (poly8x8_t)(__p0);
29906   return __ret;
29907 }
29908 #else
29909 __ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) {
29910   poly8x8_t __ret;
29911   __ret = (poly8x8_t)(__p0);
29912   return __ret;
29913 }
29914 #endif
29915 
29916 #ifdef __LITTLE_ENDIAN__
29917 __ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) {
29918   poly8x8_t __ret;
29919   __ret = (poly8x8_t)(__p0);
29920   return __ret;
29921 }
29922 #else
29923 __ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) {
29924   poly8x8_t __ret;
29925   __ret = (poly8x8_t)(__p0);
29926   return __ret;
29927 }
29928 #endif
29929 
29930 #ifdef __LITTLE_ENDIAN__
29931 __ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) {
29932   poly8x8_t __ret;
29933   __ret = (poly8x8_t)(__p0);
29934   return __ret;
29935 }
29936 #else
29937 __ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) {
29938   poly8x8_t __ret;
29939   __ret = (poly8x8_t)(__p0);
29940   return __ret;
29941 }
29942 #endif
29943 
29944 #ifdef __LITTLE_ENDIAN__
29945 __ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) {
29946   poly8x8_t __ret;
29947   __ret = (poly8x8_t)(__p0);
29948   return __ret;
29949 }
29950 #else
29951 __ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) {
29952   poly8x8_t __ret;
29953   __ret = (poly8x8_t)(__p0);
29954   return __ret;
29955 }
29956 #endif
29957 
29958 #ifdef __LITTLE_ENDIAN__
29959 __ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) {
29960   poly8x8_t __ret;
29961   __ret = (poly8x8_t)(__p0);
29962   return __ret;
29963 }
29964 #else
29965 __ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) {
29966   poly8x8_t __ret;
29967   __ret = (poly8x8_t)(__p0);
29968   return __ret;
29969 }
29970 #endif
29971 
29972 #ifdef __LITTLE_ENDIAN__
29973 __ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) {
29974   poly8x8_t __ret;
29975   __ret = (poly8x8_t)(__p0);
29976   return __ret;
29977 }
29978 #else
29979 __ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) {
29980   poly8x8_t __ret;
29981   __ret = (poly8x8_t)(__p0);
29982   return __ret;
29983 }
29984 #endif
29985 
29986 #ifdef __LITTLE_ENDIAN__
29987 __ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) {
29988   poly8x8_t __ret;
29989   __ret = (poly8x8_t)(__p0);
29990   return __ret;
29991 }
29992 #else
29993 __ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) {
29994   poly8x8_t __ret;
29995   __ret = (poly8x8_t)(__p0);
29996   return __ret;
29997 }
29998 #endif
29999 
30000 #ifdef __LITTLE_ENDIAN__
30001 __ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) {
30002   poly8x8_t __ret;
30003   __ret = (poly8x8_t)(__p0);
30004   return __ret;
30005 }
30006 #else
30007 __ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) {
30008   poly8x8_t __ret;
30009   __ret = (poly8x8_t)(__p0);
30010   return __ret;
30011 }
30012 #endif
30013 
30014 #ifdef __LITTLE_ENDIAN__
30015 __ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) {
30016   poly8x8_t __ret;
30017   __ret = (poly8x8_t)(__p0);
30018   return __ret;
30019 }
30020 #else
30021 __ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) {
30022   poly8x8_t __ret;
30023   __ret = (poly8x8_t)(__p0);
30024   return __ret;
30025 }
30026 #endif
30027 
30028 #ifdef __LITTLE_ENDIAN__
30029 __ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) {
30030   poly8x8_t __ret;
30031   __ret = (poly8x8_t)(__p0);
30032   return __ret;
30033 }
30034 #else
30035 __ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) {
30036   poly8x8_t __ret;
30037   __ret = (poly8x8_t)(__p0);
30038   return __ret;
30039 }
30040 #endif
30041 
30042 #ifdef __LITTLE_ENDIAN__
30043 __ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) {
30044   poly16x4_t __ret;
30045   __ret = (poly16x4_t)(__p0);
30046   return __ret;
30047 }
30048 #else
30049 __ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) {
30050   poly16x4_t __ret;
30051   __ret = (poly16x4_t)(__p0);
30052   return __ret;
30053 }
30054 #endif
30055 
30056 #ifdef __LITTLE_ENDIAN__
30057 __ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) {
30058   poly16x4_t __ret;
30059   __ret = (poly16x4_t)(__p0);
30060   return __ret;
30061 }
30062 #else
30063 __ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) {
30064   poly16x4_t __ret;
30065   __ret = (poly16x4_t)(__p0);
30066   return __ret;
30067 }
30068 #endif
30069 
30070 #ifdef __LITTLE_ENDIAN__
30071 __ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) {
30072   poly16x4_t __ret;
30073   __ret = (poly16x4_t)(__p0);
30074   return __ret;
30075 }
30076 #else
30077 __ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) {
30078   poly16x4_t __ret;
30079   __ret = (poly16x4_t)(__p0);
30080   return __ret;
30081 }
30082 #endif
30083 
30084 #ifdef __LITTLE_ENDIAN__
30085 __ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) {
30086   poly16x4_t __ret;
30087   __ret = (poly16x4_t)(__p0);
30088   return __ret;
30089 }
30090 #else
30091 __ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) {
30092   poly16x4_t __ret;
30093   __ret = (poly16x4_t)(__p0);
30094   return __ret;
30095 }
30096 #endif
30097 
30098 #ifdef __LITTLE_ENDIAN__
30099 __ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) {
30100   poly16x4_t __ret;
30101   __ret = (poly16x4_t)(__p0);
30102   return __ret;
30103 }
30104 #else
30105 __ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) {
30106   poly16x4_t __ret;
30107   __ret = (poly16x4_t)(__p0);
30108   return __ret;
30109 }
30110 #endif
30111 
30112 #ifdef __LITTLE_ENDIAN__
30113 __ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) {
30114   poly16x4_t __ret;
30115   __ret = (poly16x4_t)(__p0);
30116   return __ret;
30117 }
30118 #else
30119 __ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) {
30120   poly16x4_t __ret;
30121   __ret = (poly16x4_t)(__p0);
30122   return __ret;
30123 }
30124 #endif
30125 
30126 #ifdef __LITTLE_ENDIAN__
30127 __ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) {
30128   poly16x4_t __ret;
30129   __ret = (poly16x4_t)(__p0);
30130   return __ret;
30131 }
30132 #else
30133 __ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) {
30134   poly16x4_t __ret;
30135   __ret = (poly16x4_t)(__p0);
30136   return __ret;
30137 }
30138 #endif
30139 
30140 #ifdef __LITTLE_ENDIAN__
30141 __ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) {
30142   poly16x4_t __ret;
30143   __ret = (poly16x4_t)(__p0);
30144   return __ret;
30145 }
30146 #else
30147 __ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) {
30148   poly16x4_t __ret;
30149   __ret = (poly16x4_t)(__p0);
30150   return __ret;
30151 }
30152 #endif
30153 
30154 #ifdef __LITTLE_ENDIAN__
30155 __ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) {
30156   poly16x4_t __ret;
30157   __ret = (poly16x4_t)(__p0);
30158   return __ret;
30159 }
30160 #else
30161 __ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) {
30162   poly16x4_t __ret;
30163   __ret = (poly16x4_t)(__p0);
30164   return __ret;
30165 }
30166 #endif
30167 
30168 #ifdef __LITTLE_ENDIAN__
30169 __ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) {
30170   poly16x4_t __ret;
30171   __ret = (poly16x4_t)(__p0);
30172   return __ret;
30173 }
30174 #else
30175 __ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) {
30176   poly16x4_t __ret;
30177   __ret = (poly16x4_t)(__p0);
30178   return __ret;
30179 }
30180 #endif
30181 
30182 #ifdef __LITTLE_ENDIAN__
30183 __ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) {
30184   poly16x4_t __ret;
30185   __ret = (poly16x4_t)(__p0);
30186   return __ret;
30187 }
30188 #else
30189 __ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) {
30190   poly16x4_t __ret;
30191   __ret = (poly16x4_t)(__p0);
30192   return __ret;
30193 }
30194 #endif
30195 
30196 #ifdef __LITTLE_ENDIAN__
30197 __ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) {
30198   poly8x16_t __ret;
30199   __ret = (poly8x16_t)(__p0);
30200   return __ret;
30201 }
30202 #else
30203 __ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) {
30204   poly8x16_t __ret;
30205   __ret = (poly8x16_t)(__p0);
30206   return __ret;
30207 }
30208 #endif
30209 
30210 #ifdef __LITTLE_ENDIAN__
30211 __ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) {
30212   poly8x16_t __ret;
30213   __ret = (poly8x16_t)(__p0);
30214   return __ret;
30215 }
30216 #else
30217 __ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) {
30218   poly8x16_t __ret;
30219   __ret = (poly8x16_t)(__p0);
30220   return __ret;
30221 }
30222 #endif
30223 
30224 #ifdef __LITTLE_ENDIAN__
30225 __ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) {
30226   poly8x16_t __ret;
30227   __ret = (poly8x16_t)(__p0);
30228   return __ret;
30229 }
30230 #else
30231 __ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) {
30232   poly8x16_t __ret;
30233   __ret = (poly8x16_t)(__p0);
30234   return __ret;
30235 }
30236 #endif
30237 
30238 #ifdef __LITTLE_ENDIAN__
30239 __ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) {
30240   poly8x16_t __ret;
30241   __ret = (poly8x16_t)(__p0);
30242   return __ret;
30243 }
30244 #else
30245 __ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) {
30246   poly8x16_t __ret;
30247   __ret = (poly8x16_t)(__p0);
30248   return __ret;
30249 }
30250 #endif
30251 
30252 #ifdef __LITTLE_ENDIAN__
30253 __ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) {
30254   poly8x16_t __ret;
30255   __ret = (poly8x16_t)(__p0);
30256   return __ret;
30257 }
30258 #else
30259 __ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) {
30260   poly8x16_t __ret;
30261   __ret = (poly8x16_t)(__p0);
30262   return __ret;
30263 }
30264 #endif
30265 
30266 #ifdef __LITTLE_ENDIAN__
30267 __ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) {
30268   poly8x16_t __ret;
30269   __ret = (poly8x16_t)(__p0);
30270   return __ret;
30271 }
30272 #else
30273 __ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) {
30274   poly8x16_t __ret;
30275   __ret = (poly8x16_t)(__p0);
30276   return __ret;
30277 }
30278 #endif
30279 
30280 #ifdef __LITTLE_ENDIAN__
30281 __ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) {
30282   poly8x16_t __ret;
30283   __ret = (poly8x16_t)(__p0);
30284   return __ret;
30285 }
30286 #else
30287 __ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) {
30288   poly8x16_t __ret;
30289   __ret = (poly8x16_t)(__p0);
30290   return __ret;
30291 }
30292 #endif
30293 
30294 #ifdef __LITTLE_ENDIAN__
30295 __ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) {
30296   poly8x16_t __ret;
30297   __ret = (poly8x16_t)(__p0);
30298   return __ret;
30299 }
30300 #else
30301 __ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) {
30302   poly8x16_t __ret;
30303   __ret = (poly8x16_t)(__p0);
30304   return __ret;
30305 }
30306 #endif
30307 
30308 #ifdef __LITTLE_ENDIAN__
30309 __ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) {
30310   poly8x16_t __ret;
30311   __ret = (poly8x16_t)(__p0);
30312   return __ret;
30313 }
30314 #else
30315 __ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) {
30316   poly8x16_t __ret;
30317   __ret = (poly8x16_t)(__p0);
30318   return __ret;
30319 }
30320 #endif
30321 
30322 #ifdef __LITTLE_ENDIAN__
30323 __ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) {
30324   poly8x16_t __ret;
30325   __ret = (poly8x16_t)(__p0);
30326   return __ret;
30327 }
30328 #else
30329 __ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) {
30330   poly8x16_t __ret;
30331   __ret = (poly8x16_t)(__p0);
30332   return __ret;
30333 }
30334 #endif
30335 
30336 #ifdef __LITTLE_ENDIAN__
30337 __ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) {
30338   poly8x16_t __ret;
30339   __ret = (poly8x16_t)(__p0);
30340   return __ret;
30341 }
30342 #else
30343 __ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) {
30344   poly8x16_t __ret;
30345   __ret = (poly8x16_t)(__p0);
30346   return __ret;
30347 }
30348 #endif
30349 
30350 #ifdef __LITTLE_ENDIAN__
30351 __ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) {
30352   poly16x8_t __ret;
30353   __ret = (poly16x8_t)(__p0);
30354   return __ret;
30355 }
30356 #else
30357 __ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) {
30358   poly16x8_t __ret;
30359   __ret = (poly16x8_t)(__p0);
30360   return __ret;
30361 }
30362 #endif
30363 
30364 #ifdef __LITTLE_ENDIAN__
30365 __ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) {
30366   poly16x8_t __ret;
30367   __ret = (poly16x8_t)(__p0);
30368   return __ret;
30369 }
30370 #else
30371 __ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) {
30372   poly16x8_t __ret;
30373   __ret = (poly16x8_t)(__p0);
30374   return __ret;
30375 }
30376 #endif
30377 
30378 #ifdef __LITTLE_ENDIAN__
30379 __ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) {
30380   poly16x8_t __ret;
30381   __ret = (poly16x8_t)(__p0);
30382   return __ret;
30383 }
30384 #else
30385 __ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) {
30386   poly16x8_t __ret;
30387   __ret = (poly16x8_t)(__p0);
30388   return __ret;
30389 }
30390 #endif
30391 
30392 #ifdef __LITTLE_ENDIAN__
30393 __ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) {
30394   poly16x8_t __ret;
30395   __ret = (poly16x8_t)(__p0);
30396   return __ret;
30397 }
30398 #else
30399 __ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) {
30400   poly16x8_t __ret;
30401   __ret = (poly16x8_t)(__p0);
30402   return __ret;
30403 }
30404 #endif
30405 
30406 #ifdef __LITTLE_ENDIAN__
30407 __ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) {
30408   poly16x8_t __ret;
30409   __ret = (poly16x8_t)(__p0);
30410   return __ret;
30411 }
30412 #else
30413 __ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) {
30414   poly16x8_t __ret;
30415   __ret = (poly16x8_t)(__p0);
30416   return __ret;
30417 }
30418 #endif
30419 
30420 #ifdef __LITTLE_ENDIAN__
30421 __ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) {
30422   poly16x8_t __ret;
30423   __ret = (poly16x8_t)(__p0);
30424   return __ret;
30425 }
30426 #else
30427 __ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) {
30428   poly16x8_t __ret;
30429   __ret = (poly16x8_t)(__p0);
30430   return __ret;
30431 }
30432 #endif
30433 
30434 #ifdef __LITTLE_ENDIAN__
30435 __ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) {
30436   poly16x8_t __ret;
30437   __ret = (poly16x8_t)(__p0);
30438   return __ret;
30439 }
30440 #else
30441 __ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) {
30442   poly16x8_t __ret;
30443   __ret = (poly16x8_t)(__p0);
30444   return __ret;
30445 }
30446 #endif
30447 
30448 #ifdef __LITTLE_ENDIAN__
30449 __ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) {
30450   poly16x8_t __ret;
30451   __ret = (poly16x8_t)(__p0);
30452   return __ret;
30453 }
30454 #else
30455 __ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) {
30456   poly16x8_t __ret;
30457   __ret = (poly16x8_t)(__p0);
30458   return __ret;
30459 }
30460 #endif
30461 
30462 #ifdef __LITTLE_ENDIAN__
30463 __ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) {
30464   poly16x8_t __ret;
30465   __ret = (poly16x8_t)(__p0);
30466   return __ret;
30467 }
30468 #else
30469 __ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) {
30470   poly16x8_t __ret;
30471   __ret = (poly16x8_t)(__p0);
30472   return __ret;
30473 }
30474 #endif
30475 
30476 #ifdef __LITTLE_ENDIAN__
30477 __ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) {
30478   poly16x8_t __ret;
30479   __ret = (poly16x8_t)(__p0);
30480   return __ret;
30481 }
30482 #else
30483 __ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) {
30484   poly16x8_t __ret;
30485   __ret = (poly16x8_t)(__p0);
30486   return __ret;
30487 }
30488 #endif
30489 
30490 #ifdef __LITTLE_ENDIAN__
30491 __ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) {
30492   poly16x8_t __ret;
30493   __ret = (poly16x8_t)(__p0);
30494   return __ret;
30495 }
30496 #else
30497 __ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) {
30498   poly16x8_t __ret;
30499   __ret = (poly16x8_t)(__p0);
30500   return __ret;
30501 }
30502 #endif
30503 
30504 #ifdef __LITTLE_ENDIAN__
30505 __ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) {
30506   uint8x16_t __ret;
30507   __ret = (uint8x16_t)(__p0);
30508   return __ret;
30509 }
30510 #else
30511 __ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) {
30512   uint8x16_t __ret;
30513   __ret = (uint8x16_t)(__p0);
30514   return __ret;
30515 }
30516 #endif
30517 
30518 #ifdef __LITTLE_ENDIAN__
30519 __ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) {
30520   uint8x16_t __ret;
30521   __ret = (uint8x16_t)(__p0);
30522   return __ret;
30523 }
30524 #else
30525 __ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) {
30526   uint8x16_t __ret;
30527   __ret = (uint8x16_t)(__p0);
30528   return __ret;
30529 }
30530 #endif
30531 
30532 #ifdef __LITTLE_ENDIAN__
30533 __ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) {
30534   uint8x16_t __ret;
30535   __ret = (uint8x16_t)(__p0);
30536   return __ret;
30537 }
30538 #else
30539 __ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) {
30540   uint8x16_t __ret;
30541   __ret = (uint8x16_t)(__p0);
30542   return __ret;
30543 }
30544 #endif
30545 
30546 #ifdef __LITTLE_ENDIAN__
30547 __ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) {
30548   uint8x16_t __ret;
30549   __ret = (uint8x16_t)(__p0);
30550   return __ret;
30551 }
30552 #else
30553 __ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) {
30554   uint8x16_t __ret;
30555   __ret = (uint8x16_t)(__p0);
30556   return __ret;
30557 }
30558 #endif
30559 
30560 #ifdef __LITTLE_ENDIAN__
30561 __ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) {
30562   uint8x16_t __ret;
30563   __ret = (uint8x16_t)(__p0);
30564   return __ret;
30565 }
30566 #else
30567 __ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) {
30568   uint8x16_t __ret;
30569   __ret = (uint8x16_t)(__p0);
30570   return __ret;
30571 }
30572 #endif
30573 
30574 #ifdef __LITTLE_ENDIAN__
30575 __ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) {
30576   uint8x16_t __ret;
30577   __ret = (uint8x16_t)(__p0);
30578   return __ret;
30579 }
30580 #else
30581 __ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) {
30582   uint8x16_t __ret;
30583   __ret = (uint8x16_t)(__p0);
30584   return __ret;
30585 }
30586 #endif
30587 
30588 #ifdef __LITTLE_ENDIAN__
30589 __ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) {
30590   uint8x16_t __ret;
30591   __ret = (uint8x16_t)(__p0);
30592   return __ret;
30593 }
30594 #else
30595 __ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) {
30596   uint8x16_t __ret;
30597   __ret = (uint8x16_t)(__p0);
30598   return __ret;
30599 }
30600 #endif
30601 
30602 #ifdef __LITTLE_ENDIAN__
30603 __ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) {
30604   uint8x16_t __ret;
30605   __ret = (uint8x16_t)(__p0);
30606   return __ret;
30607 }
30608 #else
30609 __ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) {
30610   uint8x16_t __ret;
30611   __ret = (uint8x16_t)(__p0);
30612   return __ret;
30613 }
30614 #endif
30615 
30616 #ifdef __LITTLE_ENDIAN__
30617 __ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) {
30618   uint8x16_t __ret;
30619   __ret = (uint8x16_t)(__p0);
30620   return __ret;
30621 }
30622 #else
30623 __ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) {
30624   uint8x16_t __ret;
30625   __ret = (uint8x16_t)(__p0);
30626   return __ret;
30627 }
30628 #endif
30629 
30630 #ifdef __LITTLE_ENDIAN__
30631 __ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) {
30632   uint8x16_t __ret;
30633   __ret = (uint8x16_t)(__p0);
30634   return __ret;
30635 }
30636 #else
30637 __ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) {
30638   uint8x16_t __ret;
30639   __ret = (uint8x16_t)(__p0);
30640   return __ret;
30641 }
30642 #endif
30643 
30644 #ifdef __LITTLE_ENDIAN__
30645 __ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) {
30646   uint8x16_t __ret;
30647   __ret = (uint8x16_t)(__p0);
30648   return __ret;
30649 }
30650 #else
30651 __ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) {
30652   uint8x16_t __ret;
30653   __ret = (uint8x16_t)(__p0);
30654   return __ret;
30655 }
30656 #endif
30657 
30658 #ifdef __LITTLE_ENDIAN__
30659 __ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) {
30660   uint32x4_t __ret;
30661   __ret = (uint32x4_t)(__p0);
30662   return __ret;
30663 }
30664 #else
30665 __ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) {
30666   uint32x4_t __ret;
30667   __ret = (uint32x4_t)(__p0);
30668   return __ret;
30669 }
30670 #endif
30671 
30672 #ifdef __LITTLE_ENDIAN__
30673 __ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) {
30674   uint32x4_t __ret;
30675   __ret = (uint32x4_t)(__p0);
30676   return __ret;
30677 }
30678 #else
30679 __ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) {
30680   uint32x4_t __ret;
30681   __ret = (uint32x4_t)(__p0);
30682   return __ret;
30683 }
30684 #endif
30685 
30686 #ifdef __LITTLE_ENDIAN__
30687 __ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) {
30688   uint32x4_t __ret;
30689   __ret = (uint32x4_t)(__p0);
30690   return __ret;
30691 }
30692 #else
vreinterpretq_u32_u8(uint8x16_t __p0)30693 __ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) {
30694   uint32x4_t __ret;
30695   __ret = (uint32x4_t)(__p0);
30696   return __ret;
30697 }
30698 #endif
30699 
30700 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u32_u64(uint64x2_t __p0)30701 __ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) {
30702   uint32x4_t __ret;
30703   __ret = (uint32x4_t)(__p0);
30704   return __ret;
30705 }
30706 #else
vreinterpretq_u32_u64(uint64x2_t __p0)30707 __ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) {
30708   uint32x4_t __ret;
30709   __ret = (uint32x4_t)(__p0);
30710   return __ret;
30711 }
30712 #endif
30713 
30714 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u32_u16(uint16x8_t __p0)30715 __ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) {
30716   uint32x4_t __ret;
30717   __ret = (uint32x4_t)(__p0);
30718   return __ret;
30719 }
30720 #else
vreinterpretq_u32_u16(uint16x8_t __p0)30721 __ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) {
30722   uint32x4_t __ret;
30723   __ret = (uint32x4_t)(__p0);
30724   return __ret;
30725 }
30726 #endif
30727 
30728 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u32_s8(int8x16_t __p0)30729 __ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) {
30730   uint32x4_t __ret;
30731   __ret = (uint32x4_t)(__p0);
30732   return __ret;
30733 }
30734 #else
vreinterpretq_u32_s8(int8x16_t __p0)30735 __ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) {
30736   uint32x4_t __ret;
30737   __ret = (uint32x4_t)(__p0);
30738   return __ret;
30739 }
30740 #endif
30741 
30742 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u32_f32(float32x4_t __p0)30743 __ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) {
30744   uint32x4_t __ret;
30745   __ret = (uint32x4_t)(__p0);
30746   return __ret;
30747 }
30748 #else
vreinterpretq_u32_f32(float32x4_t __p0)30749 __ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) {
30750   uint32x4_t __ret;
30751   __ret = (uint32x4_t)(__p0);
30752   return __ret;
30753 }
30754 #endif
30755 
30756 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u32_f16(float16x8_t __p0)30757 __ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) {
30758   uint32x4_t __ret;
30759   __ret = (uint32x4_t)(__p0);
30760   return __ret;
30761 }
30762 #else
vreinterpretq_u32_f16(float16x8_t __p0)30763 __ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) {
30764   uint32x4_t __ret;
30765   __ret = (uint32x4_t)(__p0);
30766   return __ret;
30767 }
30768 #endif
30769 
30770 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u32_s32(int32x4_t __p0)30771 __ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) {
30772   uint32x4_t __ret;
30773   __ret = (uint32x4_t)(__p0);
30774   return __ret;
30775 }
30776 #else
vreinterpretq_u32_s32(int32x4_t __p0)30777 __ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) {
30778   uint32x4_t __ret;
30779   __ret = (uint32x4_t)(__p0);
30780   return __ret;
30781 }
30782 #endif
30783 
30784 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u32_s64(int64x2_t __p0)30785 __ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) {
30786   uint32x4_t __ret;
30787   __ret = (uint32x4_t)(__p0);
30788   return __ret;
30789 }
30790 #else
vreinterpretq_u32_s64(int64x2_t __p0)30791 __ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) {
30792   uint32x4_t __ret;
30793   __ret = (uint32x4_t)(__p0);
30794   return __ret;
30795 }
30796 #endif
30797 
30798 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u32_s16(int16x8_t __p0)30799 __ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) {
30800   uint32x4_t __ret;
30801   __ret = (uint32x4_t)(__p0);
30802   return __ret;
30803 }
30804 #else
vreinterpretq_u32_s16(int16x8_t __p0)30805 __ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) {
30806   uint32x4_t __ret;
30807   __ret = (uint32x4_t)(__p0);
30808   return __ret;
30809 }
30810 #endif
30811 
30812 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u64_p8(poly8x16_t __p0)30813 __ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) {
30814   uint64x2_t __ret;
30815   __ret = (uint64x2_t)(__p0);
30816   return __ret;
30817 }
30818 #else
vreinterpretq_u64_p8(poly8x16_t __p0)30819 __ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) {
30820   uint64x2_t __ret;
30821   __ret = (uint64x2_t)(__p0);
30822   return __ret;
30823 }
30824 #endif
30825 
30826 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u64_p16(poly16x8_t __p0)30827 __ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) {
30828   uint64x2_t __ret;
30829   __ret = (uint64x2_t)(__p0);
30830   return __ret;
30831 }
30832 #else
vreinterpretq_u64_p16(poly16x8_t __p0)30833 __ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) {
30834   uint64x2_t __ret;
30835   __ret = (uint64x2_t)(__p0);
30836   return __ret;
30837 }
30838 #endif
30839 
30840 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u64_u8(uint8x16_t __p0)30841 __ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) {
30842   uint64x2_t __ret;
30843   __ret = (uint64x2_t)(__p0);
30844   return __ret;
30845 }
30846 #else
vreinterpretq_u64_u8(uint8x16_t __p0)30847 __ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) {
30848   uint64x2_t __ret;
30849   __ret = (uint64x2_t)(__p0);
30850   return __ret;
30851 }
30852 #endif
30853 
30854 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u64_u32(uint32x4_t __p0)30855 __ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) {
30856   uint64x2_t __ret;
30857   __ret = (uint64x2_t)(__p0);
30858   return __ret;
30859 }
30860 #else
vreinterpretq_u64_u32(uint32x4_t __p0)30861 __ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) {
30862   uint64x2_t __ret;
30863   __ret = (uint64x2_t)(__p0);
30864   return __ret;
30865 }
30866 #endif
30867 
30868 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u64_u16(uint16x8_t __p0)30869 __ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) {
30870   uint64x2_t __ret;
30871   __ret = (uint64x2_t)(__p0);
30872   return __ret;
30873 }
30874 #else
vreinterpretq_u64_u16(uint16x8_t __p0)30875 __ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) {
30876   uint64x2_t __ret;
30877   __ret = (uint64x2_t)(__p0);
30878   return __ret;
30879 }
30880 #endif
30881 
30882 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u64_s8(int8x16_t __p0)30883 __ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) {
30884   uint64x2_t __ret;
30885   __ret = (uint64x2_t)(__p0);
30886   return __ret;
30887 }
30888 #else
vreinterpretq_u64_s8(int8x16_t __p0)30889 __ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) {
30890   uint64x2_t __ret;
30891   __ret = (uint64x2_t)(__p0);
30892   return __ret;
30893 }
30894 #endif
30895 
30896 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u64_f32(float32x4_t __p0)30897 __ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) {
30898   uint64x2_t __ret;
30899   __ret = (uint64x2_t)(__p0);
30900   return __ret;
30901 }
30902 #else
vreinterpretq_u64_f32(float32x4_t __p0)30903 __ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) {
30904   uint64x2_t __ret;
30905   __ret = (uint64x2_t)(__p0);
30906   return __ret;
30907 }
30908 #endif
30909 
30910 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u64_f16(float16x8_t __p0)30911 __ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) {
30912   uint64x2_t __ret;
30913   __ret = (uint64x2_t)(__p0);
30914   return __ret;
30915 }
30916 #else
vreinterpretq_u64_f16(float16x8_t __p0)30917 __ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) {
30918   uint64x2_t __ret;
30919   __ret = (uint64x2_t)(__p0);
30920   return __ret;
30921 }
30922 #endif
30923 
30924 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u64_s32(int32x4_t __p0)30925 __ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) {
30926   uint64x2_t __ret;
30927   __ret = (uint64x2_t)(__p0);
30928   return __ret;
30929 }
30930 #else
vreinterpretq_u64_s32(int32x4_t __p0)30931 __ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) {
30932   uint64x2_t __ret;
30933   __ret = (uint64x2_t)(__p0);
30934   return __ret;
30935 }
30936 #endif
30937 
30938 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u64_s64(int64x2_t __p0)30939 __ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) {
30940   uint64x2_t __ret;
30941   __ret = (uint64x2_t)(__p0);
30942   return __ret;
30943 }
30944 #else
vreinterpretq_u64_s64(int64x2_t __p0)30945 __ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) {
30946   uint64x2_t __ret;
30947   __ret = (uint64x2_t)(__p0);
30948   return __ret;
30949 }
30950 #endif
30951 
30952 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u64_s16(int16x8_t __p0)30953 __ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) {
30954   uint64x2_t __ret;
30955   __ret = (uint64x2_t)(__p0);
30956   return __ret;
30957 }
30958 #else
vreinterpretq_u64_s16(int16x8_t __p0)30959 __ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) {
30960   uint64x2_t __ret;
30961   __ret = (uint64x2_t)(__p0);
30962   return __ret;
30963 }
30964 #endif
30965 
30966 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u16_p8(poly8x16_t __p0)30967 __ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) {
30968   uint16x8_t __ret;
30969   __ret = (uint16x8_t)(__p0);
30970   return __ret;
30971 }
30972 #else
vreinterpretq_u16_p8(poly8x16_t __p0)30973 __ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) {
30974   uint16x8_t __ret;
30975   __ret = (uint16x8_t)(__p0);
30976   return __ret;
30977 }
30978 #endif
30979 
30980 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u16_p16(poly16x8_t __p0)30981 __ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) {
30982   uint16x8_t __ret;
30983   __ret = (uint16x8_t)(__p0);
30984   return __ret;
30985 }
30986 #else
vreinterpretq_u16_p16(poly16x8_t __p0)30987 __ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) {
30988   uint16x8_t __ret;
30989   __ret = (uint16x8_t)(__p0);
30990   return __ret;
30991 }
30992 #endif
30993 
30994 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u16_u8(uint8x16_t __p0)30995 __ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) {
30996   uint16x8_t __ret;
30997   __ret = (uint16x8_t)(__p0);
30998   return __ret;
30999 }
31000 #else
vreinterpretq_u16_u8(uint8x16_t __p0)31001 __ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) {
31002   uint16x8_t __ret;
31003   __ret = (uint16x8_t)(__p0);
31004   return __ret;
31005 }
31006 #endif
31007 
31008 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u16_u32(uint32x4_t __p0)31009 __ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) {
31010   uint16x8_t __ret;
31011   __ret = (uint16x8_t)(__p0);
31012   return __ret;
31013 }
31014 #else
vreinterpretq_u16_u32(uint32x4_t __p0)31015 __ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) {
31016   uint16x8_t __ret;
31017   __ret = (uint16x8_t)(__p0);
31018   return __ret;
31019 }
31020 #endif
31021 
31022 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u16_u64(uint64x2_t __p0)31023 __ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) {
31024   uint16x8_t __ret;
31025   __ret = (uint16x8_t)(__p0);
31026   return __ret;
31027 }
31028 #else
vreinterpretq_u16_u64(uint64x2_t __p0)31029 __ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) {
31030   uint16x8_t __ret;
31031   __ret = (uint16x8_t)(__p0);
31032   return __ret;
31033 }
31034 #endif
31035 
31036 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u16_s8(int8x16_t __p0)31037 __ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) {
31038   uint16x8_t __ret;
31039   __ret = (uint16x8_t)(__p0);
31040   return __ret;
31041 }
31042 #else
vreinterpretq_u16_s8(int8x16_t __p0)31043 __ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) {
31044   uint16x8_t __ret;
31045   __ret = (uint16x8_t)(__p0);
31046   return __ret;
31047 }
31048 #endif
31049 
31050 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u16_f32(float32x4_t __p0)31051 __ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) {
31052   uint16x8_t __ret;
31053   __ret = (uint16x8_t)(__p0);
31054   return __ret;
31055 }
31056 #else
vreinterpretq_u16_f32(float32x4_t __p0)31057 __ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) {
31058   uint16x8_t __ret;
31059   __ret = (uint16x8_t)(__p0);
31060   return __ret;
31061 }
31062 #endif
31063 
31064 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u16_f16(float16x8_t __p0)31065 __ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) {
31066   uint16x8_t __ret;
31067   __ret = (uint16x8_t)(__p0);
31068   return __ret;
31069 }
31070 #else
vreinterpretq_u16_f16(float16x8_t __p0)31071 __ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) {
31072   uint16x8_t __ret;
31073   __ret = (uint16x8_t)(__p0);
31074   return __ret;
31075 }
31076 #endif
31077 
31078 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u16_s32(int32x4_t __p0)31079 __ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) {
31080   uint16x8_t __ret;
31081   __ret = (uint16x8_t)(__p0);
31082   return __ret;
31083 }
31084 #else
vreinterpretq_u16_s32(int32x4_t __p0)31085 __ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) {
31086   uint16x8_t __ret;
31087   __ret = (uint16x8_t)(__p0);
31088   return __ret;
31089 }
31090 #endif
31091 
31092 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u16_s64(int64x2_t __p0)31093 __ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) {
31094   uint16x8_t __ret;
31095   __ret = (uint16x8_t)(__p0);
31096   return __ret;
31097 }
31098 #else
vreinterpretq_u16_s64(int64x2_t __p0)31099 __ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) {
31100   uint16x8_t __ret;
31101   __ret = (uint16x8_t)(__p0);
31102   return __ret;
31103 }
31104 #endif
31105 
31106 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u16_s16(int16x8_t __p0)31107 __ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) {
31108   uint16x8_t __ret;
31109   __ret = (uint16x8_t)(__p0);
31110   return __ret;
31111 }
31112 #else
vreinterpretq_u16_s16(int16x8_t __p0)31113 __ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) {
31114   uint16x8_t __ret;
31115   __ret = (uint16x8_t)(__p0);
31116   return __ret;
31117 }
31118 #endif
31119 
31120 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s8_p8(poly8x16_t __p0)31121 __ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) {
31122   int8x16_t __ret;
31123   __ret = (int8x16_t)(__p0);
31124   return __ret;
31125 }
31126 #else
vreinterpretq_s8_p8(poly8x16_t __p0)31127 __ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) {
31128   int8x16_t __ret;
31129   __ret = (int8x16_t)(__p0);
31130   return __ret;
31131 }
31132 #endif
31133 
31134 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s8_p16(poly16x8_t __p0)31135 __ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) {
31136   int8x16_t __ret;
31137   __ret = (int8x16_t)(__p0);
31138   return __ret;
31139 }
31140 #else
vreinterpretq_s8_p16(poly16x8_t __p0)31141 __ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) {
31142   int8x16_t __ret;
31143   __ret = (int8x16_t)(__p0);
31144   return __ret;
31145 }
31146 #endif
31147 
31148 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s8_u8(uint8x16_t __p0)31149 __ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) {
31150   int8x16_t __ret;
31151   __ret = (int8x16_t)(__p0);
31152   return __ret;
31153 }
31154 #else
vreinterpretq_s8_u8(uint8x16_t __p0)31155 __ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) {
31156   int8x16_t __ret;
31157   __ret = (int8x16_t)(__p0);
31158   return __ret;
31159 }
31160 #endif
31161 
31162 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s8_u32(uint32x4_t __p0)31163 __ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) {
31164   int8x16_t __ret;
31165   __ret = (int8x16_t)(__p0);
31166   return __ret;
31167 }
31168 #else
vreinterpretq_s8_u32(uint32x4_t __p0)31169 __ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) {
31170   int8x16_t __ret;
31171   __ret = (int8x16_t)(__p0);
31172   return __ret;
31173 }
31174 #endif
31175 
31176 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s8_u64(uint64x2_t __p0)31177 __ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) {
31178   int8x16_t __ret;
31179   __ret = (int8x16_t)(__p0);
31180   return __ret;
31181 }
31182 #else
vreinterpretq_s8_u64(uint64x2_t __p0)31183 __ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) {
31184   int8x16_t __ret;
31185   __ret = (int8x16_t)(__p0);
31186   return __ret;
31187 }
31188 #endif
31189 
31190 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s8_u16(uint16x8_t __p0)31191 __ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) {
31192   int8x16_t __ret;
31193   __ret = (int8x16_t)(__p0);
31194   return __ret;
31195 }
31196 #else
vreinterpretq_s8_u16(uint16x8_t __p0)31197 __ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) {
31198   int8x16_t __ret;
31199   __ret = (int8x16_t)(__p0);
31200   return __ret;
31201 }
31202 #endif
31203 
31204 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s8_f32(float32x4_t __p0)31205 __ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) {
31206   int8x16_t __ret;
31207   __ret = (int8x16_t)(__p0);
31208   return __ret;
31209 }
31210 #else
vreinterpretq_s8_f32(float32x4_t __p0)31211 __ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) {
31212   int8x16_t __ret;
31213   __ret = (int8x16_t)(__p0);
31214   return __ret;
31215 }
31216 #endif
31217 
31218 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s8_f16(float16x8_t __p0)31219 __ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) {
31220   int8x16_t __ret;
31221   __ret = (int8x16_t)(__p0);
31222   return __ret;
31223 }
31224 #else
vreinterpretq_s8_f16(float16x8_t __p0)31225 __ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) {
31226   int8x16_t __ret;
31227   __ret = (int8x16_t)(__p0);
31228   return __ret;
31229 }
31230 #endif
31231 
31232 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s8_s32(int32x4_t __p0)31233 __ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) {
31234   int8x16_t __ret;
31235   __ret = (int8x16_t)(__p0);
31236   return __ret;
31237 }
31238 #else
vreinterpretq_s8_s32(int32x4_t __p0)31239 __ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) {
31240   int8x16_t __ret;
31241   __ret = (int8x16_t)(__p0);
31242   return __ret;
31243 }
31244 #endif
31245 
31246 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s8_s64(int64x2_t __p0)31247 __ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) {
31248   int8x16_t __ret;
31249   __ret = (int8x16_t)(__p0);
31250   return __ret;
31251 }
31252 #else
vreinterpretq_s8_s64(int64x2_t __p0)31253 __ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) {
31254   int8x16_t __ret;
31255   __ret = (int8x16_t)(__p0);
31256   return __ret;
31257 }
31258 #endif
31259 
31260 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s8_s16(int16x8_t __p0)31261 __ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) {
31262   int8x16_t __ret;
31263   __ret = (int8x16_t)(__p0);
31264   return __ret;
31265 }
31266 #else
vreinterpretq_s8_s16(int16x8_t __p0)31267 __ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) {
31268   int8x16_t __ret;
31269   __ret = (int8x16_t)(__p0);
31270   return __ret;
31271 }
31272 #endif
31273 
31274 #ifdef __LITTLE_ENDIAN__
vreinterpretq_f32_p8(poly8x16_t __p0)31275 __ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) {
31276   float32x4_t __ret;
31277   __ret = (float32x4_t)(__p0);
31278   return __ret;
31279 }
31280 #else
vreinterpretq_f32_p8(poly8x16_t __p0)31281 __ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) {
31282   float32x4_t __ret;
31283   __ret = (float32x4_t)(__p0);
31284   return __ret;
31285 }
31286 #endif
31287 
31288 #ifdef __LITTLE_ENDIAN__
vreinterpretq_f32_p16(poly16x8_t __p0)31289 __ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) {
31290   float32x4_t __ret;
31291   __ret = (float32x4_t)(__p0);
31292   return __ret;
31293 }
31294 #else
vreinterpretq_f32_p16(poly16x8_t __p0)31295 __ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) {
31296   float32x4_t __ret;
31297   __ret = (float32x4_t)(__p0);
31298   return __ret;
31299 }
31300 #endif
31301 
31302 #ifdef __LITTLE_ENDIAN__
vreinterpretq_f32_u8(uint8x16_t __p0)31303 __ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) {
31304   float32x4_t __ret;
31305   __ret = (float32x4_t)(__p0);
31306   return __ret;
31307 }
31308 #else
vreinterpretq_f32_u8(uint8x16_t __p0)31309 __ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) {
31310   float32x4_t __ret;
31311   __ret = (float32x4_t)(__p0);
31312   return __ret;
31313 }
31314 #endif
31315 
31316 #ifdef __LITTLE_ENDIAN__
vreinterpretq_f32_u32(uint32x4_t __p0)31317 __ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) {
31318   float32x4_t __ret;
31319   __ret = (float32x4_t)(__p0);
31320   return __ret;
31321 }
31322 #else
vreinterpretq_f32_u32(uint32x4_t __p0)31323 __ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) {
31324   float32x4_t __ret;
31325   __ret = (float32x4_t)(__p0);
31326   return __ret;
31327 }
31328 #endif
31329 
31330 #ifdef __LITTLE_ENDIAN__
vreinterpretq_f32_u64(uint64x2_t __p0)31331 __ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) {
31332   float32x4_t __ret;
31333   __ret = (float32x4_t)(__p0);
31334   return __ret;
31335 }
31336 #else
vreinterpretq_f32_u64(uint64x2_t __p0)31337 __ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) {
31338   float32x4_t __ret;
31339   __ret = (float32x4_t)(__p0);
31340   return __ret;
31341 }
31342 #endif
31343 
31344 #ifdef __LITTLE_ENDIAN__
vreinterpretq_f32_u16(uint16x8_t __p0)31345 __ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) {
31346   float32x4_t __ret;
31347   __ret = (float32x4_t)(__p0);
31348   return __ret;
31349 }
31350 #else
vreinterpretq_f32_u16(uint16x8_t __p0)31351 __ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) {
31352   float32x4_t __ret;
31353   __ret = (float32x4_t)(__p0);
31354   return __ret;
31355 }
31356 #endif
31357 
31358 #ifdef __LITTLE_ENDIAN__
vreinterpretq_f32_s8(int8x16_t __p0)31359 __ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) {
31360   float32x4_t __ret;
31361   __ret = (float32x4_t)(__p0);
31362   return __ret;
31363 }
31364 #else
vreinterpretq_f32_s8(int8x16_t __p0)31365 __ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) {
31366   float32x4_t __ret;
31367   __ret = (float32x4_t)(__p0);
31368   return __ret;
31369 }
31370 #endif
31371 
31372 #ifdef __LITTLE_ENDIAN__
vreinterpretq_f32_f16(float16x8_t __p0)31373 __ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) {
31374   float32x4_t __ret;
31375   __ret = (float32x4_t)(__p0);
31376   return __ret;
31377 }
31378 #else
vreinterpretq_f32_f16(float16x8_t __p0)31379 __ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) {
31380   float32x4_t __ret;
31381   __ret = (float32x4_t)(__p0);
31382   return __ret;
31383 }
31384 #endif
31385 
31386 #ifdef __LITTLE_ENDIAN__
vreinterpretq_f32_s32(int32x4_t __p0)31387 __ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) {
31388   float32x4_t __ret;
31389   __ret = (float32x4_t)(__p0);
31390   return __ret;
31391 }
31392 #else
vreinterpretq_f32_s32(int32x4_t __p0)31393 __ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) {
31394   float32x4_t __ret;
31395   __ret = (float32x4_t)(__p0);
31396   return __ret;
31397 }
31398 #endif
31399 
31400 #ifdef __LITTLE_ENDIAN__
vreinterpretq_f32_s64(int64x2_t __p0)31401 __ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) {
31402   float32x4_t __ret;
31403   __ret = (float32x4_t)(__p0);
31404   return __ret;
31405 }
31406 #else
vreinterpretq_f32_s64(int64x2_t __p0)31407 __ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) {
31408   float32x4_t __ret;
31409   __ret = (float32x4_t)(__p0);
31410   return __ret;
31411 }
31412 #endif
31413 
31414 #ifdef __LITTLE_ENDIAN__
vreinterpretq_f32_s16(int16x8_t __p0)31415 __ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) {
31416   float32x4_t __ret;
31417   __ret = (float32x4_t)(__p0);
31418   return __ret;
31419 }
31420 #else
vreinterpretq_f32_s16(int16x8_t __p0)31421 __ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) {
31422   float32x4_t __ret;
31423   __ret = (float32x4_t)(__p0);
31424   return __ret;
31425 }
31426 #endif
31427 
31428 #ifdef __LITTLE_ENDIAN__
vreinterpretq_f16_p8(poly8x16_t __p0)31429 __ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) {
31430   float16x8_t __ret;
31431   __ret = (float16x8_t)(__p0);
31432   return __ret;
31433 }
31434 #else
vreinterpretq_f16_p8(poly8x16_t __p0)31435 __ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) {
31436   float16x8_t __ret;
31437   __ret = (float16x8_t)(__p0);
31438   return __ret;
31439 }
31440 #endif
31441 
31442 #ifdef __LITTLE_ENDIAN__
vreinterpretq_f16_p16(poly16x8_t __p0)31443 __ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) {
31444   float16x8_t __ret;
31445   __ret = (float16x8_t)(__p0);
31446   return __ret;
31447 }
31448 #else
vreinterpretq_f16_p16(poly16x8_t __p0)31449 __ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) {
31450   float16x8_t __ret;
31451   __ret = (float16x8_t)(__p0);
31452   return __ret;
31453 }
31454 #endif
31455 
31456 #ifdef __LITTLE_ENDIAN__
vreinterpretq_f16_u8(uint8x16_t __p0)31457 __ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) {
31458   float16x8_t __ret;
31459   __ret = (float16x8_t)(__p0);
31460   return __ret;
31461 }
31462 #else
vreinterpretq_f16_u8(uint8x16_t __p0)31463 __ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) {
31464   float16x8_t __ret;
31465   __ret = (float16x8_t)(__p0);
31466   return __ret;
31467 }
31468 #endif
31469 
31470 #ifdef __LITTLE_ENDIAN__
vreinterpretq_f16_u32(uint32x4_t __p0)31471 __ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) {
31472   float16x8_t __ret;
31473   __ret = (float16x8_t)(__p0);
31474   return __ret;
31475 }
31476 #else
vreinterpretq_f16_u32(uint32x4_t __p0)31477 __ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) {
31478   float16x8_t __ret;
31479   __ret = (float16x8_t)(__p0);
31480   return __ret;
31481 }
31482 #endif
31483 
31484 #ifdef __LITTLE_ENDIAN__
vreinterpretq_f16_u64(uint64x2_t __p0)31485 __ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) {
31486   float16x8_t __ret;
31487   __ret = (float16x8_t)(__p0);
31488   return __ret;
31489 }
31490 #else
vreinterpretq_f16_u64(uint64x2_t __p0)31491 __ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) {
31492   float16x8_t __ret;
31493   __ret = (float16x8_t)(__p0);
31494   return __ret;
31495 }
31496 #endif
31497 
31498 #ifdef __LITTLE_ENDIAN__
vreinterpretq_f16_u16(uint16x8_t __p0)31499 __ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) {
31500   float16x8_t __ret;
31501   __ret = (float16x8_t)(__p0);
31502   return __ret;
31503 }
31504 #else
vreinterpretq_f16_u16(uint16x8_t __p0)31505 __ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) {
31506   float16x8_t __ret;
31507   __ret = (float16x8_t)(__p0);
31508   return __ret;
31509 }
31510 #endif
31511 
31512 #ifdef __LITTLE_ENDIAN__
vreinterpretq_f16_s8(int8x16_t __p0)31513 __ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) {
31514   float16x8_t __ret;
31515   __ret = (float16x8_t)(__p0);
31516   return __ret;
31517 }
31518 #else
vreinterpretq_f16_s8(int8x16_t __p0)31519 __ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) {
31520   float16x8_t __ret;
31521   __ret = (float16x8_t)(__p0);
31522   return __ret;
31523 }
31524 #endif
31525 
31526 #ifdef __LITTLE_ENDIAN__
vreinterpretq_f16_f32(float32x4_t __p0)31527 __ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) {
31528   float16x8_t __ret;
31529   __ret = (float16x8_t)(__p0);
31530   return __ret;
31531 }
31532 #else
vreinterpretq_f16_f32(float32x4_t __p0)31533 __ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) {
31534   float16x8_t __ret;
31535   __ret = (float16x8_t)(__p0);
31536   return __ret;
31537 }
31538 #endif
31539 
31540 #ifdef __LITTLE_ENDIAN__
vreinterpretq_f16_s32(int32x4_t __p0)31541 __ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) {
31542   float16x8_t __ret;
31543   __ret = (float16x8_t)(__p0);
31544   return __ret;
31545 }
31546 #else
vreinterpretq_f16_s32(int32x4_t __p0)31547 __ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) {
31548   float16x8_t __ret;
31549   __ret = (float16x8_t)(__p0);
31550   return __ret;
31551 }
31552 #endif
31553 
31554 #ifdef __LITTLE_ENDIAN__
vreinterpretq_f16_s64(int64x2_t __p0)31555 __ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) {
31556   float16x8_t __ret;
31557   __ret = (float16x8_t)(__p0);
31558   return __ret;
31559 }
31560 #else
vreinterpretq_f16_s64(int64x2_t __p0)31561 __ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) {
31562   float16x8_t __ret;
31563   __ret = (float16x8_t)(__p0);
31564   return __ret;
31565 }
31566 #endif
31567 
31568 #ifdef __LITTLE_ENDIAN__
vreinterpretq_f16_s16(int16x8_t __p0)31569 __ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) {
31570   float16x8_t __ret;
31571   __ret = (float16x8_t)(__p0);
31572   return __ret;
31573 }
31574 #else
vreinterpretq_f16_s16(int16x8_t __p0)31575 __ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) {
31576   float16x8_t __ret;
31577   __ret = (float16x8_t)(__p0);
31578   return __ret;
31579 }
31580 #endif
31581 
31582 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s32_p8(poly8x16_t __p0)31583 __ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) {
31584   int32x4_t __ret;
31585   __ret = (int32x4_t)(__p0);
31586   return __ret;
31587 }
31588 #else
vreinterpretq_s32_p8(poly8x16_t __p0)31589 __ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) {
31590   int32x4_t __ret;
31591   __ret = (int32x4_t)(__p0);
31592   return __ret;
31593 }
31594 #endif
31595 
31596 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s32_p16(poly16x8_t __p0)31597 __ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) {
31598   int32x4_t __ret;
31599   __ret = (int32x4_t)(__p0);
31600   return __ret;
31601 }
31602 #else
vreinterpretq_s32_p16(poly16x8_t __p0)31603 __ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) {
31604   int32x4_t __ret;
31605   __ret = (int32x4_t)(__p0);
31606   return __ret;
31607 }
31608 #endif
31609 
31610 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s32_u8(uint8x16_t __p0)31611 __ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) {
31612   int32x4_t __ret;
31613   __ret = (int32x4_t)(__p0);
31614   return __ret;
31615 }
31616 #else
vreinterpretq_s32_u8(uint8x16_t __p0)31617 __ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) {
31618   int32x4_t __ret;
31619   __ret = (int32x4_t)(__p0);
31620   return __ret;
31621 }
31622 #endif
31623 
31624 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s32_u32(uint32x4_t __p0)31625 __ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) {
31626   int32x4_t __ret;
31627   __ret = (int32x4_t)(__p0);
31628   return __ret;
31629 }
31630 #else
vreinterpretq_s32_u32(uint32x4_t __p0)31631 __ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) {
31632   int32x4_t __ret;
31633   __ret = (int32x4_t)(__p0);
31634   return __ret;
31635 }
31636 #endif
31637 
31638 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s32_u64(uint64x2_t __p0)31639 __ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) {
31640   int32x4_t __ret;
31641   __ret = (int32x4_t)(__p0);
31642   return __ret;
31643 }
31644 #else
vreinterpretq_s32_u64(uint64x2_t __p0)31645 __ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) {
31646   int32x4_t __ret;
31647   __ret = (int32x4_t)(__p0);
31648   return __ret;
31649 }
31650 #endif
31651 
31652 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s32_u16(uint16x8_t __p0)31653 __ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) {
31654   int32x4_t __ret;
31655   __ret = (int32x4_t)(__p0);
31656   return __ret;
31657 }
31658 #else
vreinterpretq_s32_u16(uint16x8_t __p0)31659 __ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) {
31660   int32x4_t __ret;
31661   __ret = (int32x4_t)(__p0);
31662   return __ret;
31663 }
31664 #endif
31665 
31666 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s32_s8(int8x16_t __p0)31667 __ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) {
31668   int32x4_t __ret;
31669   __ret = (int32x4_t)(__p0);
31670   return __ret;
31671 }
31672 #else
vreinterpretq_s32_s8(int8x16_t __p0)31673 __ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) {
31674   int32x4_t __ret;
31675   __ret = (int32x4_t)(__p0);
31676   return __ret;
31677 }
31678 #endif
31679 
31680 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s32_f32(float32x4_t __p0)31681 __ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) {
31682   int32x4_t __ret;
31683   __ret = (int32x4_t)(__p0);
31684   return __ret;
31685 }
31686 #else
vreinterpretq_s32_f32(float32x4_t __p0)31687 __ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) {
31688   int32x4_t __ret;
31689   __ret = (int32x4_t)(__p0);
31690   return __ret;
31691 }
31692 #endif
31693 
31694 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s32_f16(float16x8_t __p0)31695 __ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) {
31696   int32x4_t __ret;
31697   __ret = (int32x4_t)(__p0);
31698   return __ret;
31699 }
31700 #else
vreinterpretq_s32_f16(float16x8_t __p0)31701 __ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) {
31702   int32x4_t __ret;
31703   __ret = (int32x4_t)(__p0);
31704   return __ret;
31705 }
31706 #endif
31707 
31708 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s32_s64(int64x2_t __p0)31709 __ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) {
31710   int32x4_t __ret;
31711   __ret = (int32x4_t)(__p0);
31712   return __ret;
31713 }
31714 #else
vreinterpretq_s32_s64(int64x2_t __p0)31715 __ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) {
31716   int32x4_t __ret;
31717   __ret = (int32x4_t)(__p0);
31718   return __ret;
31719 }
31720 #endif
31721 
31722 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s32_s16(int16x8_t __p0)31723 __ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) {
31724   int32x4_t __ret;
31725   __ret = (int32x4_t)(__p0);
31726   return __ret;
31727 }
31728 #else
vreinterpretq_s32_s16(int16x8_t __p0)31729 __ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) {
31730   int32x4_t __ret;
31731   __ret = (int32x4_t)(__p0);
31732   return __ret;
31733 }
31734 #endif
31735 
31736 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s64_p8(poly8x16_t __p0)31737 __ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) {
31738   int64x2_t __ret;
31739   __ret = (int64x2_t)(__p0);
31740   return __ret;
31741 }
31742 #else
vreinterpretq_s64_p8(poly8x16_t __p0)31743 __ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) {
31744   int64x2_t __ret;
31745   __ret = (int64x2_t)(__p0);
31746   return __ret;
31747 }
31748 #endif
31749 
31750 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s64_p16(poly16x8_t __p0)31751 __ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) {
31752   int64x2_t __ret;
31753   __ret = (int64x2_t)(__p0);
31754   return __ret;
31755 }
31756 #else
vreinterpretq_s64_p16(poly16x8_t __p0)31757 __ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) {
31758   int64x2_t __ret;
31759   __ret = (int64x2_t)(__p0);
31760   return __ret;
31761 }
31762 #endif
31763 
31764 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s64_u8(uint8x16_t __p0)31765 __ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) {
31766   int64x2_t __ret;
31767   __ret = (int64x2_t)(__p0);
31768   return __ret;
31769 }
31770 #else
vreinterpretq_s64_u8(uint8x16_t __p0)31771 __ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) {
31772   int64x2_t __ret;
31773   __ret = (int64x2_t)(__p0);
31774   return __ret;
31775 }
31776 #endif
31777 
31778 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s64_u32(uint32x4_t __p0)31779 __ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) {
31780   int64x2_t __ret;
31781   __ret = (int64x2_t)(__p0);
31782   return __ret;
31783 }
31784 #else
vreinterpretq_s64_u32(uint32x4_t __p0)31785 __ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) {
31786   int64x2_t __ret;
31787   __ret = (int64x2_t)(__p0);
31788   return __ret;
31789 }
31790 #endif
31791 
31792 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s64_u64(uint64x2_t __p0)31793 __ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) {
31794   int64x2_t __ret;
31795   __ret = (int64x2_t)(__p0);
31796   return __ret;
31797 }
31798 #else
vreinterpretq_s64_u64(uint64x2_t __p0)31799 __ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) {
31800   int64x2_t __ret;
31801   __ret = (int64x2_t)(__p0);
31802   return __ret;
31803 }
31804 #endif
31805 
31806 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s64_u16(uint16x8_t __p0)31807 __ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) {
31808   int64x2_t __ret;
31809   __ret = (int64x2_t)(__p0);
31810   return __ret;
31811 }
31812 #else
vreinterpretq_s64_u16(uint16x8_t __p0)31813 __ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) {
31814   int64x2_t __ret;
31815   __ret = (int64x2_t)(__p0);
31816   return __ret;
31817 }
31818 #endif
31819 
31820 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s64_s8(int8x16_t __p0)31821 __ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) {
31822   int64x2_t __ret;
31823   __ret = (int64x2_t)(__p0);
31824   return __ret;
31825 }
31826 #else
vreinterpretq_s64_s8(int8x16_t __p0)31827 __ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) {
31828   int64x2_t __ret;
31829   __ret = (int64x2_t)(__p0);
31830   return __ret;
31831 }
31832 #endif
31833 
31834 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s64_f32(float32x4_t __p0)31835 __ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) {
31836   int64x2_t __ret;
31837   __ret = (int64x2_t)(__p0);
31838   return __ret;
31839 }
31840 #else
vreinterpretq_s64_f32(float32x4_t __p0)31841 __ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) {
31842   int64x2_t __ret;
31843   __ret = (int64x2_t)(__p0);
31844   return __ret;
31845 }
31846 #endif
31847 
31848 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s64_f16(float16x8_t __p0)31849 __ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) {
31850   int64x2_t __ret;
31851   __ret = (int64x2_t)(__p0);
31852   return __ret;
31853 }
31854 #else
vreinterpretq_s64_f16(float16x8_t __p0)31855 __ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) {
31856   int64x2_t __ret;
31857   __ret = (int64x2_t)(__p0);
31858   return __ret;
31859 }
31860 #endif
31861 
31862 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s64_s32(int32x4_t __p0)31863 __ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) {
31864   int64x2_t __ret;
31865   __ret = (int64x2_t)(__p0);
31866   return __ret;
31867 }
31868 #else
vreinterpretq_s64_s32(int32x4_t __p0)31869 __ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) {
31870   int64x2_t __ret;
31871   __ret = (int64x2_t)(__p0);
31872   return __ret;
31873 }
31874 #endif
31875 
31876 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s64_s16(int16x8_t __p0)31877 __ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) {
31878   int64x2_t __ret;
31879   __ret = (int64x2_t)(__p0);
31880   return __ret;
31881 }
31882 #else
vreinterpretq_s64_s16(int16x8_t __p0)31883 __ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) {
31884   int64x2_t __ret;
31885   __ret = (int64x2_t)(__p0);
31886   return __ret;
31887 }
31888 #endif
31889 
31890 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s16_p8(poly8x16_t __p0)31891 __ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) {
31892   int16x8_t __ret;
31893   __ret = (int16x8_t)(__p0);
31894   return __ret;
31895 }
31896 #else
vreinterpretq_s16_p8(poly8x16_t __p0)31897 __ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) {
31898   int16x8_t __ret;
31899   __ret = (int16x8_t)(__p0);
31900   return __ret;
31901 }
31902 #endif
31903 
31904 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s16_p16(poly16x8_t __p0)31905 __ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) {
31906   int16x8_t __ret;
31907   __ret = (int16x8_t)(__p0);
31908   return __ret;
31909 }
31910 #else
vreinterpretq_s16_p16(poly16x8_t __p0)31911 __ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) {
31912   int16x8_t __ret;
31913   __ret = (int16x8_t)(__p0);
31914   return __ret;
31915 }
31916 #endif
31917 
31918 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s16_u8(uint8x16_t __p0)31919 __ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) {
31920   int16x8_t __ret;
31921   __ret = (int16x8_t)(__p0);
31922   return __ret;
31923 }
31924 #else
vreinterpretq_s16_u8(uint8x16_t __p0)31925 __ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) {
31926   int16x8_t __ret;
31927   __ret = (int16x8_t)(__p0);
31928   return __ret;
31929 }
31930 #endif
31931 
31932 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s16_u32(uint32x4_t __p0)31933 __ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) {
31934   int16x8_t __ret;
31935   __ret = (int16x8_t)(__p0);
31936   return __ret;
31937 }
31938 #else
vreinterpretq_s16_u32(uint32x4_t __p0)31939 __ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) {
31940   int16x8_t __ret;
31941   __ret = (int16x8_t)(__p0);
31942   return __ret;
31943 }
31944 #endif
31945 
31946 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s16_u64(uint64x2_t __p0)31947 __ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) {
31948   int16x8_t __ret;
31949   __ret = (int16x8_t)(__p0);
31950   return __ret;
31951 }
31952 #else
vreinterpretq_s16_u64(uint64x2_t __p0)31953 __ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) {
31954   int16x8_t __ret;
31955   __ret = (int16x8_t)(__p0);
31956   return __ret;
31957 }
31958 #endif
31959 
31960 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s16_u16(uint16x8_t __p0)31961 __ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) {
31962   int16x8_t __ret;
31963   __ret = (int16x8_t)(__p0);
31964   return __ret;
31965 }
31966 #else
vreinterpretq_s16_u16(uint16x8_t __p0)31967 __ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) {
31968   int16x8_t __ret;
31969   __ret = (int16x8_t)(__p0);
31970   return __ret;
31971 }
31972 #endif
31973 
31974 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s16_s8(int8x16_t __p0)31975 __ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) {
31976   int16x8_t __ret;
31977   __ret = (int16x8_t)(__p0);
31978   return __ret;
31979 }
31980 #else
vreinterpretq_s16_s8(int8x16_t __p0)31981 __ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) {
31982   int16x8_t __ret;
31983   __ret = (int16x8_t)(__p0);
31984   return __ret;
31985 }
31986 #endif
31987 
31988 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s16_f32(float32x4_t __p0)31989 __ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) {
31990   int16x8_t __ret;
31991   __ret = (int16x8_t)(__p0);
31992   return __ret;
31993 }
31994 #else
vreinterpretq_s16_f32(float32x4_t __p0)31995 __ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) {
31996   int16x8_t __ret;
31997   __ret = (int16x8_t)(__p0);
31998   return __ret;
31999 }
32000 #endif
32001 
32002 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s16_f16(float16x8_t __p0)32003 __ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) {
32004   int16x8_t __ret;
32005   __ret = (int16x8_t)(__p0);
32006   return __ret;
32007 }
32008 #else
vreinterpretq_s16_f16(float16x8_t __p0)32009 __ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) {
32010   int16x8_t __ret;
32011   __ret = (int16x8_t)(__p0);
32012   return __ret;
32013 }
32014 #endif
32015 
32016 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s16_s32(int32x4_t __p0)32017 __ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) {
32018   int16x8_t __ret;
32019   __ret = (int16x8_t)(__p0);
32020   return __ret;
32021 }
32022 #else
vreinterpretq_s16_s32(int32x4_t __p0)32023 __ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) {
32024   int16x8_t __ret;
32025   __ret = (int16x8_t)(__p0);
32026   return __ret;
32027 }
32028 #endif
32029 
32030 #ifdef __LITTLE_ENDIAN__
vreinterpretq_s16_s64(int64x2_t __p0)32031 __ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) {
32032   int16x8_t __ret;
32033   __ret = (int16x8_t)(__p0);
32034   return __ret;
32035 }
32036 #else
vreinterpretq_s16_s64(int64x2_t __p0)32037 __ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) {
32038   int16x8_t __ret;
32039   __ret = (int16x8_t)(__p0);
32040   return __ret;
32041 }
32042 #endif
32043 
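/*
 * The vreinterpret_* functions that follow are the 64-bit (d-register)
 * counterparts of the vreinterpretq_* functions above: each one simply casts
 * its argument to a vector type of the same overall width without changing
 * any bits.  A minimal illustrative sketch (hypothetical caller code, not one
 * of the generated intrinsics), assuming this header is included:
 *
 *   uint8x8_t raw = vdup_n_u8(0x3f);
 *   uint16x4_t halves = vreinterpret_u16_u8(raw);  // same 64 bits, read as four uint16 lanes
 */
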
32044 #ifdef __LITTLE_ENDIAN__
vreinterpret_u8_p8(poly8x8_t __p0)32045 __ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) {
32046   uint8x8_t __ret;
32047   __ret = (uint8x8_t)(__p0);
32048   return __ret;
32049 }
32050 #else
vreinterpret_u8_p8(poly8x8_t __p0)32051 __ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) {
32052   uint8x8_t __ret;
32053   __ret = (uint8x8_t)(__p0);
32054   return __ret;
32055 }
32056 #endif
32057 
32058 #ifdef __LITTLE_ENDIAN__
vreinterpret_u8_p16(poly16x4_t __p0)32059 __ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) {
32060   uint8x8_t __ret;
32061   __ret = (uint8x8_t)(__p0);
32062   return __ret;
32063 }
32064 #else
vreinterpret_u8_p16(poly16x4_t __p0)32065 __ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) {
32066   uint8x8_t __ret;
32067   __ret = (uint8x8_t)(__p0);
32068   return __ret;
32069 }
32070 #endif
32071 
32072 #ifdef __LITTLE_ENDIAN__
vreinterpret_u8_u32(uint32x2_t __p0)32073 __ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) {
32074   uint8x8_t __ret;
32075   __ret = (uint8x8_t)(__p0);
32076   return __ret;
32077 }
32078 #else
vreinterpret_u8_u32(uint32x2_t __p0)32079 __ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) {
32080   uint8x8_t __ret;
32081   __ret = (uint8x8_t)(__p0);
32082   return __ret;
32083 }
32084 #endif
32085 
32086 #ifdef __LITTLE_ENDIAN__
vreinterpret_u8_u64(uint64x1_t __p0)32087 __ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) {
32088   uint8x8_t __ret;
32089   __ret = (uint8x8_t)(__p0);
32090   return __ret;
32091 }
32092 #else
vreinterpret_u8_u64(uint64x1_t __p0)32093 __ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) {
32094   uint8x8_t __ret;
32095   __ret = (uint8x8_t)(__p0);
32096   return __ret;
32097 }
32098 #endif
32099 
32100 #ifdef __LITTLE_ENDIAN__
vreinterpret_u8_u16(uint16x4_t __p0)32101 __ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) {
32102   uint8x8_t __ret;
32103   __ret = (uint8x8_t)(__p0);
32104   return __ret;
32105 }
32106 #else
vreinterpret_u8_u16(uint16x4_t __p0)32107 __ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) {
32108   uint8x8_t __ret;
32109   __ret = (uint8x8_t)(__p0);
32110   return __ret;
32111 }
32112 #endif
32113 
32114 #ifdef __LITTLE_ENDIAN__
vreinterpret_u8_s8(int8x8_t __p0)32115 __ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) {
32116   uint8x8_t __ret;
32117   __ret = (uint8x8_t)(__p0);
32118   return __ret;
32119 }
32120 #else
vreinterpret_u8_s8(int8x8_t __p0)32121 __ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) {
32122   uint8x8_t __ret;
32123   __ret = (uint8x8_t)(__p0);
32124   return __ret;
32125 }
32126 #endif
32127 
32128 #ifdef __LITTLE_ENDIAN__
vreinterpret_u8_f32(float32x2_t __p0)32129 __ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) {
32130   uint8x8_t __ret;
32131   __ret = (uint8x8_t)(__p0);
32132   return __ret;
32133 }
32134 #else
vreinterpret_u8_f32(float32x2_t __p0)32135 __ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) {
32136   uint8x8_t __ret;
32137   __ret = (uint8x8_t)(__p0);
32138   return __ret;
32139 }
32140 #endif
32141 
32142 #ifdef __LITTLE_ENDIAN__
vreinterpret_u8_f16(float16x4_t __p0)32143 __ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) {
32144   uint8x8_t __ret;
32145   __ret = (uint8x8_t)(__p0);
32146   return __ret;
32147 }
32148 #else
vreinterpret_u8_f16(float16x4_t __p0)32149 __ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) {
32150   uint8x8_t __ret;
32151   __ret = (uint8x8_t)(__p0);
32152   return __ret;
32153 }
32154 #endif
32155 
32156 #ifdef __LITTLE_ENDIAN__
vreinterpret_u8_s32(int32x2_t __p0)32157 __ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) {
32158   uint8x8_t __ret;
32159   __ret = (uint8x8_t)(__p0);
32160   return __ret;
32161 }
32162 #else
vreinterpret_u8_s32(int32x2_t __p0)32163 __ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) {
32164   uint8x8_t __ret;
32165   __ret = (uint8x8_t)(__p0);
32166   return __ret;
32167 }
32168 #endif
32169 
32170 #ifdef __LITTLE_ENDIAN__
vreinterpret_u8_s64(int64x1_t __p0)32171 __ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) {
32172   uint8x8_t __ret;
32173   __ret = (uint8x8_t)(__p0);
32174   return __ret;
32175 }
32176 #else
vreinterpret_u8_s64(int64x1_t __p0)32177 __ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) {
32178   uint8x8_t __ret;
32179   __ret = (uint8x8_t)(__p0);
32180   return __ret;
32181 }
32182 #endif
32183 
32184 #ifdef __LITTLE_ENDIAN__
vreinterpret_u8_s16(int16x4_t __p0)32185 __ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) {
32186   uint8x8_t __ret;
32187   __ret = (uint8x8_t)(__p0);
32188   return __ret;
32189 }
32190 #else
vreinterpret_u8_s16(int16x4_t __p0)32191 __ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) {
32192   uint8x8_t __ret;
32193   __ret = (uint8x8_t)(__p0);
32194   return __ret;
32195 }
32196 #endif
32197 
32198 #ifdef __LITTLE_ENDIAN__
vreinterpret_u32_p8(poly8x8_t __p0)32199 __ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) {
32200   uint32x2_t __ret;
32201   __ret = (uint32x2_t)(__p0);
32202   return __ret;
32203 }
32204 #else
vreinterpret_u32_p8(poly8x8_t __p0)32205 __ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) {
32206   uint32x2_t __ret;
32207   __ret = (uint32x2_t)(__p0);
32208   return __ret;
32209 }
32210 #endif
32211 
32212 #ifdef __LITTLE_ENDIAN__
vreinterpret_u32_p16(poly16x4_t __p0)32213 __ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) {
32214   uint32x2_t __ret;
32215   __ret = (uint32x2_t)(__p0);
32216   return __ret;
32217 }
32218 #else
vreinterpret_u32_p16(poly16x4_t __p0)32219 __ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) {
32220   uint32x2_t __ret;
32221   __ret = (uint32x2_t)(__p0);
32222   return __ret;
32223 }
32224 #endif
32225 
32226 #ifdef __LITTLE_ENDIAN__
vreinterpret_u32_u8(uint8x8_t __p0)32227 __ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) {
32228   uint32x2_t __ret;
32229   __ret = (uint32x2_t)(__p0);
32230   return __ret;
32231 }
32232 #else
vreinterpret_u32_u8(uint8x8_t __p0)32233 __ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) {
32234   uint32x2_t __ret;
32235   __ret = (uint32x2_t)(__p0);
32236   return __ret;
32237 }
32238 #endif
32239 
32240 #ifdef __LITTLE_ENDIAN__
vreinterpret_u32_u64(uint64x1_t __p0)32241 __ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) {
32242   uint32x2_t __ret;
32243   __ret = (uint32x2_t)(__p0);
32244   return __ret;
32245 }
32246 #else
vreinterpret_u32_u64(uint64x1_t __p0)32247 __ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) {
32248   uint32x2_t __ret;
32249   __ret = (uint32x2_t)(__p0);
32250   return __ret;
32251 }
32252 #endif
32253 
32254 #ifdef __LITTLE_ENDIAN__
vreinterpret_u32_u16(uint16x4_t __p0)32255 __ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) {
32256   uint32x2_t __ret;
32257   __ret = (uint32x2_t)(__p0);
32258   return __ret;
32259 }
32260 #else
vreinterpret_u32_u16(uint16x4_t __p0)32261 __ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) {
32262   uint32x2_t __ret;
32263   __ret = (uint32x2_t)(__p0);
32264   return __ret;
32265 }
32266 #endif
32267 
32268 #ifdef __LITTLE_ENDIAN__
vreinterpret_u32_s8(int8x8_t __p0)32269 __ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) {
32270   uint32x2_t __ret;
32271   __ret = (uint32x2_t)(__p0);
32272   return __ret;
32273 }
32274 #else
vreinterpret_u32_s8(int8x8_t __p0)32275 __ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) {
32276   uint32x2_t __ret;
32277   __ret = (uint32x2_t)(__p0);
32278   return __ret;
32279 }
32280 #endif
32281 
32282 #ifdef __LITTLE_ENDIAN__
vreinterpret_u32_f32(float32x2_t __p0)32283 __ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) {
32284   uint32x2_t __ret;
32285   __ret = (uint32x2_t)(__p0);
32286   return __ret;
32287 }
32288 #else
vreinterpret_u32_f32(float32x2_t __p0)32289 __ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) {
32290   uint32x2_t __ret;
32291   __ret = (uint32x2_t)(__p0);
32292   return __ret;
32293 }
32294 #endif
32295 
32296 #ifdef __LITTLE_ENDIAN__
vreinterpret_u32_f16(float16x4_t __p0)32297 __ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) {
32298   uint32x2_t __ret;
32299   __ret = (uint32x2_t)(__p0);
32300   return __ret;
32301 }
32302 #else
vreinterpret_u32_f16(float16x4_t __p0)32303 __ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) {
32304   uint32x2_t __ret;
32305   __ret = (uint32x2_t)(__p0);
32306   return __ret;
32307 }
32308 #endif
32309 
32310 #ifdef __LITTLE_ENDIAN__
vreinterpret_u32_s32(int32x2_t __p0)32311 __ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) {
32312   uint32x2_t __ret;
32313   __ret = (uint32x2_t)(__p0);
32314   return __ret;
32315 }
32316 #else
vreinterpret_u32_s32(int32x2_t __p0)32317 __ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) {
32318   uint32x2_t __ret;
32319   __ret = (uint32x2_t)(__p0);
32320   return __ret;
32321 }
32322 #endif
32323 
32324 #ifdef __LITTLE_ENDIAN__
vreinterpret_u32_s64(int64x1_t __p0)32325 __ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) {
32326   uint32x2_t __ret;
32327   __ret = (uint32x2_t)(__p0);
32328   return __ret;
32329 }
32330 #else
vreinterpret_u32_s64(int64x1_t __p0)32331 __ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) {
32332   uint32x2_t __ret;
32333   __ret = (uint32x2_t)(__p0);
32334   return __ret;
32335 }
32336 #endif
32337 
32338 #ifdef __LITTLE_ENDIAN__
vreinterpret_u32_s16(int16x4_t __p0)32339 __ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) {
32340   uint32x2_t __ret;
32341   __ret = (uint32x2_t)(__p0);
32342   return __ret;
32343 }
32344 #else
vreinterpret_u32_s16(int16x4_t __p0)32345 __ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) {
32346   uint32x2_t __ret;
32347   __ret = (uint32x2_t)(__p0);
32348   return __ret;
32349 }
32350 #endif
32351 
32352 #ifdef __LITTLE_ENDIAN__
vreinterpret_u64_p8(poly8x8_t __p0)32353 __ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) {
32354   uint64x1_t __ret;
32355   __ret = (uint64x1_t)(__p0);
32356   return __ret;
32357 }
32358 #else
vreinterpret_u64_p8(poly8x8_t __p0)32359 __ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) {
32360   uint64x1_t __ret;
32361   __ret = (uint64x1_t)(__p0);
32362   return __ret;
32363 }
32364 #endif
32365 
32366 #ifdef __LITTLE_ENDIAN__
vreinterpret_u64_p16(poly16x4_t __p0)32367 __ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) {
32368   uint64x1_t __ret;
32369   __ret = (uint64x1_t)(__p0);
32370   return __ret;
32371 }
32372 #else
vreinterpret_u64_p16(poly16x4_t __p0)32373 __ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) {
32374   uint64x1_t __ret;
32375   __ret = (uint64x1_t)(__p0);
32376   return __ret;
32377 }
32378 #endif
32379 
32380 #ifdef __LITTLE_ENDIAN__
vreinterpret_u64_u8(uint8x8_t __p0)32381 __ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) {
32382   uint64x1_t __ret;
32383   __ret = (uint64x1_t)(__p0);
32384   return __ret;
32385 }
32386 #else
vreinterpret_u64_u8(uint8x8_t __p0)32387 __ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) {
32388   uint64x1_t __ret;
32389   __ret = (uint64x1_t)(__p0);
32390   return __ret;
32391 }
32392 #endif
32393 
32394 #ifdef __LITTLE_ENDIAN__
vreinterpret_u64_u32(uint32x2_t __p0)32395 __ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) {
32396   uint64x1_t __ret;
32397   __ret = (uint64x1_t)(__p0);
32398   return __ret;
32399 }
32400 #else
vreinterpret_u64_u32(uint32x2_t __p0)32401 __ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) {
32402   uint64x1_t __ret;
32403   __ret = (uint64x1_t)(__p0);
32404   return __ret;
32405 }
32406 #endif
32407 
32408 #ifdef __LITTLE_ENDIAN__
vreinterpret_u64_u16(uint16x4_t __p0)32409 __ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) {
32410   uint64x1_t __ret;
32411   __ret = (uint64x1_t)(__p0);
32412   return __ret;
32413 }
32414 #else
vreinterpret_u64_u16(uint16x4_t __p0)32415 __ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) {
32416   uint64x1_t __ret;
32417   __ret = (uint64x1_t)(__p0);
32418   return __ret;
32419 }
32420 #endif
32421 
32422 #ifdef __LITTLE_ENDIAN__
vreinterpret_u64_s8(int8x8_t __p0)32423 __ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) {
32424   uint64x1_t __ret;
32425   __ret = (uint64x1_t)(__p0);
32426   return __ret;
32427 }
32428 #else
vreinterpret_u64_s8(int8x8_t __p0)32429 __ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) {
32430   uint64x1_t __ret;
32431   __ret = (uint64x1_t)(__p0);
32432   return __ret;
32433 }
32434 #endif
32435 
32436 #ifdef __LITTLE_ENDIAN__
vreinterpret_u64_f32(float32x2_t __p0)32437 __ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) {
32438   uint64x1_t __ret;
32439   __ret = (uint64x1_t)(__p0);
32440   return __ret;
32441 }
32442 #else
vreinterpret_u64_f32(float32x2_t __p0)32443 __ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) {
32444   uint64x1_t __ret;
32445   __ret = (uint64x1_t)(__p0);
32446   return __ret;
32447 }
32448 #endif
32449 
32450 #ifdef __LITTLE_ENDIAN__
vreinterpret_u64_f16(float16x4_t __p0)32451 __ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) {
32452   uint64x1_t __ret;
32453   __ret = (uint64x1_t)(__p0);
32454   return __ret;
32455 }
32456 #else
vreinterpret_u64_f16(float16x4_t __p0)32457 __ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) {
32458   uint64x1_t __ret;
32459   __ret = (uint64x1_t)(__p0);
32460   return __ret;
32461 }
32462 #endif
32463 
32464 #ifdef __LITTLE_ENDIAN__
vreinterpret_u64_s32(int32x2_t __p0)32465 __ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) {
32466   uint64x1_t __ret;
32467   __ret = (uint64x1_t)(__p0);
32468   return __ret;
32469 }
32470 #else
vreinterpret_u64_s32(int32x2_t __p0)32471 __ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) {
32472   uint64x1_t __ret;
32473   __ret = (uint64x1_t)(__p0);
32474   return __ret;
32475 }
32476 #endif
32477 
32478 #ifdef __LITTLE_ENDIAN__
vreinterpret_u64_s64(int64x1_t __p0)32479 __ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) {
32480   uint64x1_t __ret;
32481   __ret = (uint64x1_t)(__p0);
32482   return __ret;
32483 }
32484 #else
vreinterpret_u64_s64(int64x1_t __p0)32485 __ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) {
32486   uint64x1_t __ret;
32487   __ret = (uint64x1_t)(__p0);
32488   return __ret;
32489 }
32490 #endif
32491 
32492 #ifdef __LITTLE_ENDIAN__
32493 __ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) {
32494   uint64x1_t __ret;
32495   __ret = (uint64x1_t)(__p0);
32496   return __ret;
32497 }
32498 #else
32499 __ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) {
32500   uint64x1_t __ret;
32501   __ret = (uint64x1_t)(__p0);
32502   return __ret;
32503 }
32504 #endif
32505 
32506 #ifdef __LITTLE_ENDIAN__
32507 __ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) {
32508   uint16x4_t __ret;
32509   __ret = (uint16x4_t)(__p0);
32510   return __ret;
32511 }
32512 #else
32513 __ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) {
32514   uint16x4_t __ret;
32515   __ret = (uint16x4_t)(__p0);
32516   return __ret;
32517 }
32518 #endif
32519 
32520 #ifdef __LITTLE_ENDIAN__
32521 __ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) {
32522   uint16x4_t __ret;
32523   __ret = (uint16x4_t)(__p0);
32524   return __ret;
32525 }
32526 #else
32527 __ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) {
32528   uint16x4_t __ret;
32529   __ret = (uint16x4_t)(__p0);
32530   return __ret;
32531 }
32532 #endif
32533 
32534 #ifdef __LITTLE_ENDIAN__
32535 __ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) {
32536   uint16x4_t __ret;
32537   __ret = (uint16x4_t)(__p0);
32538   return __ret;
32539 }
32540 #else
32541 __ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) {
32542   uint16x4_t __ret;
32543   __ret = (uint16x4_t)(__p0);
32544   return __ret;
32545 }
32546 #endif
32547 
32548 #ifdef __LITTLE_ENDIAN__
32549 __ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) {
32550   uint16x4_t __ret;
32551   __ret = (uint16x4_t)(__p0);
32552   return __ret;
32553 }
32554 #else
32555 __ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) {
32556   uint16x4_t __ret;
32557   __ret = (uint16x4_t)(__p0);
32558   return __ret;
32559 }
32560 #endif
32561 
32562 #ifdef __LITTLE_ENDIAN__
32563 __ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) {
32564   uint16x4_t __ret;
32565   __ret = (uint16x4_t)(__p0);
32566   return __ret;
32567 }
32568 #else
32569 __ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) {
32570   uint16x4_t __ret;
32571   __ret = (uint16x4_t)(__p0);
32572   return __ret;
32573 }
32574 #endif
32575 
32576 #ifdef __LITTLE_ENDIAN__
32577 __ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) {
32578   uint16x4_t __ret;
32579   __ret = (uint16x4_t)(__p0);
32580   return __ret;
32581 }
32582 #else
32583 __ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) {
32584   uint16x4_t __ret;
32585   __ret = (uint16x4_t)(__p0);
32586   return __ret;
32587 }
32588 #endif
32589 
32590 #ifdef __LITTLE_ENDIAN__
32591 __ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) {
32592   uint16x4_t __ret;
32593   __ret = (uint16x4_t)(__p0);
32594   return __ret;
32595 }
32596 #else
32597 __ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) {
32598   uint16x4_t __ret;
32599   __ret = (uint16x4_t)(__p0);
32600   return __ret;
32601 }
32602 #endif
32603 
32604 #ifdef __LITTLE_ENDIAN__
32605 __ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) {
32606   uint16x4_t __ret;
32607   __ret = (uint16x4_t)(__p0);
32608   return __ret;
32609 }
32610 #else
32611 __ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) {
32612   uint16x4_t __ret;
32613   __ret = (uint16x4_t)(__p0);
32614   return __ret;
32615 }
32616 #endif
32617 
32618 #ifdef __LITTLE_ENDIAN__
32619 __ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) {
32620   uint16x4_t __ret;
32621   __ret = (uint16x4_t)(__p0);
32622   return __ret;
32623 }
32624 #else
32625 __ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) {
32626   uint16x4_t __ret;
32627   __ret = (uint16x4_t)(__p0);
32628   return __ret;
32629 }
32630 #endif
32631 
32632 #ifdef __LITTLE_ENDIAN__
32633 __ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) {
32634   uint16x4_t __ret;
32635   __ret = (uint16x4_t)(__p0);
32636   return __ret;
32637 }
32638 #else
32639 __ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) {
32640   uint16x4_t __ret;
32641   __ret = (uint16x4_t)(__p0);
32642   return __ret;
32643 }
32644 #endif
32645 
32646 #ifdef __LITTLE_ENDIAN__
32647 __ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) {
32648   uint16x4_t __ret;
32649   __ret = (uint16x4_t)(__p0);
32650   return __ret;
32651 }
32652 #else
32653 __ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) {
32654   uint16x4_t __ret;
32655   __ret = (uint16x4_t)(__p0);
32656   return __ret;
32657 }
32658 #endif
32659 
32660 #ifdef __LITTLE_ENDIAN__
32661 __ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) {
32662   int8x8_t __ret;
32663   __ret = (int8x8_t)(__p0);
32664   return __ret;
32665 }
32666 #else
32667 __ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) {
32668   int8x8_t __ret;
32669   __ret = (int8x8_t)(__p0);
32670   return __ret;
32671 }
32672 #endif
32673 
32674 #ifdef __LITTLE_ENDIAN__
32675 __ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) {
32676   int8x8_t __ret;
32677   __ret = (int8x8_t)(__p0);
32678   return __ret;
32679 }
32680 #else
32681 __ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) {
32682   int8x8_t __ret;
32683   __ret = (int8x8_t)(__p0);
32684   return __ret;
32685 }
32686 #endif
32687 
32688 #ifdef __LITTLE_ENDIAN__
32689 __ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) {
32690   int8x8_t __ret;
32691   __ret = (int8x8_t)(__p0);
32692   return __ret;
32693 }
32694 #else
32695 __ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) {
32696   int8x8_t __ret;
32697   __ret = (int8x8_t)(__p0);
32698   return __ret;
32699 }
32700 #endif
32701 
32702 #ifdef __LITTLE_ENDIAN__
32703 __ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) {
32704   int8x8_t __ret;
32705   __ret = (int8x8_t)(__p0);
32706   return __ret;
32707 }
32708 #else
32709 __ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) {
32710   int8x8_t __ret;
32711   __ret = (int8x8_t)(__p0);
32712   return __ret;
32713 }
32714 #endif
32715 
32716 #ifdef __LITTLE_ENDIAN__
32717 __ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) {
32718   int8x8_t __ret;
32719   __ret = (int8x8_t)(__p0);
32720   return __ret;
32721 }
32722 #else
32723 __ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) {
32724   int8x8_t __ret;
32725   __ret = (int8x8_t)(__p0);
32726   return __ret;
32727 }
32728 #endif
32729 
32730 #ifdef __LITTLE_ENDIAN__
32731 __ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) {
32732   int8x8_t __ret;
32733   __ret = (int8x8_t)(__p0);
32734   return __ret;
32735 }
32736 #else
32737 __ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) {
32738   int8x8_t __ret;
32739   __ret = (int8x8_t)(__p0);
32740   return __ret;
32741 }
32742 #endif
32743 
32744 #ifdef __LITTLE_ENDIAN__
32745 __ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) {
32746   int8x8_t __ret;
32747   __ret = (int8x8_t)(__p0);
32748   return __ret;
32749 }
32750 #else
32751 __ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) {
32752   int8x8_t __ret;
32753   __ret = (int8x8_t)(__p0);
32754   return __ret;
32755 }
32756 #endif
32757 
32758 #ifdef __LITTLE_ENDIAN__
32759 __ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) {
32760   int8x8_t __ret;
32761   __ret = (int8x8_t)(__p0);
32762   return __ret;
32763 }
32764 #else
32765 __ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) {
32766   int8x8_t __ret;
32767   __ret = (int8x8_t)(__p0);
32768   return __ret;
32769 }
32770 #endif
32771 
32772 #ifdef __LITTLE_ENDIAN__
32773 __ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) {
32774   int8x8_t __ret;
32775   __ret = (int8x8_t)(__p0);
32776   return __ret;
32777 }
32778 #else
32779 __ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) {
32780   int8x8_t __ret;
32781   __ret = (int8x8_t)(__p0);
32782   return __ret;
32783 }
32784 #endif
32785 
32786 #ifdef __LITTLE_ENDIAN__
32787 __ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) {
32788   int8x8_t __ret;
32789   __ret = (int8x8_t)(__p0);
32790   return __ret;
32791 }
32792 #else
32793 __ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) {
32794   int8x8_t __ret;
32795   __ret = (int8x8_t)(__p0);
32796   return __ret;
32797 }
32798 #endif
32799 
32800 #ifdef __LITTLE_ENDIAN__
32801 __ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) {
32802   int8x8_t __ret;
32803   __ret = (int8x8_t)(__p0);
32804   return __ret;
32805 }
32806 #else
32807 __ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) {
32808   int8x8_t __ret;
32809   __ret = (int8x8_t)(__p0);
32810   return __ret;
32811 }
32812 #endif
32813 
32814 #ifdef __LITTLE_ENDIAN__
32815 __ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) {
32816   float32x2_t __ret;
32817   __ret = (float32x2_t)(__p0);
32818   return __ret;
32819 }
32820 #else
32821 __ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) {
32822   float32x2_t __ret;
32823   __ret = (float32x2_t)(__p0);
32824   return __ret;
32825 }
32826 #endif
32827 
32828 #ifdef __LITTLE_ENDIAN__
32829 __ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) {
32830   float32x2_t __ret;
32831   __ret = (float32x2_t)(__p0);
32832   return __ret;
32833 }
32834 #else
32835 __ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) {
32836   float32x2_t __ret;
32837   __ret = (float32x2_t)(__p0);
32838   return __ret;
32839 }
32840 #endif
32841 
32842 #ifdef __LITTLE_ENDIAN__
32843 __ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) {
32844   float32x2_t __ret;
32845   __ret = (float32x2_t)(__p0);
32846   return __ret;
32847 }
32848 #else
32849 __ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) {
32850   float32x2_t __ret;
32851   __ret = (float32x2_t)(__p0);
32852   return __ret;
32853 }
32854 #endif
32855 
32856 #ifdef __LITTLE_ENDIAN__
32857 __ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) {
32858   float32x2_t __ret;
32859   __ret = (float32x2_t)(__p0);
32860   return __ret;
32861 }
32862 #else
32863 __ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) {
32864   float32x2_t __ret;
32865   __ret = (float32x2_t)(__p0);
32866   return __ret;
32867 }
32868 #endif
32869 
32870 #ifdef __LITTLE_ENDIAN__
32871 __ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) {
32872   float32x2_t __ret;
32873   __ret = (float32x2_t)(__p0);
32874   return __ret;
32875 }
32876 #else
32877 __ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) {
32878   float32x2_t __ret;
32879   __ret = (float32x2_t)(__p0);
32880   return __ret;
32881 }
32882 #endif
32883 
32884 #ifdef __LITTLE_ENDIAN__
32885 __ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) {
32886   float32x2_t __ret;
32887   __ret = (float32x2_t)(__p0);
32888   return __ret;
32889 }
32890 #else
32891 __ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) {
32892   float32x2_t __ret;
32893   __ret = (float32x2_t)(__p0);
32894   return __ret;
32895 }
32896 #endif
32897 
32898 #ifdef __LITTLE_ENDIAN__
32899 __ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) {
32900   float32x2_t __ret;
32901   __ret = (float32x2_t)(__p0);
32902   return __ret;
32903 }
32904 #else
32905 __ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) {
32906   float32x2_t __ret;
32907   __ret = (float32x2_t)(__p0);
32908   return __ret;
32909 }
32910 #endif
32911 
32912 #ifdef __LITTLE_ENDIAN__
32913 __ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) {
32914   float32x2_t __ret;
32915   __ret = (float32x2_t)(__p0);
32916   return __ret;
32917 }
32918 #else
32919 __ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) {
32920   float32x2_t __ret;
32921   __ret = (float32x2_t)(__p0);
32922   return __ret;
32923 }
32924 #endif
32925 
32926 #ifdef __LITTLE_ENDIAN__
32927 __ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) {
32928   float32x2_t __ret;
32929   __ret = (float32x2_t)(__p0);
32930   return __ret;
32931 }
32932 #else
32933 __ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) {
32934   float32x2_t __ret;
32935   __ret = (float32x2_t)(__p0);
32936   return __ret;
32937 }
32938 #endif
32939 
32940 #ifdef __LITTLE_ENDIAN__
32941 __ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) {
32942   float32x2_t __ret;
32943   __ret = (float32x2_t)(__p0);
32944   return __ret;
32945 }
32946 #else
32947 __ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) {
32948   float32x2_t __ret;
32949   __ret = (float32x2_t)(__p0);
32950   return __ret;
32951 }
32952 #endif
32953 
32954 #ifdef __LITTLE_ENDIAN__
32955 __ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) {
32956   float32x2_t __ret;
32957   __ret = (float32x2_t)(__p0);
32958   return __ret;
32959 }
32960 #else
32961 __ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) {
32962   float32x2_t __ret;
32963   __ret = (float32x2_t)(__p0);
32964   return __ret;
32965 }
32966 #endif
32967 
32968 #ifdef __LITTLE_ENDIAN__
32969 __ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) {
32970   float16x4_t __ret;
32971   __ret = (float16x4_t)(__p0);
32972   return __ret;
32973 }
32974 #else
32975 __ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) {
32976   float16x4_t __ret;
32977   __ret = (float16x4_t)(__p0);
32978   return __ret;
32979 }
32980 #endif
32981 
32982 #ifdef __LITTLE_ENDIAN__
32983 __ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) {
32984   float16x4_t __ret;
32985   __ret = (float16x4_t)(__p0);
32986   return __ret;
32987 }
32988 #else
32989 __ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) {
32990   float16x4_t __ret;
32991   __ret = (float16x4_t)(__p0);
32992   return __ret;
32993 }
32994 #endif
32995 
32996 #ifdef __LITTLE_ENDIAN__
32997 __ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) {
32998   float16x4_t __ret;
32999   __ret = (float16x4_t)(__p0);
33000   return __ret;
33001 }
33002 #else
33003 __ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) {
33004   float16x4_t __ret;
33005   __ret = (float16x4_t)(__p0);
33006   return __ret;
33007 }
33008 #endif
33009 
33010 #ifdef __LITTLE_ENDIAN__
33011 __ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) {
33012   float16x4_t __ret;
33013   __ret = (float16x4_t)(__p0);
33014   return __ret;
33015 }
33016 #else
33017 __ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) {
33018   float16x4_t __ret;
33019   __ret = (float16x4_t)(__p0);
33020   return __ret;
33021 }
33022 #endif
33023 
33024 #ifdef __LITTLE_ENDIAN__
33025 __ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) {
33026   float16x4_t __ret;
33027   __ret = (float16x4_t)(__p0);
33028   return __ret;
33029 }
33030 #else
33031 __ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) {
33032   float16x4_t __ret;
33033   __ret = (float16x4_t)(__p0);
33034   return __ret;
33035 }
33036 #endif
33037 
33038 #ifdef __LITTLE_ENDIAN__
33039 __ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) {
33040   float16x4_t __ret;
33041   __ret = (float16x4_t)(__p0);
33042   return __ret;
33043 }
33044 #else
33045 __ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) {
33046   float16x4_t __ret;
33047   __ret = (float16x4_t)(__p0);
33048   return __ret;
33049 }
33050 #endif
33051 
33052 #ifdef __LITTLE_ENDIAN__
33053 __ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) {
33054   float16x4_t __ret;
33055   __ret = (float16x4_t)(__p0);
33056   return __ret;
33057 }
33058 #else
33059 __ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) {
33060   float16x4_t __ret;
33061   __ret = (float16x4_t)(__p0);
33062   return __ret;
33063 }
33064 #endif
33065 
33066 #ifdef __LITTLE_ENDIAN__
33067 __ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) {
33068   float16x4_t __ret;
33069   __ret = (float16x4_t)(__p0);
33070   return __ret;
33071 }
33072 #else
33073 __ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) {
33074   float16x4_t __ret;
33075   __ret = (float16x4_t)(__p0);
33076   return __ret;
33077 }
33078 #endif
33079 
33080 #ifdef __LITTLE_ENDIAN__
33081 __ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) {
33082   float16x4_t __ret;
33083   __ret = (float16x4_t)(__p0);
33084   return __ret;
33085 }
33086 #else
33087 __ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) {
33088   float16x4_t __ret;
33089   __ret = (float16x4_t)(__p0);
33090   return __ret;
33091 }
33092 #endif
33093 
33094 #ifdef __LITTLE_ENDIAN__
33095 __ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) {
33096   float16x4_t __ret;
33097   __ret = (float16x4_t)(__p0);
33098   return __ret;
33099 }
33100 #else
33101 __ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) {
33102   float16x4_t __ret;
33103   __ret = (float16x4_t)(__p0);
33104   return __ret;
33105 }
33106 #endif
33107 
33108 #ifdef __LITTLE_ENDIAN__
33109 __ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) {
33110   float16x4_t __ret;
33111   __ret = (float16x4_t)(__p0);
33112   return __ret;
33113 }
33114 #else
33115 __ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) {
33116   float16x4_t __ret;
33117   __ret = (float16x4_t)(__p0);
33118   return __ret;
33119 }
33120 #endif
33121 
33122 #ifdef __LITTLE_ENDIAN__
33123 __ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) {
33124   int32x2_t __ret;
33125   __ret = (int32x2_t)(__p0);
33126   return __ret;
33127 }
33128 #else
33129 __ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) {
33130   int32x2_t __ret;
33131   __ret = (int32x2_t)(__p0);
33132   return __ret;
33133 }
33134 #endif
33135 
33136 #ifdef __LITTLE_ENDIAN__
33137 __ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) {
33138   int32x2_t __ret;
33139   __ret = (int32x2_t)(__p0);
33140   return __ret;
33141 }
33142 #else
33143 __ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) {
33144   int32x2_t __ret;
33145   __ret = (int32x2_t)(__p0);
33146   return __ret;
33147 }
33148 #endif
33149 
33150 #ifdef __LITTLE_ENDIAN__
33151 __ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) {
33152   int32x2_t __ret;
33153   __ret = (int32x2_t)(__p0);
33154   return __ret;
33155 }
33156 #else
33157 __ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) {
33158   int32x2_t __ret;
33159   __ret = (int32x2_t)(__p0);
33160   return __ret;
33161 }
33162 #endif
33163 
33164 #ifdef __LITTLE_ENDIAN__
33165 __ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) {
33166   int32x2_t __ret;
33167   __ret = (int32x2_t)(__p0);
33168   return __ret;
33169 }
33170 #else
33171 __ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) {
33172   int32x2_t __ret;
33173   __ret = (int32x2_t)(__p0);
33174   return __ret;
33175 }
33176 #endif
33177 
33178 #ifdef __LITTLE_ENDIAN__
33179 __ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) {
33180   int32x2_t __ret;
33181   __ret = (int32x2_t)(__p0);
33182   return __ret;
33183 }
33184 #else
33185 __ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) {
33186   int32x2_t __ret;
33187   __ret = (int32x2_t)(__p0);
33188   return __ret;
33189 }
33190 #endif
33191 
33192 #ifdef __LITTLE_ENDIAN__
33193 __ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) {
33194   int32x2_t __ret;
33195   __ret = (int32x2_t)(__p0);
33196   return __ret;
33197 }
33198 #else
33199 __ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) {
33200   int32x2_t __ret;
33201   __ret = (int32x2_t)(__p0);
33202   return __ret;
33203 }
33204 #endif
33205 
33206 #ifdef __LITTLE_ENDIAN__
33207 __ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) {
33208   int32x2_t __ret;
33209   __ret = (int32x2_t)(__p0);
33210   return __ret;
33211 }
33212 #else
33213 __ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) {
33214   int32x2_t __ret;
33215   __ret = (int32x2_t)(__p0);
33216   return __ret;
33217 }
33218 #endif
33219 
33220 #ifdef __LITTLE_ENDIAN__
33221 __ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) {
33222   int32x2_t __ret;
33223   __ret = (int32x2_t)(__p0);
33224   return __ret;
33225 }
33226 #else
33227 __ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) {
33228   int32x2_t __ret;
33229   __ret = (int32x2_t)(__p0);
33230   return __ret;
33231 }
33232 #endif
33233 
33234 #ifdef __LITTLE_ENDIAN__
33235 __ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) {
33236   int32x2_t __ret;
33237   __ret = (int32x2_t)(__p0);
33238   return __ret;
33239 }
33240 #else
33241 __ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) {
33242   int32x2_t __ret;
33243   __ret = (int32x2_t)(__p0);
33244   return __ret;
33245 }
33246 #endif
33247 
33248 #ifdef __LITTLE_ENDIAN__
33249 __ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) {
33250   int32x2_t __ret;
33251   __ret = (int32x2_t)(__p0);
33252   return __ret;
33253 }
33254 #else
33255 __ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) {
33256   int32x2_t __ret;
33257   __ret = (int32x2_t)(__p0);
33258   return __ret;
33259 }
33260 #endif
33261 
33262 #ifdef __LITTLE_ENDIAN__
33263 __ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) {
33264   int32x2_t __ret;
33265   __ret = (int32x2_t)(__p0);
33266   return __ret;
33267 }
33268 #else
33269 __ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) {
33270   int32x2_t __ret;
33271   __ret = (int32x2_t)(__p0);
33272   return __ret;
33273 }
33274 #endif
33275 
33276 #ifdef __LITTLE_ENDIAN__
33277 __ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) {
33278   int64x1_t __ret;
33279   __ret = (int64x1_t)(__p0);
33280   return __ret;
33281 }
33282 #else
33283 __ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) {
33284   int64x1_t __ret;
33285   __ret = (int64x1_t)(__p0);
33286   return __ret;
33287 }
33288 #endif
33289 
33290 #ifdef __LITTLE_ENDIAN__
33291 __ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) {
33292   int64x1_t __ret;
33293   __ret = (int64x1_t)(__p0);
33294   return __ret;
33295 }
33296 #else
33297 __ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) {
33298   int64x1_t __ret;
33299   __ret = (int64x1_t)(__p0);
33300   return __ret;
33301 }
33302 #endif
33303 
33304 #ifdef __LITTLE_ENDIAN__
33305 __ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) {
33306   int64x1_t __ret;
33307   __ret = (int64x1_t)(__p0);
33308   return __ret;
33309 }
33310 #else
33311 __ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) {
33312   int64x1_t __ret;
33313   __ret = (int64x1_t)(__p0);
33314   return __ret;
33315 }
33316 #endif
33317 
33318 #ifdef __LITTLE_ENDIAN__
33319 __ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) {
33320   int64x1_t __ret;
33321   __ret = (int64x1_t)(__p0);
33322   return __ret;
33323 }
33324 #else
33325 __ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) {
33326   int64x1_t __ret;
33327   __ret = (int64x1_t)(__p0);
33328   return __ret;
33329 }
33330 #endif
33331 
33332 #ifdef __LITTLE_ENDIAN__
33333 __ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) {
33334   int64x1_t __ret;
33335   __ret = (int64x1_t)(__p0);
33336   return __ret;
33337 }
33338 #else
33339 __ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) {
33340   int64x1_t __ret;
33341   __ret = (int64x1_t)(__p0);
33342   return __ret;
33343 }
33344 #endif
33345 
33346 #ifdef __LITTLE_ENDIAN__
33347 __ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) {
33348   int64x1_t __ret;
33349   __ret = (int64x1_t)(__p0);
33350   return __ret;
33351 }
33352 #else
33353 __ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) {
33354   int64x1_t __ret;
33355   __ret = (int64x1_t)(__p0);
33356   return __ret;
33357 }
33358 #endif
33359 
33360 #ifdef __LITTLE_ENDIAN__
33361 __ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) {
33362   int64x1_t __ret;
33363   __ret = (int64x1_t)(__p0);
33364   return __ret;
33365 }
33366 #else
33367 __ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) {
33368   int64x1_t __ret;
33369   __ret = (int64x1_t)(__p0);
33370   return __ret;
33371 }
33372 #endif
33373 
33374 #ifdef __LITTLE_ENDIAN__
33375 __ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) {
33376   int64x1_t __ret;
33377   __ret = (int64x1_t)(__p0);
33378   return __ret;
33379 }
33380 #else
33381 __ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) {
33382   int64x1_t __ret;
33383   __ret = (int64x1_t)(__p0);
33384   return __ret;
33385 }
33386 #endif
33387 
33388 #ifdef __LITTLE_ENDIAN__
33389 __ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) {
33390   int64x1_t __ret;
33391   __ret = (int64x1_t)(__p0);
33392   return __ret;
33393 }
33394 #else
33395 __ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) {
33396   int64x1_t __ret;
33397   __ret = (int64x1_t)(__p0);
33398   return __ret;
33399 }
33400 #endif
33401 
33402 #ifdef __LITTLE_ENDIAN__
33403 __ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) {
33404   int64x1_t __ret;
33405   __ret = (int64x1_t)(__p0);
33406   return __ret;
33407 }
33408 #else
33409 __ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) {
33410   int64x1_t __ret;
33411   __ret = (int64x1_t)(__p0);
33412   return __ret;
33413 }
33414 #endif
33415 
33416 #ifdef __LITTLE_ENDIAN__
33417 __ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) {
33418   int64x1_t __ret;
33419   __ret = (int64x1_t)(__p0);
33420   return __ret;
33421 }
33422 #else
33423 __ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) {
33424   int64x1_t __ret;
33425   __ret = (int64x1_t)(__p0);
33426   return __ret;
33427 }
33428 #endif
33429 
33430 #ifdef __LITTLE_ENDIAN__
33431 __ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) {
33432   int16x4_t __ret;
33433   __ret = (int16x4_t)(__p0);
33434   return __ret;
33435 }
33436 #else
33437 __ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) {
33438   int16x4_t __ret;
33439   __ret = (int16x4_t)(__p0);
33440   return __ret;
33441 }
33442 #endif
33443 
33444 #ifdef __LITTLE_ENDIAN__
33445 __ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) {
33446   int16x4_t __ret;
33447   __ret = (int16x4_t)(__p0);
33448   return __ret;
33449 }
33450 #else
33451 __ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) {
33452   int16x4_t __ret;
33453   __ret = (int16x4_t)(__p0);
33454   return __ret;
33455 }
33456 #endif
33457 
33458 #ifdef __LITTLE_ENDIAN__
33459 __ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) {
33460   int16x4_t __ret;
33461   __ret = (int16x4_t)(__p0);
33462   return __ret;
33463 }
33464 #else
33465 __ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) {
33466   int16x4_t __ret;
33467   __ret = (int16x4_t)(__p0);
33468   return __ret;
33469 }
33470 #endif
33471 
33472 #ifdef __LITTLE_ENDIAN__
33473 __ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) {
33474   int16x4_t __ret;
33475   __ret = (int16x4_t)(__p0);
33476   return __ret;
33477 }
33478 #else
33479 __ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) {
33480   int16x4_t __ret;
33481   __ret = (int16x4_t)(__p0);
33482   return __ret;
33483 }
33484 #endif
33485 
33486 #ifdef __LITTLE_ENDIAN__
33487 __ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) {
33488   int16x4_t __ret;
33489   __ret = (int16x4_t)(__p0);
33490   return __ret;
33491 }
33492 #else
33493 __ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) {
33494   int16x4_t __ret;
33495   __ret = (int16x4_t)(__p0);
33496   return __ret;
33497 }
33498 #endif
33499 
33500 #ifdef __LITTLE_ENDIAN__
33501 __ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) {
33502   int16x4_t __ret;
33503   __ret = (int16x4_t)(__p0);
33504   return __ret;
33505 }
33506 #else
33507 __ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) {
33508   int16x4_t __ret;
33509   __ret = (int16x4_t)(__p0);
33510   return __ret;
33511 }
33512 #endif
33513 
33514 #ifdef __LITTLE_ENDIAN__
33515 __ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) {
33516   int16x4_t __ret;
33517   __ret = (int16x4_t)(__p0);
33518   return __ret;
33519 }
33520 #else
33521 __ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) {
33522   int16x4_t __ret;
33523   __ret = (int16x4_t)(__p0);
33524   return __ret;
33525 }
33526 #endif
33527 
33528 #ifdef __LITTLE_ENDIAN__
33529 __ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) {
33530   int16x4_t __ret;
33531   __ret = (int16x4_t)(__p0);
33532   return __ret;
33533 }
33534 #else
33535 __ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) {
33536   int16x4_t __ret;
33537   __ret = (int16x4_t)(__p0);
33538   return __ret;
33539 }
33540 #endif
33541 
33542 #ifdef __LITTLE_ENDIAN__
33543 __ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) {
33544   int16x4_t __ret;
33545   __ret = (int16x4_t)(__p0);
33546   return __ret;
33547 }
33548 #else
33549 __ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) {
33550   int16x4_t __ret;
33551   __ret = (int16x4_t)(__p0);
33552   return __ret;
33553 }
33554 #endif
33555 
33556 #ifdef __LITTLE_ENDIAN__
33557 __ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) {
33558   int16x4_t __ret;
33559   __ret = (int16x4_t)(__p0);
33560   return __ret;
33561 }
33562 #else
33563 __ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) {
33564   int16x4_t __ret;
33565   __ret = (int16x4_t)(__p0);
33566   return __ret;
33567 }
33568 #endif
33569 
33570 #ifdef __LITTLE_ENDIAN__
33571 __ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) {
33572   int16x4_t __ret;
33573   __ret = (int16x4_t)(__p0);
33574   return __ret;
33575 }
33576 #else
33577 __ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) {
33578   int16x4_t __ret;
33579   __ret = (int16x4_t)(__p0);
33580   return __ret;
33581 }
33582 #endif
33583 
33584 #endif
33585 #if __ARM_ARCH >= 8
33586 #ifdef __LITTLE_ENDIAN__
33587 __ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) {
33588   int32x4_t __ret;
33589   __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__p0, 34);
33590   return __ret;
33591 }
33592 #else
33593 __ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) {
33594   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
33595   int32x4_t __ret;
33596   __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__rev0, 34);
33597   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
33598   return __ret;
33599 }
33600 #endif
33601 
33602 #ifdef __LITTLE_ENDIAN__
33603 __ai int32x2_t vcvta_s32_f32(float32x2_t __p0) {
33604   int32x2_t __ret;
33605   __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__p0, 2);
33606   return __ret;
33607 }
33608 #else
33609 __ai int32x2_t vcvta_s32_f32(float32x2_t __p0) {
33610   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
33611   int32x2_t __ret;
33612   __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__rev0, 2);
33613   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
33614   return __ret;
33615 }
33616 #endif
33617 
33618 #ifdef __LITTLE_ENDIAN__
33619 __ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) {
33620   uint32x4_t __ret;
33621   __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__p0, 50);
33622   return __ret;
33623 }
33624 #else
33625 __ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) {
33626   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
33627   uint32x4_t __ret;
33628   __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__rev0, 50);
33629   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
33630   return __ret;
33631 }
33632 #endif
33633 
33634 #ifdef __LITTLE_ENDIAN__
33635 __ai uint32x2_t vcvta_u32_f32(float32x2_t __p0) {
33636   uint32x2_t __ret;
33637   __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__p0, 18);
33638   return __ret;
33639 }
33640 #else
33641 __ai uint32x2_t vcvta_u32_f32(float32x2_t __p0) {
33642   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
33643   uint32x2_t __ret;
33644   __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__rev0, 18);
33645   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
33646   return __ret;
33647 }
33648 #endif
33649 
33650 #ifdef __LITTLE_ENDIAN__
33651 __ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) {
33652   int32x4_t __ret;
33653   __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__p0, 34);
33654   return __ret;
33655 }
33656 #else
33657 __ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) {
33658   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
33659   int32x4_t __ret;
33660   __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__rev0, 34);
33661   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
33662   return __ret;
33663 }
33664 #endif
33665 
33666 #ifdef __LITTLE_ENDIAN__
33667 __ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) {
33668   int32x2_t __ret;
33669   __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__p0, 2);
33670   return __ret;
33671 }
33672 #else
33673 __ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) {
33674   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
33675   int32x2_t __ret;
33676   __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__rev0, 2);
33677   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
33678   return __ret;
33679 }
33680 #endif
33681 
33682 #ifdef __LITTLE_ENDIAN__
33683 __ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) {
33684   uint32x4_t __ret;
33685   __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__p0, 50);
33686   return __ret;
33687 }
33688 #else
33689 __ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) {
33690   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
33691   uint32x4_t __ret;
33692   __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__rev0, 50);
33693   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
33694   return __ret;
33695 }
33696 #endif
33697 
33698 #ifdef __LITTLE_ENDIAN__
33699 __ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) {
33700   uint32x2_t __ret;
33701   __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__p0, 18);
33702   return __ret;
33703 }
33704 #else
33705 __ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) {
33706   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
33707   uint32x2_t __ret;
33708   __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__rev0, 18);
33709   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
33710   return __ret;
33711 }
33712 #endif
33713 
33714 #ifdef __LITTLE_ENDIAN__
33715 __ai int32x4_t vcvtnq_s32_f32(float32x4_t __p0) {
33716   int32x4_t __ret;
33717   __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__p0, 34);
33718   return __ret;
33719 }
33720 #else
33721 __ai int32x4_t vcvtnq_s32_f32(float32x4_t __p0) {
33722   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
33723   int32x4_t __ret;
33724   __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__rev0, 34);
33725   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
33726   return __ret;
33727 }
33728 #endif
33729 
33730 #ifdef __LITTLE_ENDIAN__
33731 __ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) {
33732   int32x2_t __ret;
33733   __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__p0, 2);
33734   return __ret;
33735 }
33736 #else
33737 __ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) {
33738   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
33739   int32x2_t __ret;
33740   __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__rev0, 2);
33741   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
33742   return __ret;
33743 }
33744 #endif
33745 
33746 #ifdef __LITTLE_ENDIAN__
33747 __ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) {
33748   uint32x4_t __ret;
33749   __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__p0, 50);
33750   return __ret;
33751 }
33752 #else
33753 __ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) {
33754   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
33755   uint32x4_t __ret;
33756   __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__rev0, 50);
33757   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
33758   return __ret;
33759 }
33760 #endif
33761 
33762 #ifdef __LITTLE_ENDIAN__
33763 __ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) {
33764   uint32x2_t __ret;
33765   __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__p0, 18);
33766   return __ret;
33767 }
33768 #else
33769 __ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) {
33770   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
33771   uint32x2_t __ret;
33772   __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__rev0, 18);
33773   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
33774   return __ret;
33775 }
33776 #endif
33777 
33778 #ifdef __LITTLE_ENDIAN__
33779 __ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) {
33780   int32x4_t __ret;
33781   __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__p0, 34);
33782   return __ret;
33783 }
33784 #else
33785 __ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) {
33786   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
33787   int32x4_t __ret;
33788   __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__rev0, 34);
33789   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
33790   return __ret;
33791 }
33792 #endif
33793 
33794 #ifdef __LITTLE_ENDIAN__
33795 __ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) {
33796   int32x2_t __ret;
33797   __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__p0, 2);
33798   return __ret;
33799 }
33800 #else
33801 __ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) {
33802   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
33803   int32x2_t __ret;
33804   __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__rev0, 2);
33805   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
33806   return __ret;
33807 }
33808 #endif
33809 
33810 #ifdef __LITTLE_ENDIAN__
33811 __ai uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) {
33812   uint32x4_t __ret;
33813   __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__p0, 50);
33814   return __ret;
33815 }
33816 #else
33817 __ai uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) {
33818   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
33819   uint32x4_t __ret;
33820   __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__rev0, 50);
33821   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
33822   return __ret;
33823 }
33824 #endif
33825 
33826 #ifdef __LITTLE_ENDIAN__
33827 __ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) {
33828   uint32x2_t __ret;
33829   __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__p0, 18);
33830   return __ret;
33831 }
33832 #else
33833 __ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) {
33834   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
33835   uint32x2_t __ret;
33836   __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__rev0, 18);
33837   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
33838   return __ret;
33839 }
33840 #endif
33841 
33842 #endif
33843 #if __ARM_ARCH >= 8 && defined(__aarch64__)
33844 #ifdef __LITTLE_ENDIAN__
33845 __ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) {
33846   int64x2_t __ret;
33847   __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__p0, 35);
33848   return __ret;
33849 }
33850 #else
33851 __ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) {
33852   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
33853   int64x2_t __ret;
33854   __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__rev0, 35);
33855   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
33856   return __ret;
33857 }
33858 #endif
33859 
33860 #ifdef __LITTLE_ENDIAN__
33861 __ai int64x1_t vcvta_s64_f64(float64x1_t __p0) {
33862   int64x1_t __ret;
33863   __ret = (int64x1_t) __builtin_neon_vcvta_s64_v((int8x8_t)__p0, 3);
33864   return __ret;
33865 }
33866 #else
33867 __ai int64x1_t vcvta_s64_f64(float64x1_t __p0) {
33868   int64x1_t __ret;
33869   __ret = (int64x1_t) __builtin_neon_vcvta_s64_v((int8x8_t)__p0, 3);
33870   return __ret;
33871 }
33872 #endif
33873 
33874 #ifdef __LITTLE_ENDIAN__
33875 __ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) {
33876   uint64x2_t __ret;
33877   __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__p0, 51);
33878   return __ret;
33879 }
33880 #else
33881 __ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) {
33882   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
33883   uint64x2_t __ret;
33884   __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__rev0, 51);
33885   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
33886   return __ret;
33887 }
33888 #endif
33889 
33890 #ifdef __LITTLE_ENDIAN__
33891 __ai uint64x1_t vcvta_u64_f64(float64x1_t __p0) {
33892   uint64x1_t __ret;
33893   __ret = (uint64x1_t) __builtin_neon_vcvta_u64_v((int8x8_t)__p0, 19);
33894   return __ret;
33895 }
33896 #else
33897 __ai uint64x1_t vcvta_u64_f64(float64x1_t __p0) {
33898   uint64x1_t __ret;
33899   __ret = (uint64x1_t) __builtin_neon_vcvta_u64_v((int8x8_t)__p0, 19);
33900   return __ret;
33901 }
33902 #endif
33903 
33904 #ifdef __LITTLE_ENDIAN__
33905 __ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) {
33906   int64x2_t __ret;
33907   __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__p0, 35);
33908   return __ret;
33909 }
33910 #else
33911 __ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) {
33912   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
33913   int64x2_t __ret;
33914   __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__rev0, 35);
33915   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
33916   return __ret;
33917 }
33918 #endif
33919 
33920 #ifdef __LITTLE_ENDIAN__
33921 __ai int64x1_t vcvtm_s64_f64(float64x1_t __p0) {
33922   int64x1_t __ret;
33923   __ret = (int64x1_t) __builtin_neon_vcvtm_s64_v((int8x8_t)__p0, 3);
33924   return __ret;
33925 }
33926 #else
33927 __ai int64x1_t vcvtm_s64_f64(float64x1_t __p0) {
33928   int64x1_t __ret;
33929   __ret = (int64x1_t) __builtin_neon_vcvtm_s64_v((int8x8_t)__p0, 3);
33930   return __ret;
33931 }
33932 #endif
33933 
33934 #ifdef __LITTLE_ENDIAN__
33935 __ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) {
33936   uint64x2_t __ret;
33937   __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__p0, 51);
33938   return __ret;
33939 }
33940 #else
33941 __ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) {
33942   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
33943   uint64x2_t __ret;
33944   __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__rev0, 51);
33945   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
33946   return __ret;
33947 }
33948 #endif
33949 
33950 #ifdef __LITTLE_ENDIAN__
33951 __ai uint64x1_t vcvtm_u64_f64(float64x1_t __p0) {
33952   uint64x1_t __ret;
33953   __ret = (uint64x1_t) __builtin_neon_vcvtm_u64_v((int8x8_t)__p0, 19);
33954   return __ret;
33955 }
33956 #else
33957 __ai uint64x1_t vcvtm_u64_f64(float64x1_t __p0) {
33958   uint64x1_t __ret;
33959   __ret = (uint64x1_t) __builtin_neon_vcvtm_u64_v((int8x8_t)__p0, 19);
33960   return __ret;
33961 }
33962 #endif
33963 
33964 #ifdef __LITTLE_ENDIAN__
33965 __ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) {
33966   int64x2_t __ret;
33967   __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__p0, 35);
33968   return __ret;
33969 }
33970 #else
33971 __ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) {
33972   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
33973   int64x2_t __ret;
33974   __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__rev0, 35);
33975   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
33976   return __ret;
33977 }
33978 #endif
33979 
33980 #ifdef __LITTLE_ENDIAN__
33981 __ai int64x1_t vcvtn_s64_f64(float64x1_t __p0) {
33982   int64x1_t __ret;
33983   __ret = (int64x1_t) __builtin_neon_vcvtn_s64_v((int8x8_t)__p0, 3);
33984   return __ret;
33985 }
33986 #else
33987 __ai int64x1_t vcvtn_s64_f64(float64x1_t __p0) {
33988   int64x1_t __ret;
33989   __ret = (int64x1_t) __builtin_neon_vcvtn_s64_v((int8x8_t)__p0, 3);
33990   return __ret;
33991 }
33992 #endif
33993 
33994 #ifdef __LITTLE_ENDIAN__
33995 __ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) {
33996   uint64x2_t __ret;
33997   __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__p0, 51);
33998   return __ret;
33999 }
34000 #else
34001 __ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) {
34002   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
34003   uint64x2_t __ret;
34004   __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__rev0, 51);
34005   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
34006   return __ret;
34007 }
34008 #endif
34009 
34010 #ifdef __LITTLE_ENDIAN__
34011 __ai uint64x1_t vcvtn_u64_f64(float64x1_t __p0) {
34012   uint64x1_t __ret;
34013   __ret = (uint64x1_t) __builtin_neon_vcvtn_u64_v((int8x8_t)__p0, 19);
34014   return __ret;
34015 }
34016 #else
34017 __ai uint64x1_t vcvtn_u64_f64(float64x1_t __p0) {
34018   uint64x1_t __ret;
34019   __ret = (uint64x1_t) __builtin_neon_vcvtn_u64_v((int8x8_t)__p0, 19);
34020   return __ret;
34021 }
34022 #endif
34023 
34024 #ifdef __LITTLE_ENDIAN__
34025 __ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) {
34026   int64x2_t __ret;
34027   __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__p0, 35);
34028   return __ret;
34029 }
34030 #else
34031 __ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) {
34032   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
34033   int64x2_t __ret;
34034   __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__rev0, 35);
34035   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
34036   return __ret;
34037 }
34038 #endif
34039 
34040 #ifdef __LITTLE_ENDIAN__
34041 __ai int64x1_t vcvtp_s64_f64(float64x1_t __p0) {
34042   int64x1_t __ret;
34043   __ret = (int64x1_t) __builtin_neon_vcvtp_s64_v((int8x8_t)__p0, 3);
34044   return __ret;
34045 }
34046 #else
34047 __ai int64x1_t vcvtp_s64_f64(float64x1_t __p0) {
34048   int64x1_t __ret;
34049   __ret = (int64x1_t) __builtin_neon_vcvtp_s64_v((int8x8_t)__p0, 3);
34050   return __ret;
34051 }
34052 #endif
34053 
34054 #ifdef __LITTLE_ENDIAN__
34055 __ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) {
34056   uint64x2_t __ret;
34057   __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__p0, 51);
34058   return __ret;
34059 }
34060 #else
34061 __ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) {
34062   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
34063   uint64x2_t __ret;
34064   __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__rev0, 51);
34065   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
34066   return __ret;
34067 }
34068 #endif
34069 
34070 #ifdef __LITTLE_ENDIAN__
34071 __ai uint64x1_t vcvtp_u64_f64(float64x1_t __p0) {
34072   uint64x1_t __ret;
34073   __ret = (uint64x1_t) __builtin_neon_vcvtp_u64_v((int8x8_t)__p0, 19);
34074   return __ret;
34075 }
34076 #else
34077 __ai uint64x1_t vcvtp_u64_f64(float64x1_t __p0) {
34078   uint64x1_t __ret;
34079   __ret = (uint64x1_t) __builtin_neon_vcvtp_u64_v((int8x8_t)__p0, 19);
34080   return __ret;
34081 }
34082 #endif
34083 
34084 #ifdef __LITTLE_ENDIAN__
34085 __ai poly8x8_t vreinterpret_p8_p64(poly64x1_t __p0) {
34086   poly8x8_t __ret;
34087   __ret = (poly8x8_t)(__p0);
34088   return __ret;
34089 }
34090 #else
34091 __ai poly8x8_t vreinterpret_p8_p64(poly64x1_t __p0) {
34092   poly8x8_t __ret;
34093   __ret = (poly8x8_t)(__p0);
34094   return __ret;
34095 }
34096 #endif
34097 
34098 #ifdef __LITTLE_ENDIAN__
34099 __ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) {
34100   poly8x8_t __ret;
34101   __ret = (poly8x8_t)(__p0);
34102   return __ret;
34103 }
34104 #else
34105 __ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) {
34106   poly8x8_t __ret;
34107   __ret = (poly8x8_t)(__p0);
34108   return __ret;
34109 }
34110 #endif
34111 
34112 #ifdef __LITTLE_ENDIAN__
34113 __ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) {
34114   poly8x8_t __ret;
34115   __ret = (poly8x8_t)(__p0);
34116   return __ret;
34117 }
34118 #else
34119 __ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) {
34120   poly8x8_t __ret;
34121   __ret = (poly8x8_t)(__p0);
34122   return __ret;
34123 }
34124 #endif
34125 
34126 #ifdef __LITTLE_ENDIAN__
34127 __ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) {
34128   poly8x8_t __ret;
34129   __ret = (poly8x8_t)(__p0);
34130   return __ret;
34131 }
34132 #else
34133 __ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) {
34134   poly8x8_t __ret;
34135   __ret = (poly8x8_t)(__p0);
34136   return __ret;
34137 }
34138 #endif
34139 
34140 #ifdef __LITTLE_ENDIAN__
34141 __ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) {
34142   poly8x8_t __ret;
34143   __ret = (poly8x8_t)(__p0);
34144   return __ret;
34145 }
34146 #else
34147 __ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) {
34148   poly8x8_t __ret;
34149   __ret = (poly8x8_t)(__p0);
34150   return __ret;
34151 }
34152 #endif
34153 
34154 #ifdef __LITTLE_ENDIAN__
34155 __ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) {
34156   poly8x8_t __ret;
34157   __ret = (poly8x8_t)(__p0);
34158   return __ret;
34159 }
34160 #else
34161 __ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) {
34162   poly8x8_t __ret;
34163   __ret = (poly8x8_t)(__p0);
34164   return __ret;
34165 }
34166 #endif
34167 
34168 #ifdef __LITTLE_ENDIAN__
34169 __ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) {
34170   poly8x8_t __ret;
34171   __ret = (poly8x8_t)(__p0);
34172   return __ret;
34173 }
34174 #else
vreinterpret_p8_s8(int8x8_t __p0)34175 __ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) {
34176   poly8x8_t __ret;
34177   __ret = (poly8x8_t)(__p0);
34178   return __ret;
34179 }
34180 #endif
34181 
34182 #ifdef __LITTLE_ENDIAN__
vreinterpret_p8_f64(float64x1_t __p0)34183 __ai poly8x8_t vreinterpret_p8_f64(float64x1_t __p0) {
34184   poly8x8_t __ret;
34185   __ret = (poly8x8_t)(__p0);
34186   return __ret;
34187 }
34188 #else
vreinterpret_p8_f64(float64x1_t __p0)34189 __ai poly8x8_t vreinterpret_p8_f64(float64x1_t __p0) {
34190   poly8x8_t __ret;
34191   __ret = (poly8x8_t)(__p0);
34192   return __ret;
34193 }
34194 #endif
34195 
34196 #ifdef __LITTLE_ENDIAN__
vreinterpret_p8_f32(float32x2_t __p0)34197 __ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) {
34198   poly8x8_t __ret;
34199   __ret = (poly8x8_t)(__p0);
34200   return __ret;
34201 }
34202 #else
vreinterpret_p8_f32(float32x2_t __p0)34203 __ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) {
34204   poly8x8_t __ret;
34205   __ret = (poly8x8_t)(__p0);
34206   return __ret;
34207 }
34208 #endif
34209 
34210 #ifdef __LITTLE_ENDIAN__
vreinterpret_p8_f16(float16x4_t __p0)34211 __ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) {
34212   poly8x8_t __ret;
34213   __ret = (poly8x8_t)(__p0);
34214   return __ret;
34215 }
34216 #else
vreinterpret_p8_f16(float16x4_t __p0)34217 __ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) {
34218   poly8x8_t __ret;
34219   __ret = (poly8x8_t)(__p0);
34220   return __ret;
34221 }
34222 #endif
34223 
34224 #ifdef __LITTLE_ENDIAN__
vreinterpret_p8_s32(int32x2_t __p0)34225 __ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) {
34226   poly8x8_t __ret;
34227   __ret = (poly8x8_t)(__p0);
34228   return __ret;
34229 }
34230 #else
vreinterpret_p8_s32(int32x2_t __p0)34231 __ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) {
34232   poly8x8_t __ret;
34233   __ret = (poly8x8_t)(__p0);
34234   return __ret;
34235 }
34236 #endif
34237 
34238 #ifdef __LITTLE_ENDIAN__
vreinterpret_p8_s64(int64x1_t __p0)34239 __ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) {
34240   poly8x8_t __ret;
34241   __ret = (poly8x8_t)(__p0);
34242   return __ret;
34243 }
34244 #else
vreinterpret_p8_s64(int64x1_t __p0)34245 __ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) {
34246   poly8x8_t __ret;
34247   __ret = (poly8x8_t)(__p0);
34248   return __ret;
34249 }
34250 #endif
34251 
34252 #ifdef __LITTLE_ENDIAN__
vreinterpret_p8_s16(int16x4_t __p0)34253 __ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) {
34254   poly8x8_t __ret;
34255   __ret = (poly8x8_t)(__p0);
34256   return __ret;
34257 }
34258 #else
vreinterpret_p8_s16(int16x4_t __p0)34259 __ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) {
34260   poly8x8_t __ret;
34261   __ret = (poly8x8_t)(__p0);
34262   return __ret;
34263 }
34264 #endif
34265 
34266 #ifdef __LITTLE_ENDIAN__
vreinterpret_p64_p8(poly8x8_t __p0)34267 __ai poly64x1_t vreinterpret_p64_p8(poly8x8_t __p0) {
34268   poly64x1_t __ret;
34269   __ret = (poly64x1_t)(__p0);
34270   return __ret;
34271 }
34272 #else
vreinterpret_p64_p8(poly8x8_t __p0)34273 __ai poly64x1_t vreinterpret_p64_p8(poly8x8_t __p0) {
34274   poly64x1_t __ret;
34275   __ret = (poly64x1_t)(__p0);
34276   return __ret;
34277 }
34278 #endif
34279 
34280 #ifdef __LITTLE_ENDIAN__
vreinterpret_p64_p16(poly16x4_t __p0)34281 __ai poly64x1_t vreinterpret_p64_p16(poly16x4_t __p0) {
34282   poly64x1_t __ret;
34283   __ret = (poly64x1_t)(__p0);
34284   return __ret;
34285 }
34286 #else
vreinterpret_p64_p16(poly16x4_t __p0)34287 __ai poly64x1_t vreinterpret_p64_p16(poly16x4_t __p0) {
34288   poly64x1_t __ret;
34289   __ret = (poly64x1_t)(__p0);
34290   return __ret;
34291 }
34292 #endif
34293 
34294 #ifdef __LITTLE_ENDIAN__
vreinterpret_p64_u8(uint8x8_t __p0)34295 __ai poly64x1_t vreinterpret_p64_u8(uint8x8_t __p0) {
34296   poly64x1_t __ret;
34297   __ret = (poly64x1_t)(__p0);
34298   return __ret;
34299 }
34300 #else
vreinterpret_p64_u8(uint8x8_t __p0)34301 __ai poly64x1_t vreinterpret_p64_u8(uint8x8_t __p0) {
34302   poly64x1_t __ret;
34303   __ret = (poly64x1_t)(__p0);
34304   return __ret;
34305 }
34306 #endif
34307 
34308 #ifdef __LITTLE_ENDIAN__
vreinterpret_p64_u32(uint32x2_t __p0)34309 __ai poly64x1_t vreinterpret_p64_u32(uint32x2_t __p0) {
34310   poly64x1_t __ret;
34311   __ret = (poly64x1_t)(__p0);
34312   return __ret;
34313 }
34314 #else
vreinterpret_p64_u32(uint32x2_t __p0)34315 __ai poly64x1_t vreinterpret_p64_u32(uint32x2_t __p0) {
34316   poly64x1_t __ret;
34317   __ret = (poly64x1_t)(__p0);
34318   return __ret;
34319 }
34320 #endif
34321 
34322 #ifdef __LITTLE_ENDIAN__
vreinterpret_p64_u64(uint64x1_t __p0)34323 __ai poly64x1_t vreinterpret_p64_u64(uint64x1_t __p0) {
34324   poly64x1_t __ret;
34325   __ret = (poly64x1_t)(__p0);
34326   return __ret;
34327 }
34328 #else
vreinterpret_p64_u64(uint64x1_t __p0)34329 __ai poly64x1_t vreinterpret_p64_u64(uint64x1_t __p0) {
34330   poly64x1_t __ret;
34331   __ret = (poly64x1_t)(__p0);
34332   return __ret;
34333 }
34334 #endif
34335 
34336 #ifdef __LITTLE_ENDIAN__
vreinterpret_p64_u16(uint16x4_t __p0)34337 __ai poly64x1_t vreinterpret_p64_u16(uint16x4_t __p0) {
34338   poly64x1_t __ret;
34339   __ret = (poly64x1_t)(__p0);
34340   return __ret;
34341 }
34342 #else
vreinterpret_p64_u16(uint16x4_t __p0)34343 __ai poly64x1_t vreinterpret_p64_u16(uint16x4_t __p0) {
34344   poly64x1_t __ret;
34345   __ret = (poly64x1_t)(__p0);
34346   return __ret;
34347 }
34348 #endif
34349 
34350 #ifdef __LITTLE_ENDIAN__
vreinterpret_p64_s8(int8x8_t __p0)34351 __ai poly64x1_t vreinterpret_p64_s8(int8x8_t __p0) {
34352   poly64x1_t __ret;
34353   __ret = (poly64x1_t)(__p0);
34354   return __ret;
34355 }
34356 #else
vreinterpret_p64_s8(int8x8_t __p0)34357 __ai poly64x1_t vreinterpret_p64_s8(int8x8_t __p0) {
34358   poly64x1_t __ret;
34359   __ret = (poly64x1_t)(__p0);
34360   return __ret;
34361 }
34362 #endif
34363 
34364 #ifdef __LITTLE_ENDIAN__
vreinterpret_p64_f64(float64x1_t __p0)34365 __ai poly64x1_t vreinterpret_p64_f64(float64x1_t __p0) {
34366   poly64x1_t __ret;
34367   __ret = (poly64x1_t)(__p0);
34368   return __ret;
34369 }
34370 #else
vreinterpret_p64_f64(float64x1_t __p0)34371 __ai poly64x1_t vreinterpret_p64_f64(float64x1_t __p0) {
34372   poly64x1_t __ret;
34373   __ret = (poly64x1_t)(__p0);
34374   return __ret;
34375 }
34376 #endif
34377 
34378 #ifdef __LITTLE_ENDIAN__
vreinterpret_p64_f32(float32x2_t __p0)34379 __ai poly64x1_t vreinterpret_p64_f32(float32x2_t __p0) {
34380   poly64x1_t __ret;
34381   __ret = (poly64x1_t)(__p0);
34382   return __ret;
34383 }
34384 #else
vreinterpret_p64_f32(float32x2_t __p0)34385 __ai poly64x1_t vreinterpret_p64_f32(float32x2_t __p0) {
34386   poly64x1_t __ret;
34387   __ret = (poly64x1_t)(__p0);
34388   return __ret;
34389 }
34390 #endif
34391 
34392 #ifdef __LITTLE_ENDIAN__
vreinterpret_p64_f16(float16x4_t __p0)34393 __ai poly64x1_t vreinterpret_p64_f16(float16x4_t __p0) {
34394   poly64x1_t __ret;
34395   __ret = (poly64x1_t)(__p0);
34396   return __ret;
34397 }
34398 #else
vreinterpret_p64_f16(float16x4_t __p0)34399 __ai poly64x1_t vreinterpret_p64_f16(float16x4_t __p0) {
34400   poly64x1_t __ret;
34401   __ret = (poly64x1_t)(__p0);
34402   return __ret;
34403 }
34404 #endif
34405 
34406 #ifdef __LITTLE_ENDIAN__
vreinterpret_p64_s32(int32x2_t __p0)34407 __ai poly64x1_t vreinterpret_p64_s32(int32x2_t __p0) {
34408   poly64x1_t __ret;
34409   __ret = (poly64x1_t)(__p0);
34410   return __ret;
34411 }
34412 #else
vreinterpret_p64_s32(int32x2_t __p0)34413 __ai poly64x1_t vreinterpret_p64_s32(int32x2_t __p0) {
34414   poly64x1_t __ret;
34415   __ret = (poly64x1_t)(__p0);
34416   return __ret;
34417 }
34418 #endif
34419 
34420 #ifdef __LITTLE_ENDIAN__
vreinterpret_p64_s64(int64x1_t __p0)34421 __ai poly64x1_t vreinterpret_p64_s64(int64x1_t __p0) {
34422   poly64x1_t __ret;
34423   __ret = (poly64x1_t)(__p0);
34424   return __ret;
34425 }
34426 #else
vreinterpret_p64_s64(int64x1_t __p0)34427 __ai poly64x1_t vreinterpret_p64_s64(int64x1_t __p0) {
34428   poly64x1_t __ret;
34429   __ret = (poly64x1_t)(__p0);
34430   return __ret;
34431 }
34432 #endif
34433 
34434 #ifdef __LITTLE_ENDIAN__
vreinterpret_p64_s16(int16x4_t __p0)34435 __ai poly64x1_t vreinterpret_p64_s16(int16x4_t __p0) {
34436   poly64x1_t __ret;
34437   __ret = (poly64x1_t)(__p0);
34438   return __ret;
34439 }
34440 #else
vreinterpret_p64_s16(int16x4_t __p0)34441 __ai poly64x1_t vreinterpret_p64_s16(int16x4_t __p0) {
34442   poly64x1_t __ret;
34443   __ret = (poly64x1_t)(__p0);
34444   return __ret;
34445 }
34446 #endif
34447 
34448 #ifdef __LITTLE_ENDIAN__
vreinterpret_p16_p8(poly8x8_t __p0)34449 __ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) {
34450   poly16x4_t __ret;
34451   __ret = (poly16x4_t)(__p0);
34452   return __ret;
34453 }
34454 #else
vreinterpret_p16_p8(poly8x8_t __p0)34455 __ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) {
34456   poly16x4_t __ret;
34457   __ret = (poly16x4_t)(__p0);
34458   return __ret;
34459 }
34460 #endif
34461 
34462 #ifdef __LITTLE_ENDIAN__
vreinterpret_p16_p64(poly64x1_t __p0)34463 __ai poly16x4_t vreinterpret_p16_p64(poly64x1_t __p0) {
34464   poly16x4_t __ret;
34465   __ret = (poly16x4_t)(__p0);
34466   return __ret;
34467 }
34468 #else
vreinterpret_p16_p64(poly64x1_t __p0)34469 __ai poly16x4_t vreinterpret_p16_p64(poly64x1_t __p0) {
34470   poly16x4_t __ret;
34471   __ret = (poly16x4_t)(__p0);
34472   return __ret;
34473 }
34474 #endif
34475 
34476 #ifdef __LITTLE_ENDIAN__
vreinterpret_p16_u8(uint8x8_t __p0)34477 __ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) {
34478   poly16x4_t __ret;
34479   __ret = (poly16x4_t)(__p0);
34480   return __ret;
34481 }
34482 #else
vreinterpret_p16_u8(uint8x8_t __p0)34483 __ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) {
34484   poly16x4_t __ret;
34485   __ret = (poly16x4_t)(__p0);
34486   return __ret;
34487 }
34488 #endif
34489 
34490 #ifdef __LITTLE_ENDIAN__
vreinterpret_p16_u32(uint32x2_t __p0)34491 __ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) {
34492   poly16x4_t __ret;
34493   __ret = (poly16x4_t)(__p0);
34494   return __ret;
34495 }
34496 #else
vreinterpret_p16_u32(uint32x2_t __p0)34497 __ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) {
34498   poly16x4_t __ret;
34499   __ret = (poly16x4_t)(__p0);
34500   return __ret;
34501 }
34502 #endif
34503 
34504 #ifdef __LITTLE_ENDIAN__
vreinterpret_p16_u64(uint64x1_t __p0)34505 __ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) {
34506   poly16x4_t __ret;
34507   __ret = (poly16x4_t)(__p0);
34508   return __ret;
34509 }
34510 #else
vreinterpret_p16_u64(uint64x1_t __p0)34511 __ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) {
34512   poly16x4_t __ret;
34513   __ret = (poly16x4_t)(__p0);
34514   return __ret;
34515 }
34516 #endif
34517 
34518 #ifdef __LITTLE_ENDIAN__
vreinterpret_p16_u16(uint16x4_t __p0)34519 __ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) {
34520   poly16x4_t __ret;
34521   __ret = (poly16x4_t)(__p0);
34522   return __ret;
34523 }
34524 #else
vreinterpret_p16_u16(uint16x4_t __p0)34525 __ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) {
34526   poly16x4_t __ret;
34527   __ret = (poly16x4_t)(__p0);
34528   return __ret;
34529 }
34530 #endif
34531 
34532 #ifdef __LITTLE_ENDIAN__
vreinterpret_p16_s8(int8x8_t __p0)34533 __ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) {
34534   poly16x4_t __ret;
34535   __ret = (poly16x4_t)(__p0);
34536   return __ret;
34537 }
34538 #else
vreinterpret_p16_s8(int8x8_t __p0)34539 __ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) {
34540   poly16x4_t __ret;
34541   __ret = (poly16x4_t)(__p0);
34542   return __ret;
34543 }
34544 #endif
34545 
34546 #ifdef __LITTLE_ENDIAN__
vreinterpret_p16_f64(float64x1_t __p0)34547 __ai poly16x4_t vreinterpret_p16_f64(float64x1_t __p0) {
34548   poly16x4_t __ret;
34549   __ret = (poly16x4_t)(__p0);
34550   return __ret;
34551 }
34552 #else
vreinterpret_p16_f64(float64x1_t __p0)34553 __ai poly16x4_t vreinterpret_p16_f64(float64x1_t __p0) {
34554   poly16x4_t __ret;
34555   __ret = (poly16x4_t)(__p0);
34556   return __ret;
34557 }
34558 #endif
34559 
34560 #ifdef __LITTLE_ENDIAN__
vreinterpret_p16_f32(float32x2_t __p0)34561 __ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) {
34562   poly16x4_t __ret;
34563   __ret = (poly16x4_t)(__p0);
34564   return __ret;
34565 }
34566 #else
vreinterpret_p16_f32(float32x2_t __p0)34567 __ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) {
34568   poly16x4_t __ret;
34569   __ret = (poly16x4_t)(__p0);
34570   return __ret;
34571 }
34572 #endif
34573 
34574 #ifdef __LITTLE_ENDIAN__
vreinterpret_p16_f16(float16x4_t __p0)34575 __ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) {
34576   poly16x4_t __ret;
34577   __ret = (poly16x4_t)(__p0);
34578   return __ret;
34579 }
34580 #else
vreinterpret_p16_f16(float16x4_t __p0)34581 __ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) {
34582   poly16x4_t __ret;
34583   __ret = (poly16x4_t)(__p0);
34584   return __ret;
34585 }
34586 #endif
34587 
34588 #ifdef __LITTLE_ENDIAN__
vreinterpret_p16_s32(int32x2_t __p0)34589 __ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) {
34590   poly16x4_t __ret;
34591   __ret = (poly16x4_t)(__p0);
34592   return __ret;
34593 }
34594 #else
vreinterpret_p16_s32(int32x2_t __p0)34595 __ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) {
34596   poly16x4_t __ret;
34597   __ret = (poly16x4_t)(__p0);
34598   return __ret;
34599 }
34600 #endif
34601 
34602 #ifdef __LITTLE_ENDIAN__
vreinterpret_p16_s64(int64x1_t __p0)34603 __ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) {
34604   poly16x4_t __ret;
34605   __ret = (poly16x4_t)(__p0);
34606   return __ret;
34607 }
34608 #else
vreinterpret_p16_s64(int64x1_t __p0)34609 __ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) {
34610   poly16x4_t __ret;
34611   __ret = (poly16x4_t)(__p0);
34612   return __ret;
34613 }
34614 #endif
34615 
34616 #ifdef __LITTLE_ENDIAN__
vreinterpret_p16_s16(int16x4_t __p0)34617 __ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) {
34618   poly16x4_t __ret;
34619   __ret = (poly16x4_t)(__p0);
34620   return __ret;
34621 }
34622 #else
vreinterpret_p16_s16(int16x4_t __p0)34623 __ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) {
34624   poly16x4_t __ret;
34625   __ret = (poly16x4_t)(__p0);
34626   return __ret;
34627 }
34628 #endif
34629 
34630 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p8_p128(poly128_t __p0)34631 __ai poly8x16_t vreinterpretq_p8_p128(poly128_t __p0) {
34632   poly8x16_t __ret;
34633   __ret = (poly8x16_t)(__p0);
34634   return __ret;
34635 }
34636 #else
vreinterpretq_p8_p128(poly128_t __p0)34637 __ai poly8x16_t vreinterpretq_p8_p128(poly128_t __p0) {
34638   poly8x16_t __ret;
34639   __ret = (poly8x16_t)(__p0);
34640   return __ret;
34641 }
34642 #endif
34643 
34644 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p8_p64(poly64x2_t __p0)34645 __ai poly8x16_t vreinterpretq_p8_p64(poly64x2_t __p0) {
34646   poly8x16_t __ret;
34647   __ret = (poly8x16_t)(__p0);
34648   return __ret;
34649 }
34650 #else
vreinterpretq_p8_p64(poly64x2_t __p0)34651 __ai poly8x16_t vreinterpretq_p8_p64(poly64x2_t __p0) {
34652   poly8x16_t __ret;
34653   __ret = (poly8x16_t)(__p0);
34654   return __ret;
34655 }
34656 #endif
34657 
34658 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p8_p16(poly16x8_t __p0)34659 __ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) {
34660   poly8x16_t __ret;
34661   __ret = (poly8x16_t)(__p0);
34662   return __ret;
34663 }
34664 #else
vreinterpretq_p8_p16(poly16x8_t __p0)34665 __ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) {
34666   poly8x16_t __ret;
34667   __ret = (poly8x16_t)(__p0);
34668   return __ret;
34669 }
34670 #endif
34671 
34672 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p8_u8(uint8x16_t __p0)34673 __ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) {
34674   poly8x16_t __ret;
34675   __ret = (poly8x16_t)(__p0);
34676   return __ret;
34677 }
34678 #else
vreinterpretq_p8_u8(uint8x16_t __p0)34679 __ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) {
34680   poly8x16_t __ret;
34681   __ret = (poly8x16_t)(__p0);
34682   return __ret;
34683 }
34684 #endif
34685 
34686 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p8_u32(uint32x4_t __p0)34687 __ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) {
34688   poly8x16_t __ret;
34689   __ret = (poly8x16_t)(__p0);
34690   return __ret;
34691 }
34692 #else
vreinterpretq_p8_u32(uint32x4_t __p0)34693 __ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) {
34694   poly8x16_t __ret;
34695   __ret = (poly8x16_t)(__p0);
34696   return __ret;
34697 }
34698 #endif
34699 
34700 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p8_u64(uint64x2_t __p0)34701 __ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) {
34702   poly8x16_t __ret;
34703   __ret = (poly8x16_t)(__p0);
34704   return __ret;
34705 }
34706 #else
vreinterpretq_p8_u64(uint64x2_t __p0)34707 __ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) {
34708   poly8x16_t __ret;
34709   __ret = (poly8x16_t)(__p0);
34710   return __ret;
34711 }
34712 #endif
34713 
34714 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p8_u16(uint16x8_t __p0)34715 __ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) {
34716   poly8x16_t __ret;
34717   __ret = (poly8x16_t)(__p0);
34718   return __ret;
34719 }
34720 #else
vreinterpretq_p8_u16(uint16x8_t __p0)34721 __ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) {
34722   poly8x16_t __ret;
34723   __ret = (poly8x16_t)(__p0);
34724   return __ret;
34725 }
34726 #endif
34727 
34728 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p8_s8(int8x16_t __p0)34729 __ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) {
34730   poly8x16_t __ret;
34731   __ret = (poly8x16_t)(__p0);
34732   return __ret;
34733 }
34734 #else
vreinterpretq_p8_s8(int8x16_t __p0)34735 __ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) {
34736   poly8x16_t __ret;
34737   __ret = (poly8x16_t)(__p0);
34738   return __ret;
34739 }
34740 #endif
34741 
34742 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p8_f64(float64x2_t __p0)34743 __ai poly8x16_t vreinterpretq_p8_f64(float64x2_t __p0) {
34744   poly8x16_t __ret;
34745   __ret = (poly8x16_t)(__p0);
34746   return __ret;
34747 }
34748 #else
vreinterpretq_p8_f64(float64x2_t __p0)34749 __ai poly8x16_t vreinterpretq_p8_f64(float64x2_t __p0) {
34750   poly8x16_t __ret;
34751   __ret = (poly8x16_t)(__p0);
34752   return __ret;
34753 }
34754 #endif
34755 
34756 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p8_f32(float32x4_t __p0)34757 __ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) {
34758   poly8x16_t __ret;
34759   __ret = (poly8x16_t)(__p0);
34760   return __ret;
34761 }
34762 #else
vreinterpretq_p8_f32(float32x4_t __p0)34763 __ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) {
34764   poly8x16_t __ret;
34765   __ret = (poly8x16_t)(__p0);
34766   return __ret;
34767 }
34768 #endif
34769 
34770 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p8_f16(float16x8_t __p0)34771 __ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) {
34772   poly8x16_t __ret;
34773   __ret = (poly8x16_t)(__p0);
34774   return __ret;
34775 }
34776 #else
vreinterpretq_p8_f16(float16x8_t __p0)34777 __ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) {
34778   poly8x16_t __ret;
34779   __ret = (poly8x16_t)(__p0);
34780   return __ret;
34781 }
34782 #endif
34783 
34784 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p8_s32(int32x4_t __p0)34785 __ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) {
34786   poly8x16_t __ret;
34787   __ret = (poly8x16_t)(__p0);
34788   return __ret;
34789 }
34790 #else
vreinterpretq_p8_s32(int32x4_t __p0)34791 __ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) {
34792   poly8x16_t __ret;
34793   __ret = (poly8x16_t)(__p0);
34794   return __ret;
34795 }
34796 #endif
34797 
34798 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p8_s64(int64x2_t __p0)34799 __ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) {
34800   poly8x16_t __ret;
34801   __ret = (poly8x16_t)(__p0);
34802   return __ret;
34803 }
34804 #else
vreinterpretq_p8_s64(int64x2_t __p0)34805 __ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) {
34806   poly8x16_t __ret;
34807   __ret = (poly8x16_t)(__p0);
34808   return __ret;
34809 }
34810 #endif
34811 
34812 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p8_s16(int16x8_t __p0)34813 __ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) {
34814   poly8x16_t __ret;
34815   __ret = (poly8x16_t)(__p0);
34816   return __ret;
34817 }
34818 #else
vreinterpretq_p8_s16(int16x8_t __p0)34819 __ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) {
34820   poly8x16_t __ret;
34821   __ret = (poly8x16_t)(__p0);
34822   return __ret;
34823 }
34824 #endif
34825 
34826 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p128_p8(poly8x16_t __p0)34827 __ai poly128_t vreinterpretq_p128_p8(poly8x16_t __p0) {
34828   poly128_t __ret;
34829   __ret = (poly128_t)(__p0);
34830   return __ret;
34831 }
34832 #else
vreinterpretq_p128_p8(poly8x16_t __p0)34833 __ai poly128_t vreinterpretq_p128_p8(poly8x16_t __p0) {
34834   poly128_t __ret;
34835   __ret = (poly128_t)(__p0);
34836   return __ret;
34837 }
34838 #endif
34839 
34840 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p128_p64(poly64x2_t __p0)34841 __ai poly128_t vreinterpretq_p128_p64(poly64x2_t __p0) {
34842   poly128_t __ret;
34843   __ret = (poly128_t)(__p0);
34844   return __ret;
34845 }
34846 #else
vreinterpretq_p128_p64(poly64x2_t __p0)34847 __ai poly128_t vreinterpretq_p128_p64(poly64x2_t __p0) {
34848   poly128_t __ret;
34849   __ret = (poly128_t)(__p0);
34850   return __ret;
34851 }
34852 #endif
34853 
34854 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p128_p16(poly16x8_t __p0)34855 __ai poly128_t vreinterpretq_p128_p16(poly16x8_t __p0) {
34856   poly128_t __ret;
34857   __ret = (poly128_t)(__p0);
34858   return __ret;
34859 }
34860 #else
vreinterpretq_p128_p16(poly16x8_t __p0)34861 __ai poly128_t vreinterpretq_p128_p16(poly16x8_t __p0) {
34862   poly128_t __ret;
34863   __ret = (poly128_t)(__p0);
34864   return __ret;
34865 }
34866 #endif
34867 
34868 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p128_u8(uint8x16_t __p0)34869 __ai poly128_t vreinterpretq_p128_u8(uint8x16_t __p0) {
34870   poly128_t __ret;
34871   __ret = (poly128_t)(__p0);
34872   return __ret;
34873 }
34874 #else
vreinterpretq_p128_u8(uint8x16_t __p0)34875 __ai poly128_t vreinterpretq_p128_u8(uint8x16_t __p0) {
34876   poly128_t __ret;
34877   __ret = (poly128_t)(__p0);
34878   return __ret;
34879 }
34880 #endif
34881 
34882 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p128_u32(uint32x4_t __p0)34883 __ai poly128_t vreinterpretq_p128_u32(uint32x4_t __p0) {
34884   poly128_t __ret;
34885   __ret = (poly128_t)(__p0);
34886   return __ret;
34887 }
34888 #else
vreinterpretq_p128_u32(uint32x4_t __p0)34889 __ai poly128_t vreinterpretq_p128_u32(uint32x4_t __p0) {
34890   poly128_t __ret;
34891   __ret = (poly128_t)(__p0);
34892   return __ret;
34893 }
34894 #endif
34895 
34896 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p128_u64(uint64x2_t __p0)34897 __ai poly128_t vreinterpretq_p128_u64(uint64x2_t __p0) {
34898   poly128_t __ret;
34899   __ret = (poly128_t)(__p0);
34900   return __ret;
34901 }
34902 #else
vreinterpretq_p128_u64(uint64x2_t __p0)34903 __ai poly128_t vreinterpretq_p128_u64(uint64x2_t __p0) {
34904   poly128_t __ret;
34905   __ret = (poly128_t)(__p0);
34906   return __ret;
34907 }
34908 #endif
34909 
34910 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p128_u16(uint16x8_t __p0)34911 __ai poly128_t vreinterpretq_p128_u16(uint16x8_t __p0) {
34912   poly128_t __ret;
34913   __ret = (poly128_t)(__p0);
34914   return __ret;
34915 }
34916 #else
vreinterpretq_p128_u16(uint16x8_t __p0)34917 __ai poly128_t vreinterpretq_p128_u16(uint16x8_t __p0) {
34918   poly128_t __ret;
34919   __ret = (poly128_t)(__p0);
34920   return __ret;
34921 }
34922 #endif
34923 
34924 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p128_s8(int8x16_t __p0)34925 __ai poly128_t vreinterpretq_p128_s8(int8x16_t __p0) {
34926   poly128_t __ret;
34927   __ret = (poly128_t)(__p0);
34928   return __ret;
34929 }
34930 #else
vreinterpretq_p128_s8(int8x16_t __p0)34931 __ai poly128_t vreinterpretq_p128_s8(int8x16_t __p0) {
34932   poly128_t __ret;
34933   __ret = (poly128_t)(__p0);
34934   return __ret;
34935 }
34936 #endif
34937 
34938 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p128_f64(float64x2_t __p0)34939 __ai poly128_t vreinterpretq_p128_f64(float64x2_t __p0) {
34940   poly128_t __ret;
34941   __ret = (poly128_t)(__p0);
34942   return __ret;
34943 }
34944 #else
vreinterpretq_p128_f64(float64x2_t __p0)34945 __ai poly128_t vreinterpretq_p128_f64(float64x2_t __p0) {
34946   poly128_t __ret;
34947   __ret = (poly128_t)(__p0);
34948   return __ret;
34949 }
34950 #endif
34951 
34952 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p128_f32(float32x4_t __p0)34953 __ai poly128_t vreinterpretq_p128_f32(float32x4_t __p0) {
34954   poly128_t __ret;
34955   __ret = (poly128_t)(__p0);
34956   return __ret;
34957 }
34958 #else
vreinterpretq_p128_f32(float32x4_t __p0)34959 __ai poly128_t vreinterpretq_p128_f32(float32x4_t __p0) {
34960   poly128_t __ret;
34961   __ret = (poly128_t)(__p0);
34962   return __ret;
34963 }
34964 #endif
34965 
34966 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p128_f16(float16x8_t __p0)34967 __ai poly128_t vreinterpretq_p128_f16(float16x8_t __p0) {
34968   poly128_t __ret;
34969   __ret = (poly128_t)(__p0);
34970   return __ret;
34971 }
34972 #else
vreinterpretq_p128_f16(float16x8_t __p0)34973 __ai poly128_t vreinterpretq_p128_f16(float16x8_t __p0) {
34974   poly128_t __ret;
34975   __ret = (poly128_t)(__p0);
34976   return __ret;
34977 }
34978 #endif
34979 
34980 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p128_s32(int32x4_t __p0)34981 __ai poly128_t vreinterpretq_p128_s32(int32x4_t __p0) {
34982   poly128_t __ret;
34983   __ret = (poly128_t)(__p0);
34984   return __ret;
34985 }
34986 #else
vreinterpretq_p128_s32(int32x4_t __p0)34987 __ai poly128_t vreinterpretq_p128_s32(int32x4_t __p0) {
34988   poly128_t __ret;
34989   __ret = (poly128_t)(__p0);
34990   return __ret;
34991 }
34992 #endif
34993 
34994 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p128_s64(int64x2_t __p0)34995 __ai poly128_t vreinterpretq_p128_s64(int64x2_t __p0) {
34996   poly128_t __ret;
34997   __ret = (poly128_t)(__p0);
34998   return __ret;
34999 }
35000 #else
vreinterpretq_p128_s64(int64x2_t __p0)35001 __ai poly128_t vreinterpretq_p128_s64(int64x2_t __p0) {
35002   poly128_t __ret;
35003   __ret = (poly128_t)(__p0);
35004   return __ret;
35005 }
35006 #endif
35007 
35008 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p128_s16(int16x8_t __p0)35009 __ai poly128_t vreinterpretq_p128_s16(int16x8_t __p0) {
35010   poly128_t __ret;
35011   __ret = (poly128_t)(__p0);
35012   return __ret;
35013 }
35014 #else
vreinterpretq_p128_s16(int16x8_t __p0)35015 __ai poly128_t vreinterpretq_p128_s16(int16x8_t __p0) {
35016   poly128_t __ret;
35017   __ret = (poly128_t)(__p0);
35018   return __ret;
35019 }
35020 #endif
35021 
35022 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p64_p8(poly8x16_t __p0)35023 __ai poly64x2_t vreinterpretq_p64_p8(poly8x16_t __p0) {
35024   poly64x2_t __ret;
35025   __ret = (poly64x2_t)(__p0);
35026   return __ret;
35027 }
35028 #else
vreinterpretq_p64_p8(poly8x16_t __p0)35029 __ai poly64x2_t vreinterpretq_p64_p8(poly8x16_t __p0) {
35030   poly64x2_t __ret;
35031   __ret = (poly64x2_t)(__p0);
35032   return __ret;
35033 }
35034 #endif
35035 
35036 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p64_p128(poly128_t __p0)35037 __ai poly64x2_t vreinterpretq_p64_p128(poly128_t __p0) {
35038   poly64x2_t __ret;
35039   __ret = (poly64x2_t)(__p0);
35040   return __ret;
35041 }
35042 #else
vreinterpretq_p64_p128(poly128_t __p0)35043 __ai poly64x2_t vreinterpretq_p64_p128(poly128_t __p0) {
35044   poly64x2_t __ret;
35045   __ret = (poly64x2_t)(__p0);
35046   return __ret;
35047 }
35048 #endif
35049 
35050 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p64_p16(poly16x8_t __p0)35051 __ai poly64x2_t vreinterpretq_p64_p16(poly16x8_t __p0) {
35052   poly64x2_t __ret;
35053   __ret = (poly64x2_t)(__p0);
35054   return __ret;
35055 }
35056 #else
vreinterpretq_p64_p16(poly16x8_t __p0)35057 __ai poly64x2_t vreinterpretq_p64_p16(poly16x8_t __p0) {
35058   poly64x2_t __ret;
35059   __ret = (poly64x2_t)(__p0);
35060   return __ret;
35061 }
35062 #endif
35063 
35064 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p64_u8(uint8x16_t __p0)35065 __ai poly64x2_t vreinterpretq_p64_u8(uint8x16_t __p0) {
35066   poly64x2_t __ret;
35067   __ret = (poly64x2_t)(__p0);
35068   return __ret;
35069 }
35070 #else
vreinterpretq_p64_u8(uint8x16_t __p0)35071 __ai poly64x2_t vreinterpretq_p64_u8(uint8x16_t __p0) {
35072   poly64x2_t __ret;
35073   __ret = (poly64x2_t)(__p0);
35074   return __ret;
35075 }
35076 #endif
35077 
35078 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p64_u32(uint32x4_t __p0)35079 __ai poly64x2_t vreinterpretq_p64_u32(uint32x4_t __p0) {
35080   poly64x2_t __ret;
35081   __ret = (poly64x2_t)(__p0);
35082   return __ret;
35083 }
35084 #else
vreinterpretq_p64_u32(uint32x4_t __p0)35085 __ai poly64x2_t vreinterpretq_p64_u32(uint32x4_t __p0) {
35086   poly64x2_t __ret;
35087   __ret = (poly64x2_t)(__p0);
35088   return __ret;
35089 }
35090 #endif
35091 
35092 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p64_u64(uint64x2_t __p0)35093 __ai poly64x2_t vreinterpretq_p64_u64(uint64x2_t __p0) {
35094   poly64x2_t __ret;
35095   __ret = (poly64x2_t)(__p0);
35096   return __ret;
35097 }
35098 #else
vreinterpretq_p64_u64(uint64x2_t __p0)35099 __ai poly64x2_t vreinterpretq_p64_u64(uint64x2_t __p0) {
35100   poly64x2_t __ret;
35101   __ret = (poly64x2_t)(__p0);
35102   return __ret;
35103 }
35104 #endif
35105 
35106 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p64_u16(uint16x8_t __p0)35107 __ai poly64x2_t vreinterpretq_p64_u16(uint16x8_t __p0) {
35108   poly64x2_t __ret;
35109   __ret = (poly64x2_t)(__p0);
35110   return __ret;
35111 }
35112 #else
vreinterpretq_p64_u16(uint16x8_t __p0)35113 __ai poly64x2_t vreinterpretq_p64_u16(uint16x8_t __p0) {
35114   poly64x2_t __ret;
35115   __ret = (poly64x2_t)(__p0);
35116   return __ret;
35117 }
35118 #endif
35119 
35120 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p64_s8(int8x16_t __p0)35121 __ai poly64x2_t vreinterpretq_p64_s8(int8x16_t __p0) {
35122   poly64x2_t __ret;
35123   __ret = (poly64x2_t)(__p0);
35124   return __ret;
35125 }
35126 #else
vreinterpretq_p64_s8(int8x16_t __p0)35127 __ai poly64x2_t vreinterpretq_p64_s8(int8x16_t __p0) {
35128   poly64x2_t __ret;
35129   __ret = (poly64x2_t)(__p0);
35130   return __ret;
35131 }
35132 #endif
35133 
35134 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p64_f64(float64x2_t __p0)35135 __ai poly64x2_t vreinterpretq_p64_f64(float64x2_t __p0) {
35136   poly64x2_t __ret;
35137   __ret = (poly64x2_t)(__p0);
35138   return __ret;
35139 }
35140 #else
vreinterpretq_p64_f64(float64x2_t __p0)35141 __ai poly64x2_t vreinterpretq_p64_f64(float64x2_t __p0) {
35142   poly64x2_t __ret;
35143   __ret = (poly64x2_t)(__p0);
35144   return __ret;
35145 }
35146 #endif
35147 
35148 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p64_f32(float32x4_t __p0)35149 __ai poly64x2_t vreinterpretq_p64_f32(float32x4_t __p0) {
35150   poly64x2_t __ret;
35151   __ret = (poly64x2_t)(__p0);
35152   return __ret;
35153 }
35154 #else
vreinterpretq_p64_f32(float32x4_t __p0)35155 __ai poly64x2_t vreinterpretq_p64_f32(float32x4_t __p0) {
35156   poly64x2_t __ret;
35157   __ret = (poly64x2_t)(__p0);
35158   return __ret;
35159 }
35160 #endif
35161 
35162 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p64_f16(float16x8_t __p0)35163 __ai poly64x2_t vreinterpretq_p64_f16(float16x8_t __p0) {
35164   poly64x2_t __ret;
35165   __ret = (poly64x2_t)(__p0);
35166   return __ret;
35167 }
35168 #else
vreinterpretq_p64_f16(float16x8_t __p0)35169 __ai poly64x2_t vreinterpretq_p64_f16(float16x8_t __p0) {
35170   poly64x2_t __ret;
35171   __ret = (poly64x2_t)(__p0);
35172   return __ret;
35173 }
35174 #endif
35175 
35176 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p64_s32(int32x4_t __p0)35177 __ai poly64x2_t vreinterpretq_p64_s32(int32x4_t __p0) {
35178   poly64x2_t __ret;
35179   __ret = (poly64x2_t)(__p0);
35180   return __ret;
35181 }
35182 #else
vreinterpretq_p64_s32(int32x4_t __p0)35183 __ai poly64x2_t vreinterpretq_p64_s32(int32x4_t __p0) {
35184   poly64x2_t __ret;
35185   __ret = (poly64x2_t)(__p0);
35186   return __ret;
35187 }
35188 #endif
35189 
35190 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p64_s64(int64x2_t __p0)35191 __ai poly64x2_t vreinterpretq_p64_s64(int64x2_t __p0) {
35192   poly64x2_t __ret;
35193   __ret = (poly64x2_t)(__p0);
35194   return __ret;
35195 }
35196 #else
vreinterpretq_p64_s64(int64x2_t __p0)35197 __ai poly64x2_t vreinterpretq_p64_s64(int64x2_t __p0) {
35198   poly64x2_t __ret;
35199   __ret = (poly64x2_t)(__p0);
35200   return __ret;
35201 }
35202 #endif
35203 
35204 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p64_s16(int16x8_t __p0)35205 __ai poly64x2_t vreinterpretq_p64_s16(int16x8_t __p0) {
35206   poly64x2_t __ret;
35207   __ret = (poly64x2_t)(__p0);
35208   return __ret;
35209 }
35210 #else
vreinterpretq_p64_s16(int16x8_t __p0)35211 __ai poly64x2_t vreinterpretq_p64_s16(int16x8_t __p0) {
35212   poly64x2_t __ret;
35213   __ret = (poly64x2_t)(__p0);
35214   return __ret;
35215 }
35216 #endif
35217 
35218 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p16_p8(poly8x16_t __p0)35219 __ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) {
35220   poly16x8_t __ret;
35221   __ret = (poly16x8_t)(__p0);
35222   return __ret;
35223 }
35224 #else
vreinterpretq_p16_p8(poly8x16_t __p0)35225 __ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) {
35226   poly16x8_t __ret;
35227   __ret = (poly16x8_t)(__p0);
35228   return __ret;
35229 }
35230 #endif
35231 
35232 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p16_p128(poly128_t __p0)35233 __ai poly16x8_t vreinterpretq_p16_p128(poly128_t __p0) {
35234   poly16x8_t __ret;
35235   __ret = (poly16x8_t)(__p0);
35236   return __ret;
35237 }
35238 #else
vreinterpretq_p16_p128(poly128_t __p0)35239 __ai poly16x8_t vreinterpretq_p16_p128(poly128_t __p0) {
35240   poly16x8_t __ret;
35241   __ret = (poly16x8_t)(__p0);
35242   return __ret;
35243 }
35244 #endif
35245 
35246 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p16_p64(poly64x2_t __p0)35247 __ai poly16x8_t vreinterpretq_p16_p64(poly64x2_t __p0) {
35248   poly16x8_t __ret;
35249   __ret = (poly16x8_t)(__p0);
35250   return __ret;
35251 }
35252 #else
vreinterpretq_p16_p64(poly64x2_t __p0)35253 __ai poly16x8_t vreinterpretq_p16_p64(poly64x2_t __p0) {
35254   poly16x8_t __ret;
35255   __ret = (poly16x8_t)(__p0);
35256   return __ret;
35257 }
35258 #endif
35259 
35260 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p16_u8(uint8x16_t __p0)35261 __ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) {
35262   poly16x8_t __ret;
35263   __ret = (poly16x8_t)(__p0);
35264   return __ret;
35265 }
35266 #else
vreinterpretq_p16_u8(uint8x16_t __p0)35267 __ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) {
35268   poly16x8_t __ret;
35269   __ret = (poly16x8_t)(__p0);
35270   return __ret;
35271 }
35272 #endif
35273 
35274 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p16_u32(uint32x4_t __p0)35275 __ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) {
35276   poly16x8_t __ret;
35277   __ret = (poly16x8_t)(__p0);
35278   return __ret;
35279 }
35280 #else
vreinterpretq_p16_u32(uint32x4_t __p0)35281 __ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) {
35282   poly16x8_t __ret;
35283   __ret = (poly16x8_t)(__p0);
35284   return __ret;
35285 }
35286 #endif
35287 
35288 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p16_u64(uint64x2_t __p0)35289 __ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) {
35290   poly16x8_t __ret;
35291   __ret = (poly16x8_t)(__p0);
35292   return __ret;
35293 }
35294 #else
vreinterpretq_p16_u64(uint64x2_t __p0)35295 __ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) {
35296   poly16x8_t __ret;
35297   __ret = (poly16x8_t)(__p0);
35298   return __ret;
35299 }
35300 #endif
35301 
35302 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p16_u16(uint16x8_t __p0)35303 __ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) {
35304   poly16x8_t __ret;
35305   __ret = (poly16x8_t)(__p0);
35306   return __ret;
35307 }
35308 #else
vreinterpretq_p16_u16(uint16x8_t __p0)35309 __ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) {
35310   poly16x8_t __ret;
35311   __ret = (poly16x8_t)(__p0);
35312   return __ret;
35313 }
35314 #endif
35315 
35316 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p16_s8(int8x16_t __p0)35317 __ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) {
35318   poly16x8_t __ret;
35319   __ret = (poly16x8_t)(__p0);
35320   return __ret;
35321 }
35322 #else
vreinterpretq_p16_s8(int8x16_t __p0)35323 __ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) {
35324   poly16x8_t __ret;
35325   __ret = (poly16x8_t)(__p0);
35326   return __ret;
35327 }
35328 #endif
35329 
35330 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p16_f64(float64x2_t __p0)35331 __ai poly16x8_t vreinterpretq_p16_f64(float64x2_t __p0) {
35332   poly16x8_t __ret;
35333   __ret = (poly16x8_t)(__p0);
35334   return __ret;
35335 }
35336 #else
vreinterpretq_p16_f64(float64x2_t __p0)35337 __ai poly16x8_t vreinterpretq_p16_f64(float64x2_t __p0) {
35338   poly16x8_t __ret;
35339   __ret = (poly16x8_t)(__p0);
35340   return __ret;
35341 }
35342 #endif
35343 
35344 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p16_f32(float32x4_t __p0)35345 __ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) {
35346   poly16x8_t __ret;
35347   __ret = (poly16x8_t)(__p0);
35348   return __ret;
35349 }
35350 #else
vreinterpretq_p16_f32(float32x4_t __p0)35351 __ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) {
35352   poly16x8_t __ret;
35353   __ret = (poly16x8_t)(__p0);
35354   return __ret;
35355 }
35356 #endif
35357 
35358 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p16_f16(float16x8_t __p0)35359 __ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) {
35360   poly16x8_t __ret;
35361   __ret = (poly16x8_t)(__p0);
35362   return __ret;
35363 }
35364 #else
vreinterpretq_p16_f16(float16x8_t __p0)35365 __ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) {
35366   poly16x8_t __ret;
35367   __ret = (poly16x8_t)(__p0);
35368   return __ret;
35369 }
35370 #endif
35371 
35372 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p16_s32(int32x4_t __p0)35373 __ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) {
35374   poly16x8_t __ret;
35375   __ret = (poly16x8_t)(__p0);
35376   return __ret;
35377 }
35378 #else
vreinterpretq_p16_s32(int32x4_t __p0)35379 __ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) {
35380   poly16x8_t __ret;
35381   __ret = (poly16x8_t)(__p0);
35382   return __ret;
35383 }
35384 #endif
35385 
35386 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p16_s64(int64x2_t __p0)35387 __ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) {
35388   poly16x8_t __ret;
35389   __ret = (poly16x8_t)(__p0);
35390   return __ret;
35391 }
35392 #else
vreinterpretq_p16_s64(int64x2_t __p0)35393 __ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) {
35394   poly16x8_t __ret;
35395   __ret = (poly16x8_t)(__p0);
35396   return __ret;
35397 }
35398 #endif
35399 
35400 #ifdef __LITTLE_ENDIAN__
vreinterpretq_p16_s16(int16x8_t __p0)35401 __ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) {
35402   poly16x8_t __ret;
35403   __ret = (poly16x8_t)(__p0);
35404   return __ret;
35405 }
35406 #else
vreinterpretq_p16_s16(int16x8_t __p0)35407 __ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) {
35408   poly16x8_t __ret;
35409   __ret = (poly16x8_t)(__p0);
35410   return __ret;
35411 }
35412 #endif
35413 
35414 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u8_p8(poly8x16_t __p0)35415 __ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) {
35416   uint8x16_t __ret;
35417   __ret = (uint8x16_t)(__p0);
35418   return __ret;
35419 }
35420 #else
vreinterpretq_u8_p8(poly8x16_t __p0)35421 __ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) {
35422   uint8x16_t __ret;
35423   __ret = (uint8x16_t)(__p0);
35424   return __ret;
35425 }
35426 #endif
35427 
35428 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u8_p128(poly128_t __p0)35429 __ai uint8x16_t vreinterpretq_u8_p128(poly128_t __p0) {
35430   uint8x16_t __ret;
35431   __ret = (uint8x16_t)(__p0);
35432   return __ret;
35433 }
35434 #else
vreinterpretq_u8_p128(poly128_t __p0)35435 __ai uint8x16_t vreinterpretq_u8_p128(poly128_t __p0) {
35436   uint8x16_t __ret;
35437   __ret = (uint8x16_t)(__p0);
35438   return __ret;
35439 }
35440 #endif
35441 
35442 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u8_p64(poly64x2_t __p0)35443 __ai uint8x16_t vreinterpretq_u8_p64(poly64x2_t __p0) {
35444   uint8x16_t __ret;
35445   __ret = (uint8x16_t)(__p0);
35446   return __ret;
35447 }
35448 #else
vreinterpretq_u8_p64(poly64x2_t __p0)35449 __ai uint8x16_t vreinterpretq_u8_p64(poly64x2_t __p0) {
35450   uint8x16_t __ret;
35451   __ret = (uint8x16_t)(__p0);
35452   return __ret;
35453 }
35454 #endif
35455 
35456 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u8_p16(poly16x8_t __p0)35457 __ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) {
35458   uint8x16_t __ret;
35459   __ret = (uint8x16_t)(__p0);
35460   return __ret;
35461 }
35462 #else
vreinterpretq_u8_p16(poly16x8_t __p0)35463 __ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) {
35464   uint8x16_t __ret;
35465   __ret = (uint8x16_t)(__p0);
35466   return __ret;
35467 }
35468 #endif
35469 
35470 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u8_u32(uint32x4_t __p0)35471 __ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) {
35472   uint8x16_t __ret;
35473   __ret = (uint8x16_t)(__p0);
35474   return __ret;
35475 }
35476 #else
vreinterpretq_u8_u32(uint32x4_t __p0)35477 __ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) {
35478   uint8x16_t __ret;
35479   __ret = (uint8x16_t)(__p0);
35480   return __ret;
35481 }
35482 #endif
35483 
35484 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u8_u64(uint64x2_t __p0)35485 __ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) {
35486   uint8x16_t __ret;
35487   __ret = (uint8x16_t)(__p0);
35488   return __ret;
35489 }
35490 #else
vreinterpretq_u8_u64(uint64x2_t __p0)35491 __ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) {
35492   uint8x16_t __ret;
35493   __ret = (uint8x16_t)(__p0);
35494   return __ret;
35495 }
35496 #endif
35497 
35498 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u8_u16(uint16x8_t __p0)35499 __ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) {
35500   uint8x16_t __ret;
35501   __ret = (uint8x16_t)(__p0);
35502   return __ret;
35503 }
35504 #else
vreinterpretq_u8_u16(uint16x8_t __p0)35505 __ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) {
35506   uint8x16_t __ret;
35507   __ret = (uint8x16_t)(__p0);
35508   return __ret;
35509 }
35510 #endif
35511 
35512 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u8_s8(int8x16_t __p0)35513 __ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) {
35514   uint8x16_t __ret;
35515   __ret = (uint8x16_t)(__p0);
35516   return __ret;
35517 }
35518 #else
vreinterpretq_u8_s8(int8x16_t __p0)35519 __ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) {
35520   uint8x16_t __ret;
35521   __ret = (uint8x16_t)(__p0);
35522   return __ret;
35523 }
35524 #endif
35525 
35526 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u8_f64(float64x2_t __p0)35527 __ai uint8x16_t vreinterpretq_u8_f64(float64x2_t __p0) {
35528   uint8x16_t __ret;
35529   __ret = (uint8x16_t)(__p0);
35530   return __ret;
35531 }
35532 #else
vreinterpretq_u8_f64(float64x2_t __p0)35533 __ai uint8x16_t vreinterpretq_u8_f64(float64x2_t __p0) {
35534   uint8x16_t __ret;
35535   __ret = (uint8x16_t)(__p0);
35536   return __ret;
35537 }
35538 #endif
35539 
35540 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u8_f32(float32x4_t __p0)35541 __ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) {
35542   uint8x16_t __ret;
35543   __ret = (uint8x16_t)(__p0);
35544   return __ret;
35545 }
35546 #else
vreinterpretq_u8_f32(float32x4_t __p0)35547 __ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) {
35548   uint8x16_t __ret;
35549   __ret = (uint8x16_t)(__p0);
35550   return __ret;
35551 }
35552 #endif
35553 
35554 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u8_f16(float16x8_t __p0)35555 __ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) {
35556   uint8x16_t __ret;
35557   __ret = (uint8x16_t)(__p0);
35558   return __ret;
35559 }
35560 #else
vreinterpretq_u8_f16(float16x8_t __p0)35561 __ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) {
35562   uint8x16_t __ret;
35563   __ret = (uint8x16_t)(__p0);
35564   return __ret;
35565 }
35566 #endif
35567 
35568 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u8_s32(int32x4_t __p0)35569 __ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) {
35570   uint8x16_t __ret;
35571   __ret = (uint8x16_t)(__p0);
35572   return __ret;
35573 }
35574 #else
vreinterpretq_u8_s32(int32x4_t __p0)35575 __ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) {
35576   uint8x16_t __ret;
35577   __ret = (uint8x16_t)(__p0);
35578   return __ret;
35579 }
35580 #endif
35581 
35582 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u8_s64(int64x2_t __p0)35583 __ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) {
35584   uint8x16_t __ret;
35585   __ret = (uint8x16_t)(__p0);
35586   return __ret;
35587 }
35588 #else
vreinterpretq_u8_s64(int64x2_t __p0)35589 __ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) {
35590   uint8x16_t __ret;
35591   __ret = (uint8x16_t)(__p0);
35592   return __ret;
35593 }
35594 #endif
35595 
35596 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u8_s16(int16x8_t __p0)35597 __ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) {
35598   uint8x16_t __ret;
35599   __ret = (uint8x16_t)(__p0);
35600   return __ret;
35601 }
35602 #else
vreinterpretq_u8_s16(int16x8_t __p0)35603 __ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) {
35604   uint8x16_t __ret;
35605   __ret = (uint8x16_t)(__p0);
35606   return __ret;
35607 }
35608 #endif
35609 
35610 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u32_p8(poly8x16_t __p0)35611 __ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) {
35612   uint32x4_t __ret;
35613   __ret = (uint32x4_t)(__p0);
35614   return __ret;
35615 }
35616 #else
vreinterpretq_u32_p8(poly8x16_t __p0)35617 __ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) {
35618   uint32x4_t __ret;
35619   __ret = (uint32x4_t)(__p0);
35620   return __ret;
35621 }
35622 #endif
35623 
35624 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u32_p128(poly128_t __p0)35625 __ai uint32x4_t vreinterpretq_u32_p128(poly128_t __p0) {
35626   uint32x4_t __ret;
35627   __ret = (uint32x4_t)(__p0);
35628   return __ret;
35629 }
35630 #else
vreinterpretq_u32_p128(poly128_t __p0)35631 __ai uint32x4_t vreinterpretq_u32_p128(poly128_t __p0) {
35632   uint32x4_t __ret;
35633   __ret = (uint32x4_t)(__p0);
35634   return __ret;
35635 }
35636 #endif
35637 
35638 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u32_p64(poly64x2_t __p0)35639 __ai uint32x4_t vreinterpretq_u32_p64(poly64x2_t __p0) {
35640   uint32x4_t __ret;
35641   __ret = (uint32x4_t)(__p0);
35642   return __ret;
35643 }
35644 #else
vreinterpretq_u32_p64(poly64x2_t __p0)35645 __ai uint32x4_t vreinterpretq_u32_p64(poly64x2_t __p0) {
35646   uint32x4_t __ret;
35647   __ret = (uint32x4_t)(__p0);
35648   return __ret;
35649 }
35650 #endif
35651 
35652 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u32_p16(poly16x8_t __p0)35653 __ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) {
35654   uint32x4_t __ret;
35655   __ret = (uint32x4_t)(__p0);
35656   return __ret;
35657 }
35658 #else
vreinterpretq_u32_p16(poly16x8_t __p0)35659 __ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) {
35660   uint32x4_t __ret;
35661   __ret = (uint32x4_t)(__p0);
35662   return __ret;
35663 }
35664 #endif
35665 
35666 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u32_u8(uint8x16_t __p0)35667 __ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) {
35668   uint32x4_t __ret;
35669   __ret = (uint32x4_t)(__p0);
35670   return __ret;
35671 }
35672 #else
vreinterpretq_u32_u8(uint8x16_t __p0)35673 __ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) {
35674   uint32x4_t __ret;
35675   __ret = (uint32x4_t)(__p0);
35676   return __ret;
35677 }
35678 #endif
35679 
35680 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u32_u64(uint64x2_t __p0)35681 __ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) {
35682   uint32x4_t __ret;
35683   __ret = (uint32x4_t)(__p0);
35684   return __ret;
35685 }
35686 #else
vreinterpretq_u32_u64(uint64x2_t __p0)35687 __ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) {
35688   uint32x4_t __ret;
35689   __ret = (uint32x4_t)(__p0);
35690   return __ret;
35691 }
35692 #endif
35693 
35694 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u32_u16(uint16x8_t __p0)35695 __ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) {
35696   uint32x4_t __ret;
35697   __ret = (uint32x4_t)(__p0);
35698   return __ret;
35699 }
35700 #else
vreinterpretq_u32_u16(uint16x8_t __p0)35701 __ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) {
35702   uint32x4_t __ret;
35703   __ret = (uint32x4_t)(__p0);
35704   return __ret;
35705 }
35706 #endif
35707 
35708 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u32_s8(int8x16_t __p0)35709 __ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) {
35710   uint32x4_t __ret;
35711   __ret = (uint32x4_t)(__p0);
35712   return __ret;
35713 }
35714 #else
vreinterpretq_u32_s8(int8x16_t __p0)35715 __ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) {
35716   uint32x4_t __ret;
35717   __ret = (uint32x4_t)(__p0);
35718   return __ret;
35719 }
35720 #endif
35721 
35722 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u32_f64(float64x2_t __p0)35723 __ai uint32x4_t vreinterpretq_u32_f64(float64x2_t __p0) {
35724   uint32x4_t __ret;
35725   __ret = (uint32x4_t)(__p0);
35726   return __ret;
35727 }
35728 #else
vreinterpretq_u32_f64(float64x2_t __p0)35729 __ai uint32x4_t vreinterpretq_u32_f64(float64x2_t __p0) {
35730   uint32x4_t __ret;
35731   __ret = (uint32x4_t)(__p0);
35732   return __ret;
35733 }
35734 #endif
35735 
35736 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u32_f32(float32x4_t __p0)35737 __ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) {
35738   uint32x4_t __ret;
35739   __ret = (uint32x4_t)(__p0);
35740   return __ret;
35741 }
35742 #else
vreinterpretq_u32_f32(float32x4_t __p0)35743 __ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) {
35744   uint32x4_t __ret;
35745   __ret = (uint32x4_t)(__p0);
35746   return __ret;
35747 }
35748 #endif
35749 
35750 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u32_f16(float16x8_t __p0)35751 __ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) {
35752   uint32x4_t __ret;
35753   __ret = (uint32x4_t)(__p0);
35754   return __ret;
35755 }
35756 #else
vreinterpretq_u32_f16(float16x8_t __p0)35757 __ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) {
35758   uint32x4_t __ret;
35759   __ret = (uint32x4_t)(__p0);
35760   return __ret;
35761 }
35762 #endif
35763 
35764 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u32_s32(int32x4_t __p0)35765 __ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) {
35766   uint32x4_t __ret;
35767   __ret = (uint32x4_t)(__p0);
35768   return __ret;
35769 }
35770 #else
vreinterpretq_u32_s32(int32x4_t __p0)35771 __ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) {
35772   uint32x4_t __ret;
35773   __ret = (uint32x4_t)(__p0);
35774   return __ret;
35775 }
35776 #endif
35777 
35778 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u32_s64(int64x2_t __p0)35779 __ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) {
35780   uint32x4_t __ret;
35781   __ret = (uint32x4_t)(__p0);
35782   return __ret;
35783 }
35784 #else
vreinterpretq_u32_s64(int64x2_t __p0)35785 __ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) {
35786   uint32x4_t __ret;
35787   __ret = (uint32x4_t)(__p0);
35788   return __ret;
35789 }
35790 #endif
35791 
35792 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u32_s16(int16x8_t __p0)35793 __ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) {
35794   uint32x4_t __ret;
35795   __ret = (uint32x4_t)(__p0);
35796   return __ret;
35797 }
35798 #else
vreinterpretq_u32_s16(int16x8_t __p0)35799 __ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) {
35800   uint32x4_t __ret;
35801   __ret = (uint32x4_t)(__p0);
35802   return __ret;
35803 }
35804 #endif
35805 
35806 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u64_p8(poly8x16_t __p0)35807 __ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) {
35808   uint64x2_t __ret;
35809   __ret = (uint64x2_t)(__p0);
35810   return __ret;
35811 }
35812 #else
vreinterpretq_u64_p8(poly8x16_t __p0)35813 __ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) {
35814   uint64x2_t __ret;
35815   __ret = (uint64x2_t)(__p0);
35816   return __ret;
35817 }
35818 #endif
35819 
35820 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u64_p128(poly128_t __p0)35821 __ai uint64x2_t vreinterpretq_u64_p128(poly128_t __p0) {
35822   uint64x2_t __ret;
35823   __ret = (uint64x2_t)(__p0);
35824   return __ret;
35825 }
35826 #else
vreinterpretq_u64_p128(poly128_t __p0)35827 __ai uint64x2_t vreinterpretq_u64_p128(poly128_t __p0) {
35828   uint64x2_t __ret;
35829   __ret = (uint64x2_t)(__p0);
35830   return __ret;
35831 }
35832 #endif
35833 
35834 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u64_p64(poly64x2_t __p0)35835 __ai uint64x2_t vreinterpretq_u64_p64(poly64x2_t __p0) {
35836   uint64x2_t __ret;
35837   __ret = (uint64x2_t)(__p0);
35838   return __ret;
35839 }
35840 #else
vreinterpretq_u64_p64(poly64x2_t __p0)35841 __ai uint64x2_t vreinterpretq_u64_p64(poly64x2_t __p0) {
35842   uint64x2_t __ret;
35843   __ret = (uint64x2_t)(__p0);
35844   return __ret;
35845 }
35846 #endif
35847 
35848 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u64_p16(poly16x8_t __p0)35849 __ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) {
35850   uint64x2_t __ret;
35851   __ret = (uint64x2_t)(__p0);
35852   return __ret;
35853 }
35854 #else
vreinterpretq_u64_p16(poly16x8_t __p0)35855 __ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) {
35856   uint64x2_t __ret;
35857   __ret = (uint64x2_t)(__p0);
35858   return __ret;
35859 }
35860 #endif
35861 
35862 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u64_u8(uint8x16_t __p0)35863 __ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) {
35864   uint64x2_t __ret;
35865   __ret = (uint64x2_t)(__p0);
35866   return __ret;
35867 }
35868 #else
vreinterpretq_u64_u8(uint8x16_t __p0)35869 __ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) {
35870   uint64x2_t __ret;
35871   __ret = (uint64x2_t)(__p0);
35872   return __ret;
35873 }
35874 #endif
35875 
35876 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u64_u32(uint32x4_t __p0)35877 __ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) {
35878   uint64x2_t __ret;
35879   __ret = (uint64x2_t)(__p0);
35880   return __ret;
35881 }
35882 #else
vreinterpretq_u64_u32(uint32x4_t __p0)35883 __ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) {
35884   uint64x2_t __ret;
35885   __ret = (uint64x2_t)(__p0);
35886   return __ret;
35887 }
35888 #endif
35889 
35890 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u64_u16(uint16x8_t __p0)35891 __ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) {
35892   uint64x2_t __ret;
35893   __ret = (uint64x2_t)(__p0);
35894   return __ret;
35895 }
35896 #else
vreinterpretq_u64_u16(uint16x8_t __p0)35897 __ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) {
35898   uint64x2_t __ret;
35899   __ret = (uint64x2_t)(__p0);
35900   return __ret;
35901 }
35902 #endif
35903 
35904 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u64_s8(int8x16_t __p0)35905 __ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) {
35906   uint64x2_t __ret;
35907   __ret = (uint64x2_t)(__p0);
35908   return __ret;
35909 }
35910 #else
vreinterpretq_u64_s8(int8x16_t __p0)35911 __ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) {
35912   uint64x2_t __ret;
35913   __ret = (uint64x2_t)(__p0);
35914   return __ret;
35915 }
35916 #endif
35917 
35918 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u64_f64(float64x2_t __p0)35919 __ai uint64x2_t vreinterpretq_u64_f64(float64x2_t __p0) {
35920   uint64x2_t __ret;
35921   __ret = (uint64x2_t)(__p0);
35922   return __ret;
35923 }
35924 #else
vreinterpretq_u64_f64(float64x2_t __p0)35925 __ai uint64x2_t vreinterpretq_u64_f64(float64x2_t __p0) {
35926   uint64x2_t __ret;
35927   __ret = (uint64x2_t)(__p0);
35928   return __ret;
35929 }
35930 #endif
35931 
35932 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u64_f32(float32x4_t __p0)35933 __ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) {
35934   uint64x2_t __ret;
35935   __ret = (uint64x2_t)(__p0);
35936   return __ret;
35937 }
35938 #else
vreinterpretq_u64_f32(float32x4_t __p0)35939 __ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) {
35940   uint64x2_t __ret;
35941   __ret = (uint64x2_t)(__p0);
35942   return __ret;
35943 }
35944 #endif
35945 
35946 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u64_f16(float16x8_t __p0)35947 __ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) {
35948   uint64x2_t __ret;
35949   __ret = (uint64x2_t)(__p0);
35950   return __ret;
35951 }
35952 #else
vreinterpretq_u64_f16(float16x8_t __p0)35953 __ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) {
35954   uint64x2_t __ret;
35955   __ret = (uint64x2_t)(__p0);
35956   return __ret;
35957 }
35958 #endif
35959 
35960 #ifdef __LITTLE_ENDIAN__
vreinterpretq_u64_s32(int32x4_t __p0)35961 __ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) {
35962   uint64x2_t __ret;
35963   __ret = (uint64x2_t)(__p0);
35964   return __ret;
35965 }
35966 #else
35967 __ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) {
35968   uint64x2_t __ret;
35969   __ret = (uint64x2_t)(__p0);
35970   return __ret;
35971 }
35972 #endif
35973 
35974 #ifdef __LITTLE_ENDIAN__
35975 __ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) {
35976   uint64x2_t __ret;
35977   __ret = (uint64x2_t)(__p0);
35978   return __ret;
35979 }
35980 #else
35981 __ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) {
35982   uint64x2_t __ret;
35983   __ret = (uint64x2_t)(__p0);
35984   return __ret;
35985 }
35986 #endif
35987 
35988 #ifdef __LITTLE_ENDIAN__
35989 __ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) {
35990   uint64x2_t __ret;
35991   __ret = (uint64x2_t)(__p0);
35992   return __ret;
35993 }
35994 #else
35995 __ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) {
35996   uint64x2_t __ret;
35997   __ret = (uint64x2_t)(__p0);
35998   return __ret;
35999 }
36000 #endif
36001 
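/* Note (editorial, illustrative only): every vreinterpret/vreinterpretq
 * intrinsic in this family is a pure bit-level cast -- the 64- or 128-bit
 * register contents are left untouched and only the element type seen by the
 * compiler changes. A minimal usage sketch, assuming a caller that wants to
 * inspect the raw bit pattern of a float64x2_t (the helper name below is
 * hypothetical, not part of this header):
 *
 *   static inline uint64x2_t bits_of_f64x2(float64x2_t v) {
 *     return vreinterpretq_u64_f64(v);   // same 128 bits, viewed as 2 x u64
 *   }
 */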
36002 #ifdef __LITTLE_ENDIAN__
36003 __ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) {
36004   uint16x8_t __ret;
36005   __ret = (uint16x8_t)(__p0);
36006   return __ret;
36007 }
36008 #else
36009 __ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) {
36010   uint16x8_t __ret;
36011   __ret = (uint16x8_t)(__p0);
36012   return __ret;
36013 }
36014 #endif
36015 
36016 #ifdef __LITTLE_ENDIAN__
36017 __ai uint16x8_t vreinterpretq_u16_p128(poly128_t __p0) {
36018   uint16x8_t __ret;
36019   __ret = (uint16x8_t)(__p0);
36020   return __ret;
36021 }
36022 #else
36023 __ai uint16x8_t vreinterpretq_u16_p128(poly128_t __p0) {
36024   uint16x8_t __ret;
36025   __ret = (uint16x8_t)(__p0);
36026   return __ret;
36027 }
36028 #endif
36029 
36030 #ifdef __LITTLE_ENDIAN__
36031 __ai uint16x8_t vreinterpretq_u16_p64(poly64x2_t __p0) {
36032   uint16x8_t __ret;
36033   __ret = (uint16x8_t)(__p0);
36034   return __ret;
36035 }
36036 #else
36037 __ai uint16x8_t vreinterpretq_u16_p64(poly64x2_t __p0) {
36038   uint16x8_t __ret;
36039   __ret = (uint16x8_t)(__p0);
36040   return __ret;
36041 }
36042 #endif
36043 
36044 #ifdef __LITTLE_ENDIAN__
36045 __ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) {
36046   uint16x8_t __ret;
36047   __ret = (uint16x8_t)(__p0);
36048   return __ret;
36049 }
36050 #else
36051 __ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) {
36052   uint16x8_t __ret;
36053   __ret = (uint16x8_t)(__p0);
36054   return __ret;
36055 }
36056 #endif
36057 
36058 #ifdef __LITTLE_ENDIAN__
36059 __ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) {
36060   uint16x8_t __ret;
36061   __ret = (uint16x8_t)(__p0);
36062   return __ret;
36063 }
36064 #else
36065 __ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) {
36066   uint16x8_t __ret;
36067   __ret = (uint16x8_t)(__p0);
36068   return __ret;
36069 }
36070 #endif
36071 
36072 #ifdef __LITTLE_ENDIAN__
36073 __ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) {
36074   uint16x8_t __ret;
36075   __ret = (uint16x8_t)(__p0);
36076   return __ret;
36077 }
36078 #else
36079 __ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) {
36080   uint16x8_t __ret;
36081   __ret = (uint16x8_t)(__p0);
36082   return __ret;
36083 }
36084 #endif
36085 
36086 #ifdef __LITTLE_ENDIAN__
36087 __ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) {
36088   uint16x8_t __ret;
36089   __ret = (uint16x8_t)(__p0);
36090   return __ret;
36091 }
36092 #else
36093 __ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) {
36094   uint16x8_t __ret;
36095   __ret = (uint16x8_t)(__p0);
36096   return __ret;
36097 }
36098 #endif
36099 
36100 #ifdef __LITTLE_ENDIAN__
36101 __ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) {
36102   uint16x8_t __ret;
36103   __ret = (uint16x8_t)(__p0);
36104   return __ret;
36105 }
36106 #else
36107 __ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) {
36108   uint16x8_t __ret;
36109   __ret = (uint16x8_t)(__p0);
36110   return __ret;
36111 }
36112 #endif
36113 
36114 #ifdef __LITTLE_ENDIAN__
36115 __ai uint16x8_t vreinterpretq_u16_f64(float64x2_t __p0) {
36116   uint16x8_t __ret;
36117   __ret = (uint16x8_t)(__p0);
36118   return __ret;
36119 }
36120 #else
36121 __ai uint16x8_t vreinterpretq_u16_f64(float64x2_t __p0) {
36122   uint16x8_t __ret;
36123   __ret = (uint16x8_t)(__p0);
36124   return __ret;
36125 }
36126 #endif
36127 
36128 #ifdef __LITTLE_ENDIAN__
36129 __ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) {
36130   uint16x8_t __ret;
36131   __ret = (uint16x8_t)(__p0);
36132   return __ret;
36133 }
36134 #else
36135 __ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) {
36136   uint16x8_t __ret;
36137   __ret = (uint16x8_t)(__p0);
36138   return __ret;
36139 }
36140 #endif
36141 
36142 #ifdef __LITTLE_ENDIAN__
36143 __ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) {
36144   uint16x8_t __ret;
36145   __ret = (uint16x8_t)(__p0);
36146   return __ret;
36147 }
36148 #else
36149 __ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) {
36150   uint16x8_t __ret;
36151   __ret = (uint16x8_t)(__p0);
36152   return __ret;
36153 }
36154 #endif
36155 
36156 #ifdef __LITTLE_ENDIAN__
36157 __ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) {
36158   uint16x8_t __ret;
36159   __ret = (uint16x8_t)(__p0);
36160   return __ret;
36161 }
36162 #else
36163 __ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) {
36164   uint16x8_t __ret;
36165   __ret = (uint16x8_t)(__p0);
36166   return __ret;
36167 }
36168 #endif
36169 
36170 #ifdef __LITTLE_ENDIAN__
36171 __ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) {
36172   uint16x8_t __ret;
36173   __ret = (uint16x8_t)(__p0);
36174   return __ret;
36175 }
36176 #else
36177 __ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) {
36178   uint16x8_t __ret;
36179   __ret = (uint16x8_t)(__p0);
36180   return __ret;
36181 }
36182 #endif
36183 
36184 #ifdef __LITTLE_ENDIAN__
36185 __ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) {
36186   uint16x8_t __ret;
36187   __ret = (uint16x8_t)(__p0);
36188   return __ret;
36189 }
36190 #else
36191 __ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) {
36192   uint16x8_t __ret;
36193   __ret = (uint16x8_t)(__p0);
36194   return __ret;
36195 }
36196 #endif
36197 
36198 #ifdef __LITTLE_ENDIAN__
36199 __ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) {
36200   int8x16_t __ret;
36201   __ret = (int8x16_t)(__p0);
36202   return __ret;
36203 }
36204 #else
36205 __ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) {
36206   int8x16_t __ret;
36207   __ret = (int8x16_t)(__p0);
36208   return __ret;
36209 }
36210 #endif
36211 
36212 #ifdef __LITTLE_ENDIAN__
36213 __ai int8x16_t vreinterpretq_s8_p128(poly128_t __p0) {
36214   int8x16_t __ret;
36215   __ret = (int8x16_t)(__p0);
36216   return __ret;
36217 }
36218 #else
36219 __ai int8x16_t vreinterpretq_s8_p128(poly128_t __p0) {
36220   int8x16_t __ret;
36221   __ret = (int8x16_t)(__p0);
36222   return __ret;
36223 }
36224 #endif
36225 
36226 #ifdef __LITTLE_ENDIAN__
36227 __ai int8x16_t vreinterpretq_s8_p64(poly64x2_t __p0) {
36228   int8x16_t __ret;
36229   __ret = (int8x16_t)(__p0);
36230   return __ret;
36231 }
36232 #else
36233 __ai int8x16_t vreinterpretq_s8_p64(poly64x2_t __p0) {
36234   int8x16_t __ret;
36235   __ret = (int8x16_t)(__p0);
36236   return __ret;
36237 }
36238 #endif
36239 
36240 #ifdef __LITTLE_ENDIAN__
36241 __ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) {
36242   int8x16_t __ret;
36243   __ret = (int8x16_t)(__p0);
36244   return __ret;
36245 }
36246 #else
36247 __ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) {
36248   int8x16_t __ret;
36249   __ret = (int8x16_t)(__p0);
36250   return __ret;
36251 }
36252 #endif
36253 
36254 #ifdef __LITTLE_ENDIAN__
36255 __ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) {
36256   int8x16_t __ret;
36257   __ret = (int8x16_t)(__p0);
36258   return __ret;
36259 }
36260 #else
36261 __ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) {
36262   int8x16_t __ret;
36263   __ret = (int8x16_t)(__p0);
36264   return __ret;
36265 }
36266 #endif
36267 
36268 #ifdef __LITTLE_ENDIAN__
36269 __ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) {
36270   int8x16_t __ret;
36271   __ret = (int8x16_t)(__p0);
36272   return __ret;
36273 }
36274 #else
36275 __ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) {
36276   int8x16_t __ret;
36277   __ret = (int8x16_t)(__p0);
36278   return __ret;
36279 }
36280 #endif
36281 
36282 #ifdef __LITTLE_ENDIAN__
36283 __ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) {
36284   int8x16_t __ret;
36285   __ret = (int8x16_t)(__p0);
36286   return __ret;
36287 }
36288 #else
36289 __ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) {
36290   int8x16_t __ret;
36291   __ret = (int8x16_t)(__p0);
36292   return __ret;
36293 }
36294 #endif
36295 
36296 #ifdef __LITTLE_ENDIAN__
36297 __ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) {
36298   int8x16_t __ret;
36299   __ret = (int8x16_t)(__p0);
36300   return __ret;
36301 }
36302 #else
36303 __ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) {
36304   int8x16_t __ret;
36305   __ret = (int8x16_t)(__p0);
36306   return __ret;
36307 }
36308 #endif
36309 
36310 #ifdef __LITTLE_ENDIAN__
36311 __ai int8x16_t vreinterpretq_s8_f64(float64x2_t __p0) {
36312   int8x16_t __ret;
36313   __ret = (int8x16_t)(__p0);
36314   return __ret;
36315 }
36316 #else
36317 __ai int8x16_t vreinterpretq_s8_f64(float64x2_t __p0) {
36318   int8x16_t __ret;
36319   __ret = (int8x16_t)(__p0);
36320   return __ret;
36321 }
36322 #endif
36323 
36324 #ifdef __LITTLE_ENDIAN__
36325 __ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) {
36326   int8x16_t __ret;
36327   __ret = (int8x16_t)(__p0);
36328   return __ret;
36329 }
36330 #else
36331 __ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) {
36332   int8x16_t __ret;
36333   __ret = (int8x16_t)(__p0);
36334   return __ret;
36335 }
36336 #endif
36337 
36338 #ifdef __LITTLE_ENDIAN__
36339 __ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) {
36340   int8x16_t __ret;
36341   __ret = (int8x16_t)(__p0);
36342   return __ret;
36343 }
36344 #else
36345 __ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) {
36346   int8x16_t __ret;
36347   __ret = (int8x16_t)(__p0);
36348   return __ret;
36349 }
36350 #endif
36351 
36352 #ifdef __LITTLE_ENDIAN__
36353 __ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) {
36354   int8x16_t __ret;
36355   __ret = (int8x16_t)(__p0);
36356   return __ret;
36357 }
36358 #else
36359 __ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) {
36360   int8x16_t __ret;
36361   __ret = (int8x16_t)(__p0);
36362   return __ret;
36363 }
36364 #endif
36365 
36366 #ifdef __LITTLE_ENDIAN__
36367 __ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) {
36368   int8x16_t __ret;
36369   __ret = (int8x16_t)(__p0);
36370   return __ret;
36371 }
36372 #else
36373 __ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) {
36374   int8x16_t __ret;
36375   __ret = (int8x16_t)(__p0);
36376   return __ret;
36377 }
36378 #endif
36379 
36380 #ifdef __LITTLE_ENDIAN__
36381 __ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) {
36382   int8x16_t __ret;
36383   __ret = (int8x16_t)(__p0);
36384   return __ret;
36385 }
36386 #else
36387 __ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) {
36388   int8x16_t __ret;
36389   __ret = (int8x16_t)(__p0);
36390   return __ret;
36391 }
36392 #endif
36393 
36394 #ifdef __LITTLE_ENDIAN__
36395 __ai float64x2_t vreinterpretq_f64_p8(poly8x16_t __p0) {
36396   float64x2_t __ret;
36397   __ret = (float64x2_t)(__p0);
36398   return __ret;
36399 }
36400 #else
36401 __ai float64x2_t vreinterpretq_f64_p8(poly8x16_t __p0) {
36402   float64x2_t __ret;
36403   __ret = (float64x2_t)(__p0);
36404   return __ret;
36405 }
36406 #endif
36407 
36408 #ifdef __LITTLE_ENDIAN__
36409 __ai float64x2_t vreinterpretq_f64_p128(poly128_t __p0) {
36410   float64x2_t __ret;
36411   __ret = (float64x2_t)(__p0);
36412   return __ret;
36413 }
36414 #else
36415 __ai float64x2_t vreinterpretq_f64_p128(poly128_t __p0) {
36416   float64x2_t __ret;
36417   __ret = (float64x2_t)(__p0);
36418   return __ret;
36419 }
36420 #endif
36421 
36422 #ifdef __LITTLE_ENDIAN__
36423 __ai float64x2_t vreinterpretq_f64_p64(poly64x2_t __p0) {
36424   float64x2_t __ret;
36425   __ret = (float64x2_t)(__p0);
36426   return __ret;
36427 }
36428 #else
36429 __ai float64x2_t vreinterpretq_f64_p64(poly64x2_t __p0) {
36430   float64x2_t __ret;
36431   __ret = (float64x2_t)(__p0);
36432   return __ret;
36433 }
36434 #endif
36435 
36436 #ifdef __LITTLE_ENDIAN__
36437 __ai float64x2_t vreinterpretq_f64_p16(poly16x8_t __p0) {
36438   float64x2_t __ret;
36439   __ret = (float64x2_t)(__p0);
36440   return __ret;
36441 }
36442 #else
36443 __ai float64x2_t vreinterpretq_f64_p16(poly16x8_t __p0) {
36444   float64x2_t __ret;
36445   __ret = (float64x2_t)(__p0);
36446   return __ret;
36447 }
36448 #endif
36449 
36450 #ifdef __LITTLE_ENDIAN__
36451 __ai float64x2_t vreinterpretq_f64_u8(uint8x16_t __p0) {
36452   float64x2_t __ret;
36453   __ret = (float64x2_t)(__p0);
36454   return __ret;
36455 }
36456 #else
36457 __ai float64x2_t vreinterpretq_f64_u8(uint8x16_t __p0) {
36458   float64x2_t __ret;
36459   __ret = (float64x2_t)(__p0);
36460   return __ret;
36461 }
36462 #endif
36463 
36464 #ifdef __LITTLE_ENDIAN__
36465 __ai float64x2_t vreinterpretq_f64_u32(uint32x4_t __p0) {
36466   float64x2_t __ret;
36467   __ret = (float64x2_t)(__p0);
36468   return __ret;
36469 }
36470 #else
36471 __ai float64x2_t vreinterpretq_f64_u32(uint32x4_t __p0) {
36472   float64x2_t __ret;
36473   __ret = (float64x2_t)(__p0);
36474   return __ret;
36475 }
36476 #endif
36477 
36478 #ifdef __LITTLE_ENDIAN__
36479 __ai float64x2_t vreinterpretq_f64_u64(uint64x2_t __p0) {
36480   float64x2_t __ret;
36481   __ret = (float64x2_t)(__p0);
36482   return __ret;
36483 }
36484 #else
36485 __ai float64x2_t vreinterpretq_f64_u64(uint64x2_t __p0) {
36486   float64x2_t __ret;
36487   __ret = (float64x2_t)(__p0);
36488   return __ret;
36489 }
36490 #endif
36491 
36492 #ifdef __LITTLE_ENDIAN__
36493 __ai float64x2_t vreinterpretq_f64_u16(uint16x8_t __p0) {
36494   float64x2_t __ret;
36495   __ret = (float64x2_t)(__p0);
36496   return __ret;
36497 }
36498 #else
36499 __ai float64x2_t vreinterpretq_f64_u16(uint16x8_t __p0) {
36500   float64x2_t __ret;
36501   __ret = (float64x2_t)(__p0);
36502   return __ret;
36503 }
36504 #endif
36505 
36506 #ifdef __LITTLE_ENDIAN__
36507 __ai float64x2_t vreinterpretq_f64_s8(int8x16_t __p0) {
36508   float64x2_t __ret;
36509   __ret = (float64x2_t)(__p0);
36510   return __ret;
36511 }
36512 #else
36513 __ai float64x2_t vreinterpretq_f64_s8(int8x16_t __p0) {
36514   float64x2_t __ret;
36515   __ret = (float64x2_t)(__p0);
36516   return __ret;
36517 }
36518 #endif
36519 
36520 #ifdef __LITTLE_ENDIAN__
36521 __ai float64x2_t vreinterpretq_f64_f32(float32x4_t __p0) {
36522   float64x2_t __ret;
36523   __ret = (float64x2_t)(__p0);
36524   return __ret;
36525 }
36526 #else
36527 __ai float64x2_t vreinterpretq_f64_f32(float32x4_t __p0) {
36528   float64x2_t __ret;
36529   __ret = (float64x2_t)(__p0);
36530   return __ret;
36531 }
36532 #endif
36533 
36534 #ifdef __LITTLE_ENDIAN__
36535 __ai float64x2_t vreinterpretq_f64_f16(float16x8_t __p0) {
36536   float64x2_t __ret;
36537   __ret = (float64x2_t)(__p0);
36538   return __ret;
36539 }
36540 #else
36541 __ai float64x2_t vreinterpretq_f64_f16(float16x8_t __p0) {
36542   float64x2_t __ret;
36543   __ret = (float64x2_t)(__p0);
36544   return __ret;
36545 }
36546 #endif
36547 
36548 #ifdef __LITTLE_ENDIAN__
36549 __ai float64x2_t vreinterpretq_f64_s32(int32x4_t __p0) {
36550   float64x2_t __ret;
36551   __ret = (float64x2_t)(__p0);
36552   return __ret;
36553 }
36554 #else
36555 __ai float64x2_t vreinterpretq_f64_s32(int32x4_t __p0) {
36556   float64x2_t __ret;
36557   __ret = (float64x2_t)(__p0);
36558   return __ret;
36559 }
36560 #endif
36561 
36562 #ifdef __LITTLE_ENDIAN__
36563 __ai float64x2_t vreinterpretq_f64_s64(int64x2_t __p0) {
36564   float64x2_t __ret;
36565   __ret = (float64x2_t)(__p0);
36566   return __ret;
36567 }
36568 #else
36569 __ai float64x2_t vreinterpretq_f64_s64(int64x2_t __p0) {
36570   float64x2_t __ret;
36571   __ret = (float64x2_t)(__p0);
36572   return __ret;
36573 }
36574 #endif
36575 
36576 #ifdef __LITTLE_ENDIAN__
36577 __ai float64x2_t vreinterpretq_f64_s16(int16x8_t __p0) {
36578   float64x2_t __ret;
36579   __ret = (float64x2_t)(__p0);
36580   return __ret;
36581 }
36582 #else
36583 __ai float64x2_t vreinterpretq_f64_s16(int16x8_t __p0) {
36584   float64x2_t __ret;
36585   __ret = (float64x2_t)(__p0);
36586   return __ret;
36587 }
36588 #endif
36589 
36590 #ifdef __LITTLE_ENDIAN__
36591 __ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) {
36592   float32x4_t __ret;
36593   __ret = (float32x4_t)(__p0);
36594   return __ret;
36595 }
36596 #else
36597 __ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) {
36598   float32x4_t __ret;
36599   __ret = (float32x4_t)(__p0);
36600   return __ret;
36601 }
36602 #endif
36603 
36604 #ifdef __LITTLE_ENDIAN__
36605 __ai float32x4_t vreinterpretq_f32_p128(poly128_t __p0) {
36606   float32x4_t __ret;
36607   __ret = (float32x4_t)(__p0);
36608   return __ret;
36609 }
36610 #else
36611 __ai float32x4_t vreinterpretq_f32_p128(poly128_t __p0) {
36612   float32x4_t __ret;
36613   __ret = (float32x4_t)(__p0);
36614   return __ret;
36615 }
36616 #endif
36617 
36618 #ifdef __LITTLE_ENDIAN__
36619 __ai float32x4_t vreinterpretq_f32_p64(poly64x2_t __p0) {
36620   float32x4_t __ret;
36621   __ret = (float32x4_t)(__p0);
36622   return __ret;
36623 }
36624 #else
36625 __ai float32x4_t vreinterpretq_f32_p64(poly64x2_t __p0) {
36626   float32x4_t __ret;
36627   __ret = (float32x4_t)(__p0);
36628   return __ret;
36629 }
36630 #endif
36631 
36632 #ifdef __LITTLE_ENDIAN__
36633 __ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) {
36634   float32x4_t __ret;
36635   __ret = (float32x4_t)(__p0);
36636   return __ret;
36637 }
36638 #else
36639 __ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) {
36640   float32x4_t __ret;
36641   __ret = (float32x4_t)(__p0);
36642   return __ret;
36643 }
36644 #endif
36645 
36646 #ifdef __LITTLE_ENDIAN__
36647 __ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) {
36648   float32x4_t __ret;
36649   __ret = (float32x4_t)(__p0);
36650   return __ret;
36651 }
36652 #else
36653 __ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) {
36654   float32x4_t __ret;
36655   __ret = (float32x4_t)(__p0);
36656   return __ret;
36657 }
36658 #endif
36659 
36660 #ifdef __LITTLE_ENDIAN__
36661 __ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) {
36662   float32x4_t __ret;
36663   __ret = (float32x4_t)(__p0);
36664   return __ret;
36665 }
36666 #else
36667 __ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) {
36668   float32x4_t __ret;
36669   __ret = (float32x4_t)(__p0);
36670   return __ret;
36671 }
36672 #endif
36673 
36674 #ifdef __LITTLE_ENDIAN__
36675 __ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) {
36676   float32x4_t __ret;
36677   __ret = (float32x4_t)(__p0);
36678   return __ret;
36679 }
36680 #else
36681 __ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) {
36682   float32x4_t __ret;
36683   __ret = (float32x4_t)(__p0);
36684   return __ret;
36685 }
36686 #endif
36687 
36688 #ifdef __LITTLE_ENDIAN__
36689 __ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) {
36690   float32x4_t __ret;
36691   __ret = (float32x4_t)(__p0);
36692   return __ret;
36693 }
36694 #else
36695 __ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) {
36696   float32x4_t __ret;
36697   __ret = (float32x4_t)(__p0);
36698   return __ret;
36699 }
36700 #endif
36701 
36702 #ifdef __LITTLE_ENDIAN__
36703 __ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) {
36704   float32x4_t __ret;
36705   __ret = (float32x4_t)(__p0);
36706   return __ret;
36707 }
36708 #else
36709 __ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) {
36710   float32x4_t __ret;
36711   __ret = (float32x4_t)(__p0);
36712   return __ret;
36713 }
36714 #endif
36715 
36716 #ifdef __LITTLE_ENDIAN__
36717 __ai float32x4_t vreinterpretq_f32_f64(float64x2_t __p0) {
36718   float32x4_t __ret;
36719   __ret = (float32x4_t)(__p0);
36720   return __ret;
36721 }
36722 #else
36723 __ai float32x4_t vreinterpretq_f32_f64(float64x2_t __p0) {
36724   float32x4_t __ret;
36725   __ret = (float32x4_t)(__p0);
36726   return __ret;
36727 }
36728 #endif
36729 
36730 #ifdef __LITTLE_ENDIAN__
36731 __ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) {
36732   float32x4_t __ret;
36733   __ret = (float32x4_t)(__p0);
36734   return __ret;
36735 }
36736 #else
36737 __ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) {
36738   float32x4_t __ret;
36739   __ret = (float32x4_t)(__p0);
36740   return __ret;
36741 }
36742 #endif
36743 
36744 #ifdef __LITTLE_ENDIAN__
36745 __ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) {
36746   float32x4_t __ret;
36747   __ret = (float32x4_t)(__p0);
36748   return __ret;
36749 }
36750 #else
36751 __ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) {
36752   float32x4_t __ret;
36753   __ret = (float32x4_t)(__p0);
36754   return __ret;
36755 }
36756 #endif
36757 
36758 #ifdef __LITTLE_ENDIAN__
36759 __ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) {
36760   float32x4_t __ret;
36761   __ret = (float32x4_t)(__p0);
36762   return __ret;
36763 }
36764 #else
36765 __ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) {
36766   float32x4_t __ret;
36767   __ret = (float32x4_t)(__p0);
36768   return __ret;
36769 }
36770 #endif
36771 
36772 #ifdef __LITTLE_ENDIAN__
36773 __ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) {
36774   float32x4_t __ret;
36775   __ret = (float32x4_t)(__p0);
36776   return __ret;
36777 }
36778 #else
36779 __ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) {
36780   float32x4_t __ret;
36781   __ret = (float32x4_t)(__p0);
36782   return __ret;
36783 }
36784 #endif
36785 
36786 #ifdef __LITTLE_ENDIAN__
36787 __ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) {
36788   float16x8_t __ret;
36789   __ret = (float16x8_t)(__p0);
36790   return __ret;
36791 }
36792 #else
36793 __ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) {
36794   float16x8_t __ret;
36795   __ret = (float16x8_t)(__p0);
36796   return __ret;
36797 }
36798 #endif
36799 
36800 #ifdef __LITTLE_ENDIAN__
36801 __ai float16x8_t vreinterpretq_f16_p128(poly128_t __p0) {
36802   float16x8_t __ret;
36803   __ret = (float16x8_t)(__p0);
36804   return __ret;
36805 }
36806 #else
36807 __ai float16x8_t vreinterpretq_f16_p128(poly128_t __p0) {
36808   float16x8_t __ret;
36809   __ret = (float16x8_t)(__p0);
36810   return __ret;
36811 }
36812 #endif
36813 
36814 #ifdef __LITTLE_ENDIAN__
36815 __ai float16x8_t vreinterpretq_f16_p64(poly64x2_t __p0) {
36816   float16x8_t __ret;
36817   __ret = (float16x8_t)(__p0);
36818   return __ret;
36819 }
36820 #else
36821 __ai float16x8_t vreinterpretq_f16_p64(poly64x2_t __p0) {
36822   float16x8_t __ret;
36823   __ret = (float16x8_t)(__p0);
36824   return __ret;
36825 }
36826 #endif
36827 
36828 #ifdef __LITTLE_ENDIAN__
36829 __ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) {
36830   float16x8_t __ret;
36831   __ret = (float16x8_t)(__p0);
36832   return __ret;
36833 }
36834 #else
36835 __ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) {
36836   float16x8_t __ret;
36837   __ret = (float16x8_t)(__p0);
36838   return __ret;
36839 }
36840 #endif
36841 
36842 #ifdef __LITTLE_ENDIAN__
36843 __ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) {
36844   float16x8_t __ret;
36845   __ret = (float16x8_t)(__p0);
36846   return __ret;
36847 }
36848 #else
36849 __ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) {
36850   float16x8_t __ret;
36851   __ret = (float16x8_t)(__p0);
36852   return __ret;
36853 }
36854 #endif
36855 
36856 #ifdef __LITTLE_ENDIAN__
36857 __ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) {
36858   float16x8_t __ret;
36859   __ret = (float16x8_t)(__p0);
36860   return __ret;
36861 }
36862 #else
36863 __ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) {
36864   float16x8_t __ret;
36865   __ret = (float16x8_t)(__p0);
36866   return __ret;
36867 }
36868 #endif
36869 
36870 #ifdef __LITTLE_ENDIAN__
36871 __ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) {
36872   float16x8_t __ret;
36873   __ret = (float16x8_t)(__p0);
36874   return __ret;
36875 }
36876 #else
36877 __ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) {
36878   float16x8_t __ret;
36879   __ret = (float16x8_t)(__p0);
36880   return __ret;
36881 }
36882 #endif
36883 
36884 #ifdef __LITTLE_ENDIAN__
36885 __ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) {
36886   float16x8_t __ret;
36887   __ret = (float16x8_t)(__p0);
36888   return __ret;
36889 }
36890 #else
36891 __ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) {
36892   float16x8_t __ret;
36893   __ret = (float16x8_t)(__p0);
36894   return __ret;
36895 }
36896 #endif
36897 
36898 #ifdef __LITTLE_ENDIAN__
36899 __ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) {
36900   float16x8_t __ret;
36901   __ret = (float16x8_t)(__p0);
36902   return __ret;
36903 }
36904 #else
36905 __ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) {
36906   float16x8_t __ret;
36907   __ret = (float16x8_t)(__p0);
36908   return __ret;
36909 }
36910 #endif
36911 
36912 #ifdef __LITTLE_ENDIAN__
36913 __ai float16x8_t vreinterpretq_f16_f64(float64x2_t __p0) {
36914   float16x8_t __ret;
36915   __ret = (float16x8_t)(__p0);
36916   return __ret;
36917 }
36918 #else
36919 __ai float16x8_t vreinterpretq_f16_f64(float64x2_t __p0) {
36920   float16x8_t __ret;
36921   __ret = (float16x8_t)(__p0);
36922   return __ret;
36923 }
36924 #endif
36925 
36926 #ifdef __LITTLE_ENDIAN__
36927 __ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) {
36928   float16x8_t __ret;
36929   __ret = (float16x8_t)(__p0);
36930   return __ret;
36931 }
36932 #else
36933 __ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) {
36934   float16x8_t __ret;
36935   __ret = (float16x8_t)(__p0);
36936   return __ret;
36937 }
36938 #endif
36939 
36940 #ifdef __LITTLE_ENDIAN__
36941 __ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) {
36942   float16x8_t __ret;
36943   __ret = (float16x8_t)(__p0);
36944   return __ret;
36945 }
36946 #else
36947 __ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) {
36948   float16x8_t __ret;
36949   __ret = (float16x8_t)(__p0);
36950   return __ret;
36951 }
36952 #endif
36953 
36954 #ifdef __LITTLE_ENDIAN__
36955 __ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) {
36956   float16x8_t __ret;
36957   __ret = (float16x8_t)(__p0);
36958   return __ret;
36959 }
36960 #else
36961 __ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) {
36962   float16x8_t __ret;
36963   __ret = (float16x8_t)(__p0);
36964   return __ret;
36965 }
36966 #endif
36967 
36968 #ifdef __LITTLE_ENDIAN__
36969 __ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) {
36970   float16x8_t __ret;
36971   __ret = (float16x8_t)(__p0);
36972   return __ret;
36973 }
36974 #else
36975 __ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) {
36976   float16x8_t __ret;
36977   __ret = (float16x8_t)(__p0);
36978   return __ret;
36979 }
36980 #endif
36981 
36982 #ifdef __LITTLE_ENDIAN__
36983 __ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) {
36984   int32x4_t __ret;
36985   __ret = (int32x4_t)(__p0);
36986   return __ret;
36987 }
36988 #else
36989 __ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) {
36990   int32x4_t __ret;
36991   __ret = (int32x4_t)(__p0);
36992   return __ret;
36993 }
36994 #endif
36995 
36996 #ifdef __LITTLE_ENDIAN__
36997 __ai int32x4_t vreinterpretq_s32_p128(poly128_t __p0) {
36998   int32x4_t __ret;
36999   __ret = (int32x4_t)(__p0);
37000   return __ret;
37001 }
37002 #else
37003 __ai int32x4_t vreinterpretq_s32_p128(poly128_t __p0) {
37004   int32x4_t __ret;
37005   __ret = (int32x4_t)(__p0);
37006   return __ret;
37007 }
37008 #endif
37009 
37010 #ifdef __LITTLE_ENDIAN__
37011 __ai int32x4_t vreinterpretq_s32_p64(poly64x2_t __p0) {
37012   int32x4_t __ret;
37013   __ret = (int32x4_t)(__p0);
37014   return __ret;
37015 }
37016 #else
37017 __ai int32x4_t vreinterpretq_s32_p64(poly64x2_t __p0) {
37018   int32x4_t __ret;
37019   __ret = (int32x4_t)(__p0);
37020   return __ret;
37021 }
37022 #endif
37023 
37024 #ifdef __LITTLE_ENDIAN__
37025 __ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) {
37026   int32x4_t __ret;
37027   __ret = (int32x4_t)(__p0);
37028   return __ret;
37029 }
37030 #else
37031 __ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) {
37032   int32x4_t __ret;
37033   __ret = (int32x4_t)(__p0);
37034   return __ret;
37035 }
37036 #endif
37037 
37038 #ifdef __LITTLE_ENDIAN__
37039 __ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) {
37040   int32x4_t __ret;
37041   __ret = (int32x4_t)(__p0);
37042   return __ret;
37043 }
37044 #else
37045 __ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) {
37046   int32x4_t __ret;
37047   __ret = (int32x4_t)(__p0);
37048   return __ret;
37049 }
37050 #endif
37051 
37052 #ifdef __LITTLE_ENDIAN__
37053 __ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) {
37054   int32x4_t __ret;
37055   __ret = (int32x4_t)(__p0);
37056   return __ret;
37057 }
37058 #else
37059 __ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) {
37060   int32x4_t __ret;
37061   __ret = (int32x4_t)(__p0);
37062   return __ret;
37063 }
37064 #endif
37065 
37066 #ifdef __LITTLE_ENDIAN__
37067 __ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) {
37068   int32x4_t __ret;
37069   __ret = (int32x4_t)(__p0);
37070   return __ret;
37071 }
37072 #else
37073 __ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) {
37074   int32x4_t __ret;
37075   __ret = (int32x4_t)(__p0);
37076   return __ret;
37077 }
37078 #endif
37079 
37080 #ifdef __LITTLE_ENDIAN__
37081 __ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) {
37082   int32x4_t __ret;
37083   __ret = (int32x4_t)(__p0);
37084   return __ret;
37085 }
37086 #else
37087 __ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) {
37088   int32x4_t __ret;
37089   __ret = (int32x4_t)(__p0);
37090   return __ret;
37091 }
37092 #endif
37093 
37094 #ifdef __LITTLE_ENDIAN__
37095 __ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) {
37096   int32x4_t __ret;
37097   __ret = (int32x4_t)(__p0);
37098   return __ret;
37099 }
37100 #else
37101 __ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) {
37102   int32x4_t __ret;
37103   __ret = (int32x4_t)(__p0);
37104   return __ret;
37105 }
37106 #endif
37107 
37108 #ifdef __LITTLE_ENDIAN__
37109 __ai int32x4_t vreinterpretq_s32_f64(float64x2_t __p0) {
37110   int32x4_t __ret;
37111   __ret = (int32x4_t)(__p0);
37112   return __ret;
37113 }
37114 #else
37115 __ai int32x4_t vreinterpretq_s32_f64(float64x2_t __p0) {
37116   int32x4_t __ret;
37117   __ret = (int32x4_t)(__p0);
37118   return __ret;
37119 }
37120 #endif
37121 
37122 #ifdef __LITTLE_ENDIAN__
37123 __ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) {
37124   int32x4_t __ret;
37125   __ret = (int32x4_t)(__p0);
37126   return __ret;
37127 }
37128 #else
37129 __ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) {
37130   int32x4_t __ret;
37131   __ret = (int32x4_t)(__p0);
37132   return __ret;
37133 }
37134 #endif
37135 
37136 #ifdef __LITTLE_ENDIAN__
37137 __ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) {
37138   int32x4_t __ret;
37139   __ret = (int32x4_t)(__p0);
37140   return __ret;
37141 }
37142 #else
37143 __ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) {
37144   int32x4_t __ret;
37145   __ret = (int32x4_t)(__p0);
37146   return __ret;
37147 }
37148 #endif
37149 
37150 #ifdef __LITTLE_ENDIAN__
37151 __ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) {
37152   int32x4_t __ret;
37153   __ret = (int32x4_t)(__p0);
37154   return __ret;
37155 }
37156 #else
37157 __ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) {
37158   int32x4_t __ret;
37159   __ret = (int32x4_t)(__p0);
37160   return __ret;
37161 }
37162 #endif
37163 
37164 #ifdef __LITTLE_ENDIAN__
37165 __ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) {
37166   int32x4_t __ret;
37167   __ret = (int32x4_t)(__p0);
37168   return __ret;
37169 }
37170 #else
37171 __ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) {
37172   int32x4_t __ret;
37173   __ret = (int32x4_t)(__p0);
37174   return __ret;
37175 }
37176 #endif
37177 
37178 #ifdef __LITTLE_ENDIAN__
37179 __ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) {
37180   int64x2_t __ret;
37181   __ret = (int64x2_t)(__p0);
37182   return __ret;
37183 }
37184 #else
37185 __ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) {
37186   int64x2_t __ret;
37187   __ret = (int64x2_t)(__p0);
37188   return __ret;
37189 }
37190 #endif
37191 
37192 #ifdef __LITTLE_ENDIAN__
37193 __ai int64x2_t vreinterpretq_s64_p128(poly128_t __p0) {
37194   int64x2_t __ret;
37195   __ret = (int64x2_t)(__p0);
37196   return __ret;
37197 }
37198 #else
37199 __ai int64x2_t vreinterpretq_s64_p128(poly128_t __p0) {
37200   int64x2_t __ret;
37201   __ret = (int64x2_t)(__p0);
37202   return __ret;
37203 }
37204 #endif
37205 
37206 #ifdef __LITTLE_ENDIAN__
37207 __ai int64x2_t vreinterpretq_s64_p64(poly64x2_t __p0) {
37208   int64x2_t __ret;
37209   __ret = (int64x2_t)(__p0);
37210   return __ret;
37211 }
37212 #else
37213 __ai int64x2_t vreinterpretq_s64_p64(poly64x2_t __p0) {
37214   int64x2_t __ret;
37215   __ret = (int64x2_t)(__p0);
37216   return __ret;
37217 }
37218 #endif
37219 
37220 #ifdef __LITTLE_ENDIAN__
37221 __ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) {
37222   int64x2_t __ret;
37223   __ret = (int64x2_t)(__p0);
37224   return __ret;
37225 }
37226 #else
37227 __ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) {
37228   int64x2_t __ret;
37229   __ret = (int64x2_t)(__p0);
37230   return __ret;
37231 }
37232 #endif
37233 
37234 #ifdef __LITTLE_ENDIAN__
37235 __ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) {
37236   int64x2_t __ret;
37237   __ret = (int64x2_t)(__p0);
37238   return __ret;
37239 }
37240 #else
37241 __ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) {
37242   int64x2_t __ret;
37243   __ret = (int64x2_t)(__p0);
37244   return __ret;
37245 }
37246 #endif
37247 
37248 #ifdef __LITTLE_ENDIAN__
37249 __ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) {
37250   int64x2_t __ret;
37251   __ret = (int64x2_t)(__p0);
37252   return __ret;
37253 }
37254 #else
37255 __ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) {
37256   int64x2_t __ret;
37257   __ret = (int64x2_t)(__p0);
37258   return __ret;
37259 }
37260 #endif
37261 
37262 #ifdef __LITTLE_ENDIAN__
37263 __ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) {
37264   int64x2_t __ret;
37265   __ret = (int64x2_t)(__p0);
37266   return __ret;
37267 }
37268 #else
37269 __ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) {
37270   int64x2_t __ret;
37271   __ret = (int64x2_t)(__p0);
37272   return __ret;
37273 }
37274 #endif
37275 
37276 #ifdef __LITTLE_ENDIAN__
37277 __ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) {
37278   int64x2_t __ret;
37279   __ret = (int64x2_t)(__p0);
37280   return __ret;
37281 }
37282 #else
37283 __ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) {
37284   int64x2_t __ret;
37285   __ret = (int64x2_t)(__p0);
37286   return __ret;
37287 }
37288 #endif
37289 
37290 #ifdef __LITTLE_ENDIAN__
37291 __ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) {
37292   int64x2_t __ret;
37293   __ret = (int64x2_t)(__p0);
37294   return __ret;
37295 }
37296 #else
37297 __ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) {
37298   int64x2_t __ret;
37299   __ret = (int64x2_t)(__p0);
37300   return __ret;
37301 }
37302 #endif
37303 
37304 #ifdef __LITTLE_ENDIAN__
37305 __ai int64x2_t vreinterpretq_s64_f64(float64x2_t __p0) {
37306   int64x2_t __ret;
37307   __ret = (int64x2_t)(__p0);
37308   return __ret;
37309 }
37310 #else
37311 __ai int64x2_t vreinterpretq_s64_f64(float64x2_t __p0) {
37312   int64x2_t __ret;
37313   __ret = (int64x2_t)(__p0);
37314   return __ret;
37315 }
37316 #endif
37317 
37318 #ifdef __LITTLE_ENDIAN__
37319 __ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) {
37320   int64x2_t __ret;
37321   __ret = (int64x2_t)(__p0);
37322   return __ret;
37323 }
37324 #else
37325 __ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) {
37326   int64x2_t __ret;
37327   __ret = (int64x2_t)(__p0);
37328   return __ret;
37329 }
37330 #endif
37331 
37332 #ifdef __LITTLE_ENDIAN__
37333 __ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) {
37334   int64x2_t __ret;
37335   __ret = (int64x2_t)(__p0);
37336   return __ret;
37337 }
37338 #else
37339 __ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) {
37340   int64x2_t __ret;
37341   __ret = (int64x2_t)(__p0);
37342   return __ret;
37343 }
37344 #endif
37345 
37346 #ifdef __LITTLE_ENDIAN__
37347 __ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) {
37348   int64x2_t __ret;
37349   __ret = (int64x2_t)(__p0);
37350   return __ret;
37351 }
37352 #else
37353 __ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) {
37354   int64x2_t __ret;
37355   __ret = (int64x2_t)(__p0);
37356   return __ret;
37357 }
37358 #endif
37359 
37360 #ifdef __LITTLE_ENDIAN__
37361 __ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) {
37362   int64x2_t __ret;
37363   __ret = (int64x2_t)(__p0);
37364   return __ret;
37365 }
37366 #else
37367 __ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) {
37368   int64x2_t __ret;
37369   __ret = (int64x2_t)(__p0);
37370   return __ret;
37371 }
37372 #endif
37373 
37374 #ifdef __LITTLE_ENDIAN__
37375 __ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) {
37376   int16x8_t __ret;
37377   __ret = (int16x8_t)(__p0);
37378   return __ret;
37379 }
37380 #else
37381 __ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) {
37382   int16x8_t __ret;
37383   __ret = (int16x8_t)(__p0);
37384   return __ret;
37385 }
37386 #endif
37387 
37388 #ifdef __LITTLE_ENDIAN__
37389 __ai int16x8_t vreinterpretq_s16_p128(poly128_t __p0) {
37390   int16x8_t __ret;
37391   __ret = (int16x8_t)(__p0);
37392   return __ret;
37393 }
37394 #else
37395 __ai int16x8_t vreinterpretq_s16_p128(poly128_t __p0) {
37396   int16x8_t __ret;
37397   __ret = (int16x8_t)(__p0);
37398   return __ret;
37399 }
37400 #endif
37401 
37402 #ifdef __LITTLE_ENDIAN__
37403 __ai int16x8_t vreinterpretq_s16_p64(poly64x2_t __p0) {
37404   int16x8_t __ret;
37405   __ret = (int16x8_t)(__p0);
37406   return __ret;
37407 }
37408 #else
37409 __ai int16x8_t vreinterpretq_s16_p64(poly64x2_t __p0) {
37410   int16x8_t __ret;
37411   __ret = (int16x8_t)(__p0);
37412   return __ret;
37413 }
37414 #endif
37415 
37416 #ifdef __LITTLE_ENDIAN__
37417 __ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) {
37418   int16x8_t __ret;
37419   __ret = (int16x8_t)(__p0);
37420   return __ret;
37421 }
37422 #else
37423 __ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) {
37424   int16x8_t __ret;
37425   __ret = (int16x8_t)(__p0);
37426   return __ret;
37427 }
37428 #endif
37429 
37430 #ifdef __LITTLE_ENDIAN__
37431 __ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) {
37432   int16x8_t __ret;
37433   __ret = (int16x8_t)(__p0);
37434   return __ret;
37435 }
37436 #else
37437 __ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) {
37438   int16x8_t __ret;
37439   __ret = (int16x8_t)(__p0);
37440   return __ret;
37441 }
37442 #endif
37443 
37444 #ifdef __LITTLE_ENDIAN__
37445 __ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) {
37446   int16x8_t __ret;
37447   __ret = (int16x8_t)(__p0);
37448   return __ret;
37449 }
37450 #else
37451 __ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) {
37452   int16x8_t __ret;
37453   __ret = (int16x8_t)(__p0);
37454   return __ret;
37455 }
37456 #endif
37457 
37458 #ifdef __LITTLE_ENDIAN__
37459 __ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) {
37460   int16x8_t __ret;
37461   __ret = (int16x8_t)(__p0);
37462   return __ret;
37463 }
37464 #else
37465 __ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) {
37466   int16x8_t __ret;
37467   __ret = (int16x8_t)(__p0);
37468   return __ret;
37469 }
37470 #endif
37471 
37472 #ifdef __LITTLE_ENDIAN__
37473 __ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) {
37474   int16x8_t __ret;
37475   __ret = (int16x8_t)(__p0);
37476   return __ret;
37477 }
37478 #else
37479 __ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) {
37480   int16x8_t __ret;
37481   __ret = (int16x8_t)(__p0);
37482   return __ret;
37483 }
37484 #endif
37485 
37486 #ifdef __LITTLE_ENDIAN__
37487 __ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) {
37488   int16x8_t __ret;
37489   __ret = (int16x8_t)(__p0);
37490   return __ret;
37491 }
37492 #else
37493 __ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) {
37494   int16x8_t __ret;
37495   __ret = (int16x8_t)(__p0);
37496   return __ret;
37497 }
37498 #endif
37499 
37500 #ifdef __LITTLE_ENDIAN__
37501 __ai int16x8_t vreinterpretq_s16_f64(float64x2_t __p0) {
37502   int16x8_t __ret;
37503   __ret = (int16x8_t)(__p0);
37504   return __ret;
37505 }
37506 #else
37507 __ai int16x8_t vreinterpretq_s16_f64(float64x2_t __p0) {
37508   int16x8_t __ret;
37509   __ret = (int16x8_t)(__p0);
37510   return __ret;
37511 }
37512 #endif
37513 
37514 #ifdef __LITTLE_ENDIAN__
37515 __ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) {
37516   int16x8_t __ret;
37517   __ret = (int16x8_t)(__p0);
37518   return __ret;
37519 }
37520 #else
37521 __ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) {
37522   int16x8_t __ret;
37523   __ret = (int16x8_t)(__p0);
37524   return __ret;
37525 }
37526 #endif
37527 
37528 #ifdef __LITTLE_ENDIAN__
37529 __ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) {
37530   int16x8_t __ret;
37531   __ret = (int16x8_t)(__p0);
37532   return __ret;
37533 }
37534 #else
37535 __ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) {
37536   int16x8_t __ret;
37537   __ret = (int16x8_t)(__p0);
37538   return __ret;
37539 }
37540 #endif
37541 
37542 #ifdef __LITTLE_ENDIAN__
37543 __ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) {
37544   int16x8_t __ret;
37545   __ret = (int16x8_t)(__p0);
37546   return __ret;
37547 }
37548 #else
37549 __ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) {
37550   int16x8_t __ret;
37551   __ret = (int16x8_t)(__p0);
37552   return __ret;
37553 }
37554 #endif
37555 
37556 #ifdef __LITTLE_ENDIAN__
37557 __ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) {
37558   int16x8_t __ret;
37559   __ret = (int16x8_t)(__p0);
37560   return __ret;
37561 }
37562 #else
37563 __ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) {
37564   int16x8_t __ret;
37565   __ret = (int16x8_t)(__p0);
37566   return __ret;
37567 }
37568 #endif
37569 
37570 #ifdef __LITTLE_ENDIAN__
37571 __ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) {
37572   uint8x8_t __ret;
37573   __ret = (uint8x8_t)(__p0);
37574   return __ret;
37575 }
37576 #else
37577 __ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) {
37578   uint8x8_t __ret;
37579   __ret = (uint8x8_t)(__p0);
37580   return __ret;
37581 }
37582 #endif
37583 
37584 #ifdef __LITTLE_ENDIAN__
37585 __ai uint8x8_t vreinterpret_u8_p64(poly64x1_t __p0) {
37586   uint8x8_t __ret;
37587   __ret = (uint8x8_t)(__p0);
37588   return __ret;
37589 }
37590 #else
37591 __ai uint8x8_t vreinterpret_u8_p64(poly64x1_t __p0) {
37592   uint8x8_t __ret;
37593   __ret = (uint8x8_t)(__p0);
37594   return __ret;
37595 }
37596 #endif
37597 
37598 #ifdef __LITTLE_ENDIAN__
37599 __ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) {
37600   uint8x8_t __ret;
37601   __ret = (uint8x8_t)(__p0);
37602   return __ret;
37603 }
37604 #else
37605 __ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) {
37606   uint8x8_t __ret;
37607   __ret = (uint8x8_t)(__p0);
37608   return __ret;
37609 }
37610 #endif
37611 
37612 #ifdef __LITTLE_ENDIAN__
37613 __ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) {
37614   uint8x8_t __ret;
37615   __ret = (uint8x8_t)(__p0);
37616   return __ret;
37617 }
37618 #else
37619 __ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) {
37620   uint8x8_t __ret;
37621   __ret = (uint8x8_t)(__p0);
37622   return __ret;
37623 }
37624 #endif
37625 
37626 #ifdef __LITTLE_ENDIAN__
37627 __ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) {
37628   uint8x8_t __ret;
37629   __ret = (uint8x8_t)(__p0);
37630   return __ret;
37631 }
37632 #else
37633 __ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) {
37634   uint8x8_t __ret;
37635   __ret = (uint8x8_t)(__p0);
37636   return __ret;
37637 }
37638 #endif
37639 
37640 #ifdef __LITTLE_ENDIAN__
37641 __ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) {
37642   uint8x8_t __ret;
37643   __ret = (uint8x8_t)(__p0);
37644   return __ret;
37645 }
37646 #else
37647 __ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) {
37648   uint8x8_t __ret;
37649   __ret = (uint8x8_t)(__p0);
37650   return __ret;
37651 }
37652 #endif
37653 
37654 #ifdef __LITTLE_ENDIAN__
37655 __ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) {
37656   uint8x8_t __ret;
37657   __ret = (uint8x8_t)(__p0);
37658   return __ret;
37659 }
37660 #else
37661 __ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) {
37662   uint8x8_t __ret;
37663   __ret = (uint8x8_t)(__p0);
37664   return __ret;
37665 }
37666 #endif
37667 
37668 #ifdef __LITTLE_ENDIAN__
37669 __ai uint8x8_t vreinterpret_u8_f64(float64x1_t __p0) {
37670   uint8x8_t __ret;
37671   __ret = (uint8x8_t)(__p0);
37672   return __ret;
37673 }
37674 #else
37675 __ai uint8x8_t vreinterpret_u8_f64(float64x1_t __p0) {
37676   uint8x8_t __ret;
37677   __ret = (uint8x8_t)(__p0);
37678   return __ret;
37679 }
37680 #endif
37681 
37682 #ifdef __LITTLE_ENDIAN__
37683 __ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) {
37684   uint8x8_t __ret;
37685   __ret = (uint8x8_t)(__p0);
37686   return __ret;
37687 }
37688 #else
37689 __ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) {
37690   uint8x8_t __ret;
37691   __ret = (uint8x8_t)(__p0);
37692   return __ret;
37693 }
37694 #endif
37695 
37696 #ifdef __LITTLE_ENDIAN__
37697 __ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) {
37698   uint8x8_t __ret;
37699   __ret = (uint8x8_t)(__p0);
37700   return __ret;
37701 }
37702 #else
37703 __ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) {
37704   uint8x8_t __ret;
37705   __ret = (uint8x8_t)(__p0);
37706   return __ret;
37707 }
37708 #endif
37709 
37710 #ifdef __LITTLE_ENDIAN__
37711 __ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) {
37712   uint8x8_t __ret;
37713   __ret = (uint8x8_t)(__p0);
37714   return __ret;
37715 }
37716 #else
37717 __ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) {
37718   uint8x8_t __ret;
37719   __ret = (uint8x8_t)(__p0);
37720   return __ret;
37721 }
37722 #endif
37723 
37724 #ifdef __LITTLE_ENDIAN__
37725 __ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) {
37726   uint8x8_t __ret;
37727   __ret = (uint8x8_t)(__p0);
37728   return __ret;
37729 }
37730 #else
37731 __ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) {
37732   uint8x8_t __ret;
37733   __ret = (uint8x8_t)(__p0);
37734   return __ret;
37735 }
37736 #endif
37737 
37738 #ifdef __LITTLE_ENDIAN__
37739 __ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) {
37740   uint8x8_t __ret;
37741   __ret = (uint8x8_t)(__p0);
37742   return __ret;
37743 }
37744 #else
37745 __ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) {
37746   uint8x8_t __ret;
37747   __ret = (uint8x8_t)(__p0);
37748   return __ret;
37749 }
37750 #endif
37751 
37752 #ifdef __LITTLE_ENDIAN__
37753 __ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) {
37754   uint32x2_t __ret;
37755   __ret = (uint32x2_t)(__p0);
37756   return __ret;
37757 }
37758 #else
37759 __ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) {
37760   uint32x2_t __ret;
37761   __ret = (uint32x2_t)(__p0);
37762   return __ret;
37763 }
37764 #endif
37765 
37766 #ifdef __LITTLE_ENDIAN__
37767 __ai uint32x2_t vreinterpret_u32_p64(poly64x1_t __p0) {
37768   uint32x2_t __ret;
37769   __ret = (uint32x2_t)(__p0);
37770   return __ret;
37771 }
37772 #else
37773 __ai uint32x2_t vreinterpret_u32_p64(poly64x1_t __p0) {
37774   uint32x2_t __ret;
37775   __ret = (uint32x2_t)(__p0);
37776   return __ret;
37777 }
37778 #endif
37779 
37780 #ifdef __LITTLE_ENDIAN__
37781 __ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) {
37782   uint32x2_t __ret;
37783   __ret = (uint32x2_t)(__p0);
37784   return __ret;
37785 }
37786 #else
37787 __ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) {
37788   uint32x2_t __ret;
37789   __ret = (uint32x2_t)(__p0);
37790   return __ret;
37791 }
37792 #endif
37793 
37794 #ifdef __LITTLE_ENDIAN__
37795 __ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) {
37796   uint32x2_t __ret;
37797   __ret = (uint32x2_t)(__p0);
37798   return __ret;
37799 }
37800 #else
37801 __ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) {
37802   uint32x2_t __ret;
37803   __ret = (uint32x2_t)(__p0);
37804   return __ret;
37805 }
37806 #endif
37807 
37808 #ifdef __LITTLE_ENDIAN__
37809 __ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) {
37810   uint32x2_t __ret;
37811   __ret = (uint32x2_t)(__p0);
37812   return __ret;
37813 }
37814 #else
37815 __ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) {
37816   uint32x2_t __ret;
37817   __ret = (uint32x2_t)(__p0);
37818   return __ret;
37819 }
37820 #endif
37821 
37822 #ifdef __LITTLE_ENDIAN__
37823 __ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) {
37824   uint32x2_t __ret;
37825   __ret = (uint32x2_t)(__p0);
37826   return __ret;
37827 }
37828 #else
37829 __ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) {
37830   uint32x2_t __ret;
37831   __ret = (uint32x2_t)(__p0);
37832   return __ret;
37833 }
37834 #endif
37835 
37836 #ifdef __LITTLE_ENDIAN__
37837 __ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) {
37838   uint32x2_t __ret;
37839   __ret = (uint32x2_t)(__p0);
37840   return __ret;
37841 }
37842 #else
37843 __ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) {
37844   uint32x2_t __ret;
37845   __ret = (uint32x2_t)(__p0);
37846   return __ret;
37847 }
37848 #endif
37849 
37850 #ifdef __LITTLE_ENDIAN__
37851 __ai uint32x2_t vreinterpret_u32_f64(float64x1_t __p0) {
37852   uint32x2_t __ret;
37853   __ret = (uint32x2_t)(__p0);
37854   return __ret;
37855 }
37856 #else
37857 __ai uint32x2_t vreinterpret_u32_f64(float64x1_t __p0) {
37858   uint32x2_t __ret;
37859   __ret = (uint32x2_t)(__p0);
37860   return __ret;
37861 }
37862 #endif
37863 
37864 #ifdef __LITTLE_ENDIAN__
37865 __ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) {
37866   uint32x2_t __ret;
37867   __ret = (uint32x2_t)(__p0);
37868   return __ret;
37869 }
37870 #else
37871 __ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) {
37872   uint32x2_t __ret;
37873   __ret = (uint32x2_t)(__p0);
37874   return __ret;
37875 }
37876 #endif
37877 
37878 #ifdef __LITTLE_ENDIAN__
37879 __ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) {
37880   uint32x2_t __ret;
37881   __ret = (uint32x2_t)(__p0);
37882   return __ret;
37883 }
37884 #else
37885 __ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) {
37886   uint32x2_t __ret;
37887   __ret = (uint32x2_t)(__p0);
37888   return __ret;
37889 }
37890 #endif
37891 
37892 #ifdef __LITTLE_ENDIAN__
37893 __ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) {
37894   uint32x2_t __ret;
37895   __ret = (uint32x2_t)(__p0);
37896   return __ret;
37897 }
37898 #else
37899 __ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) {
37900   uint32x2_t __ret;
37901   __ret = (uint32x2_t)(__p0);
37902   return __ret;
37903 }
37904 #endif
37905 
37906 #ifdef __LITTLE_ENDIAN__
37907 __ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) {
37908   uint32x2_t __ret;
37909   __ret = (uint32x2_t)(__p0);
37910   return __ret;
37911 }
37912 #else
37913 __ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) {
37914   uint32x2_t __ret;
37915   __ret = (uint32x2_t)(__p0);
37916   return __ret;
37917 }
37918 #endif
37919 
37920 #ifdef __LITTLE_ENDIAN__
37921 __ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) {
37922   uint32x2_t __ret;
37923   __ret = (uint32x2_t)(__p0);
37924   return __ret;
37925 }
37926 #else
37927 __ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) {
37928   uint32x2_t __ret;
37929   __ret = (uint32x2_t)(__p0);
37930   return __ret;
37931 }
37932 #endif
37933 
37934 #ifdef __LITTLE_ENDIAN__
37935 __ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) {
37936   uint64x1_t __ret;
37937   __ret = (uint64x1_t)(__p0);
37938   return __ret;
37939 }
37940 #else
37941 __ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) {
37942   uint64x1_t __ret;
37943   __ret = (uint64x1_t)(__p0);
37944   return __ret;
37945 }
37946 #endif
37947 
37948 #ifdef __LITTLE_ENDIAN__
37949 __ai uint64x1_t vreinterpret_u64_p64(poly64x1_t __p0) {
37950   uint64x1_t __ret;
37951   __ret = (uint64x1_t)(__p0);
37952   return __ret;
37953 }
37954 #else
37955 __ai uint64x1_t vreinterpret_u64_p64(poly64x1_t __p0) {
37956   uint64x1_t __ret;
37957   __ret = (uint64x1_t)(__p0);
37958   return __ret;
37959 }
37960 #endif
37961 
37962 #ifdef __LITTLE_ENDIAN__
37963 __ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) {
37964   uint64x1_t __ret;
37965   __ret = (uint64x1_t)(__p0);
37966   return __ret;
37967 }
37968 #else
37969 __ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) {
37970   uint64x1_t __ret;
37971   __ret = (uint64x1_t)(__p0);
37972   return __ret;
37973 }
37974 #endif
37975 
37976 #ifdef __LITTLE_ENDIAN__
37977 __ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) {
37978   uint64x1_t __ret;
37979   __ret = (uint64x1_t)(__p0);
37980   return __ret;
37981 }
37982 #else
37983 __ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) {
37984   uint64x1_t __ret;
37985   __ret = (uint64x1_t)(__p0);
37986   return __ret;
37987 }
37988 #endif
37989 
37990 #ifdef __LITTLE_ENDIAN__
37991 __ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) {
37992   uint64x1_t __ret;
37993   __ret = (uint64x1_t)(__p0);
37994   return __ret;
37995 }
37996 #else
37997 __ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) {
37998   uint64x1_t __ret;
37999   __ret = (uint64x1_t)(__p0);
38000   return __ret;
38001 }
38002 #endif
38003 
38004 #ifdef __LITTLE_ENDIAN__
38005 __ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) {
38006   uint64x1_t __ret;
38007   __ret = (uint64x1_t)(__p0);
38008   return __ret;
38009 }
38010 #else
38011 __ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) {
38012   uint64x1_t __ret;
38013   __ret = (uint64x1_t)(__p0);
38014   return __ret;
38015 }
38016 #endif
38017 
38018 #ifdef __LITTLE_ENDIAN__
38019 __ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) {
38020   uint64x1_t __ret;
38021   __ret = (uint64x1_t)(__p0);
38022   return __ret;
38023 }
38024 #else
38025 __ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) {
38026   uint64x1_t __ret;
38027   __ret = (uint64x1_t)(__p0);
38028   return __ret;
38029 }
38030 #endif
38031 
38032 #ifdef __LITTLE_ENDIAN__
38033 __ai uint64x1_t vreinterpret_u64_f64(float64x1_t __p0) {
38034   uint64x1_t __ret;
38035   __ret = (uint64x1_t)(__p0);
38036   return __ret;
38037 }
38038 #else
38039 __ai uint64x1_t vreinterpret_u64_f64(float64x1_t __p0) {
38040   uint64x1_t __ret;
38041   __ret = (uint64x1_t)(__p0);
38042   return __ret;
38043 }
38044 #endif
38045 
38046 #ifdef __LITTLE_ENDIAN__
38047 __ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) {
38048   uint64x1_t __ret;
38049   __ret = (uint64x1_t)(__p0);
38050   return __ret;
38051 }
38052 #else
38053 __ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) {
38054   uint64x1_t __ret;
38055   __ret = (uint64x1_t)(__p0);
38056   return __ret;
38057 }
38058 #endif
38059 
38060 #ifdef __LITTLE_ENDIAN__
38061 __ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) {
38062   uint64x1_t __ret;
38063   __ret = (uint64x1_t)(__p0);
38064   return __ret;
38065 }
38066 #else
38067 __ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) {
38068   uint64x1_t __ret;
38069   __ret = (uint64x1_t)(__p0);
38070   return __ret;
38071 }
38072 #endif
38073 
38074 #ifdef __LITTLE_ENDIAN__
38075 __ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) {
38076   uint64x1_t __ret;
38077   __ret = (uint64x1_t)(__p0);
38078   return __ret;
38079 }
38080 #else
38081 __ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) {
38082   uint64x1_t __ret;
38083   __ret = (uint64x1_t)(__p0);
38084   return __ret;
38085 }
38086 #endif
38087 
38088 #ifdef __LITTLE_ENDIAN__
38089 __ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) {
38090   uint64x1_t __ret;
38091   __ret = (uint64x1_t)(__p0);
38092   return __ret;
38093 }
38094 #else
38095 __ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) {
38096   uint64x1_t __ret;
38097   __ret = (uint64x1_t)(__p0);
38098   return __ret;
38099 }
38100 #endif
38101 
38102 #ifdef __LITTLE_ENDIAN__
38103 __ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) {
38104   uint64x1_t __ret;
38105   __ret = (uint64x1_t)(__p0);
38106   return __ret;
38107 }
38108 #else
38109 __ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) {
38110   uint64x1_t __ret;
38111   __ret = (uint64x1_t)(__p0);
38112   return __ret;
38113 }
38114 #endif
38115 
38116 #ifdef __LITTLE_ENDIAN__
38117 __ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) {
38118   uint16x4_t __ret;
38119   __ret = (uint16x4_t)(__p0);
38120   return __ret;
38121 }
38122 #else
38123 __ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) {
38124   uint16x4_t __ret;
38125   __ret = (uint16x4_t)(__p0);
38126   return __ret;
38127 }
38128 #endif
38129 
38130 #ifdef __LITTLE_ENDIAN__
38131 __ai uint16x4_t vreinterpret_u16_p64(poly64x1_t __p0) {
38132   uint16x4_t __ret;
38133   __ret = (uint16x4_t)(__p0);
38134   return __ret;
38135 }
38136 #else
38137 __ai uint16x4_t vreinterpret_u16_p64(poly64x1_t __p0) {
38138   uint16x4_t __ret;
38139   __ret = (uint16x4_t)(__p0);
38140   return __ret;
38141 }
38142 #endif
38143 
38144 #ifdef __LITTLE_ENDIAN__
38145 __ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) {
38146   uint16x4_t __ret;
38147   __ret = (uint16x4_t)(__p0);
38148   return __ret;
38149 }
38150 #else
38151 __ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) {
38152   uint16x4_t __ret;
38153   __ret = (uint16x4_t)(__p0);
38154   return __ret;
38155 }
38156 #endif
38157 
38158 #ifdef __LITTLE_ENDIAN__
38159 __ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) {
38160   uint16x4_t __ret;
38161   __ret = (uint16x4_t)(__p0);
38162   return __ret;
38163 }
38164 #else
38165 __ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) {
38166   uint16x4_t __ret;
38167   __ret = (uint16x4_t)(__p0);
38168   return __ret;
38169 }
38170 #endif
38171 
38172 #ifdef __LITTLE_ENDIAN__
38173 __ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) {
38174   uint16x4_t __ret;
38175   __ret = (uint16x4_t)(__p0);
38176   return __ret;
38177 }
38178 #else
38179 __ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) {
38180   uint16x4_t __ret;
38181   __ret = (uint16x4_t)(__p0);
38182   return __ret;
38183 }
38184 #endif
38185 
38186 #ifdef __LITTLE_ENDIAN__
38187 __ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) {
38188   uint16x4_t __ret;
38189   __ret = (uint16x4_t)(__p0);
38190   return __ret;
38191 }
38192 #else
38193 __ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) {
38194   uint16x4_t __ret;
38195   __ret = (uint16x4_t)(__p0);
38196   return __ret;
38197 }
38198 #endif
38199 
38200 #ifdef __LITTLE_ENDIAN__
38201 __ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) {
38202   uint16x4_t __ret;
38203   __ret = (uint16x4_t)(__p0);
38204   return __ret;
38205 }
38206 #else
38207 __ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) {
38208   uint16x4_t __ret;
38209   __ret = (uint16x4_t)(__p0);
38210   return __ret;
38211 }
38212 #endif
38213 
38214 #ifdef __LITTLE_ENDIAN__
38215 __ai uint16x4_t vreinterpret_u16_f64(float64x1_t __p0) {
38216   uint16x4_t __ret;
38217   __ret = (uint16x4_t)(__p0);
38218   return __ret;
38219 }
38220 #else
38221 __ai uint16x4_t vreinterpret_u16_f64(float64x1_t __p0) {
38222   uint16x4_t __ret;
38223   __ret = (uint16x4_t)(__p0);
38224   return __ret;
38225 }
38226 #endif
38227 
38228 #ifdef __LITTLE_ENDIAN__
38229 __ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) {
38230   uint16x4_t __ret;
38231   __ret = (uint16x4_t)(__p0);
38232   return __ret;
38233 }
38234 #else
38235 __ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) {
38236   uint16x4_t __ret;
38237   __ret = (uint16x4_t)(__p0);
38238   return __ret;
38239 }
38240 #endif
38241 
38242 #ifdef __LITTLE_ENDIAN__
38243 __ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) {
38244   uint16x4_t __ret;
38245   __ret = (uint16x4_t)(__p0);
38246   return __ret;
38247 }
38248 #else
38249 __ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) {
38250   uint16x4_t __ret;
38251   __ret = (uint16x4_t)(__p0);
38252   return __ret;
38253 }
38254 #endif
38255 
38256 #ifdef __LITTLE_ENDIAN__
38257 __ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) {
38258   uint16x4_t __ret;
38259   __ret = (uint16x4_t)(__p0);
38260   return __ret;
38261 }
38262 #else
38263 __ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) {
38264   uint16x4_t __ret;
38265   __ret = (uint16x4_t)(__p0);
38266   return __ret;
38267 }
38268 #endif
38269 
38270 #ifdef __LITTLE_ENDIAN__
38271 __ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) {
38272   uint16x4_t __ret;
38273   __ret = (uint16x4_t)(__p0);
38274   return __ret;
38275 }
38276 #else
38277 __ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) {
38278   uint16x4_t __ret;
38279   __ret = (uint16x4_t)(__p0);
38280   return __ret;
38281 }
38282 #endif
38283 
38284 #ifdef __LITTLE_ENDIAN__
38285 __ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) {
38286   uint16x4_t __ret;
38287   __ret = (uint16x4_t)(__p0);
38288   return __ret;
38289 }
38290 #else
38291 __ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) {
38292   uint16x4_t __ret;
38293   __ret = (uint16x4_t)(__p0);
38294   return __ret;
38295 }
38296 #endif
38297 
38298 #ifdef __LITTLE_ENDIAN__
38299 __ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) {
38300   int8x8_t __ret;
38301   __ret = (int8x8_t)(__p0);
38302   return __ret;
38303 }
38304 #else
38305 __ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) {
38306   int8x8_t __ret;
38307   __ret = (int8x8_t)(__p0);
38308   return __ret;
38309 }
38310 #endif
38311 
38312 #ifdef __LITTLE_ENDIAN__
38313 __ai int8x8_t vreinterpret_s8_p64(poly64x1_t __p0) {
38314   int8x8_t __ret;
38315   __ret = (int8x8_t)(__p0);
38316   return __ret;
38317 }
38318 #else
38319 __ai int8x8_t vreinterpret_s8_p64(poly64x1_t __p0) {
38320   int8x8_t __ret;
38321   __ret = (int8x8_t)(__p0);
38322   return __ret;
38323 }
38324 #endif
38325 
38326 #ifdef __LITTLE_ENDIAN__
38327 __ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) {
38328   int8x8_t __ret;
38329   __ret = (int8x8_t)(__p0);
38330   return __ret;
38331 }
38332 #else
38333 __ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) {
38334   int8x8_t __ret;
38335   __ret = (int8x8_t)(__p0);
38336   return __ret;
38337 }
38338 #endif
38339 
38340 #ifdef __LITTLE_ENDIAN__
38341 __ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) {
38342   int8x8_t __ret;
38343   __ret = (int8x8_t)(__p0);
38344   return __ret;
38345 }
38346 #else
38347 __ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) {
38348   int8x8_t __ret;
38349   __ret = (int8x8_t)(__p0);
38350   return __ret;
38351 }
38352 #endif
38353 
38354 #ifdef __LITTLE_ENDIAN__
38355 __ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) {
38356   int8x8_t __ret;
38357   __ret = (int8x8_t)(__p0);
38358   return __ret;
38359 }
38360 #else
38361 __ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) {
38362   int8x8_t __ret;
38363   __ret = (int8x8_t)(__p0);
38364   return __ret;
38365 }
38366 #endif
38367 
38368 #ifdef __LITTLE_ENDIAN__
38369 __ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) {
38370   int8x8_t __ret;
38371   __ret = (int8x8_t)(__p0);
38372   return __ret;
38373 }
38374 #else
38375 __ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) {
38376   int8x8_t __ret;
38377   __ret = (int8x8_t)(__p0);
38378   return __ret;
38379 }
38380 #endif
38381 
38382 #ifdef __LITTLE_ENDIAN__
38383 __ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) {
38384   int8x8_t __ret;
38385   __ret = (int8x8_t)(__p0);
38386   return __ret;
38387 }
38388 #else
38389 __ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) {
38390   int8x8_t __ret;
38391   __ret = (int8x8_t)(__p0);
38392   return __ret;
38393 }
38394 #endif
38395 
38396 #ifdef __LITTLE_ENDIAN__
38397 __ai int8x8_t vreinterpret_s8_f64(float64x1_t __p0) {
38398   int8x8_t __ret;
38399   __ret = (int8x8_t)(__p0);
38400   return __ret;
38401 }
38402 #else
38403 __ai int8x8_t vreinterpret_s8_f64(float64x1_t __p0) {
38404   int8x8_t __ret;
38405   __ret = (int8x8_t)(__p0);
38406   return __ret;
38407 }
38408 #endif
38409 
38410 #ifdef __LITTLE_ENDIAN__
38411 __ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) {
38412   int8x8_t __ret;
38413   __ret = (int8x8_t)(__p0);
38414   return __ret;
38415 }
38416 #else
38417 __ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) {
38418   int8x8_t __ret;
38419   __ret = (int8x8_t)(__p0);
38420   return __ret;
38421 }
38422 #endif
38423 
38424 #ifdef __LITTLE_ENDIAN__
38425 __ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) {
38426   int8x8_t __ret;
38427   __ret = (int8x8_t)(__p0);
38428   return __ret;
38429 }
38430 #else
38431 __ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) {
38432   int8x8_t __ret;
38433   __ret = (int8x8_t)(__p0);
38434   return __ret;
38435 }
38436 #endif
38437 
38438 #ifdef __LITTLE_ENDIAN__
38439 __ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) {
38440   int8x8_t __ret;
38441   __ret = (int8x8_t)(__p0);
38442   return __ret;
38443 }
38444 #else
38445 __ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) {
38446   int8x8_t __ret;
38447   __ret = (int8x8_t)(__p0);
38448   return __ret;
38449 }
38450 #endif
38451 
38452 #ifdef __LITTLE_ENDIAN__
38453 __ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) {
38454   int8x8_t __ret;
38455   __ret = (int8x8_t)(__p0);
38456   return __ret;
38457 }
38458 #else
38459 __ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) {
38460   int8x8_t __ret;
38461   __ret = (int8x8_t)(__p0);
38462   return __ret;
38463 }
38464 #endif
38465 
38466 #ifdef __LITTLE_ENDIAN__
38467 __ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) {
38468   int8x8_t __ret;
38469   __ret = (int8x8_t)(__p0);
38470   return __ret;
38471 }
38472 #else
38473 __ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) {
38474   int8x8_t __ret;
38475   __ret = (int8x8_t)(__p0);
38476   return __ret;
38477 }
38478 #endif
38479 
38480 #ifdef __LITTLE_ENDIAN__
38481 __ai float64x1_t vreinterpret_f64_p8(poly8x8_t __p0) {
38482   float64x1_t __ret;
38483   __ret = (float64x1_t)(__p0);
38484   return __ret;
38485 }
38486 #else
38487 __ai float64x1_t vreinterpret_f64_p8(poly8x8_t __p0) {
38488   float64x1_t __ret;
38489   __ret = (float64x1_t)(__p0);
38490   return __ret;
38491 }
38492 #endif
38493 
38494 #ifdef __LITTLE_ENDIAN__
38495 __ai float64x1_t vreinterpret_f64_p64(poly64x1_t __p0) {
38496   float64x1_t __ret;
38497   __ret = (float64x1_t)(__p0);
38498   return __ret;
38499 }
38500 #else
38501 __ai float64x1_t vreinterpret_f64_p64(poly64x1_t __p0) {
38502   float64x1_t __ret;
38503   __ret = (float64x1_t)(__p0);
38504   return __ret;
38505 }
38506 #endif
38507 
38508 #ifdef __LITTLE_ENDIAN__
38509 __ai float64x1_t vreinterpret_f64_p16(poly16x4_t __p0) {
38510   float64x1_t __ret;
38511   __ret = (float64x1_t)(__p0);
38512   return __ret;
38513 }
38514 #else
38515 __ai float64x1_t vreinterpret_f64_p16(poly16x4_t __p0) {
38516   float64x1_t __ret;
38517   __ret = (float64x1_t)(__p0);
38518   return __ret;
38519 }
38520 #endif
38521 
38522 #ifdef __LITTLE_ENDIAN__
38523 __ai float64x1_t vreinterpret_f64_u8(uint8x8_t __p0) {
38524   float64x1_t __ret;
38525   __ret = (float64x1_t)(__p0);
38526   return __ret;
38527 }
38528 #else
38529 __ai float64x1_t vreinterpret_f64_u8(uint8x8_t __p0) {
38530   float64x1_t __ret;
38531   __ret = (float64x1_t)(__p0);
38532   return __ret;
38533 }
38534 #endif
38535 
38536 #ifdef __LITTLE_ENDIAN__
38537 __ai float64x1_t vreinterpret_f64_u32(uint32x2_t __p0) {
38538   float64x1_t __ret;
38539   __ret = (float64x1_t)(__p0);
38540   return __ret;
38541 }
38542 #else
38543 __ai float64x1_t vreinterpret_f64_u32(uint32x2_t __p0) {
38544   float64x1_t __ret;
38545   __ret = (float64x1_t)(__p0);
38546   return __ret;
38547 }
38548 #endif
38549 
38550 #ifdef __LITTLE_ENDIAN__
38551 __ai float64x1_t vreinterpret_f64_u64(uint64x1_t __p0) {
38552   float64x1_t __ret;
38553   __ret = (float64x1_t)(__p0);
38554   return __ret;
38555 }
38556 #else
38557 __ai float64x1_t vreinterpret_f64_u64(uint64x1_t __p0) {
38558   float64x1_t __ret;
38559   __ret = (float64x1_t)(__p0);
38560   return __ret;
38561 }
38562 #endif
38563 
38564 #ifdef __LITTLE_ENDIAN__
38565 __ai float64x1_t vreinterpret_f64_u16(uint16x4_t __p0) {
38566   float64x1_t __ret;
38567   __ret = (float64x1_t)(__p0);
38568   return __ret;
38569 }
38570 #else
38571 __ai float64x1_t vreinterpret_f64_u16(uint16x4_t __p0) {
38572   float64x1_t __ret;
38573   __ret = (float64x1_t)(__p0);
38574   return __ret;
38575 }
38576 #endif
38577 
38578 #ifdef __LITTLE_ENDIAN__
38579 __ai float64x1_t vreinterpret_f64_s8(int8x8_t __p0) {
38580   float64x1_t __ret;
38581   __ret = (float64x1_t)(__p0);
38582   return __ret;
38583 }
38584 #else
38585 __ai float64x1_t vreinterpret_f64_s8(int8x8_t __p0) {
38586   float64x1_t __ret;
38587   __ret = (float64x1_t)(__p0);
38588   return __ret;
38589 }
38590 #endif
38591 
38592 #ifdef __LITTLE_ENDIAN__
38593 __ai float64x1_t vreinterpret_f64_f32(float32x2_t __p0) {
38594   float64x1_t __ret;
38595   __ret = (float64x1_t)(__p0);
38596   return __ret;
38597 }
38598 #else
38599 __ai float64x1_t vreinterpret_f64_f32(float32x2_t __p0) {
38600   float64x1_t __ret;
38601   __ret = (float64x1_t)(__p0);
38602   return __ret;
38603 }
38604 #endif
38605 
38606 #ifdef __LITTLE_ENDIAN__
38607 __ai float64x1_t vreinterpret_f64_f16(float16x4_t __p0) {
38608   float64x1_t __ret;
38609   __ret = (float64x1_t)(__p0);
38610   return __ret;
38611 }
38612 #else
38613 __ai float64x1_t vreinterpret_f64_f16(float16x4_t __p0) {
38614   float64x1_t __ret;
38615   __ret = (float64x1_t)(__p0);
38616   return __ret;
38617 }
38618 #endif
38619 
38620 #ifdef __LITTLE_ENDIAN__
38621 __ai float64x1_t vreinterpret_f64_s32(int32x2_t __p0) {
38622   float64x1_t __ret;
38623   __ret = (float64x1_t)(__p0);
38624   return __ret;
38625 }
38626 #else
38627 __ai float64x1_t vreinterpret_f64_s32(int32x2_t __p0) {
38628   float64x1_t __ret;
38629   __ret = (float64x1_t)(__p0);
38630   return __ret;
38631 }
38632 #endif
38633 
38634 #ifdef __LITTLE_ENDIAN__
38635 __ai float64x1_t vreinterpret_f64_s64(int64x1_t __p0) {
38636   float64x1_t __ret;
38637   __ret = (float64x1_t)(__p0);
38638   return __ret;
38639 }
38640 #else
38641 __ai float64x1_t vreinterpret_f64_s64(int64x1_t __p0) {
38642   float64x1_t __ret;
38643   __ret = (float64x1_t)(__p0);
38644   return __ret;
38645 }
38646 #endif
38647 
38648 #ifdef __LITTLE_ENDIAN__
38649 __ai float64x1_t vreinterpret_f64_s16(int16x4_t __p0) {
38650   float64x1_t __ret;
38651   __ret = (float64x1_t)(__p0);
38652   return __ret;
38653 }
38654 #else
38655 __ai float64x1_t vreinterpret_f64_s16(int16x4_t __p0) {
38656   float64x1_t __ret;
38657   __ret = (float64x1_t)(__p0);
38658   return __ret;
38659 }
38660 #endif
38661 
38662 #ifdef __LITTLE_ENDIAN__
38663 __ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) {
38664   float32x2_t __ret;
38665   __ret = (float32x2_t)(__p0);
38666   return __ret;
38667 }
38668 #else
38669 __ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) {
38670   float32x2_t __ret;
38671   __ret = (float32x2_t)(__p0);
38672   return __ret;
38673 }
38674 #endif
38675 
38676 #ifdef __LITTLE_ENDIAN__
38677 __ai float32x2_t vreinterpret_f32_p64(poly64x1_t __p0) {
38678   float32x2_t __ret;
38679   __ret = (float32x2_t)(__p0);
38680   return __ret;
38681 }
38682 #else
38683 __ai float32x2_t vreinterpret_f32_p64(poly64x1_t __p0) {
38684   float32x2_t __ret;
38685   __ret = (float32x2_t)(__p0);
38686   return __ret;
38687 }
38688 #endif
38689 
38690 #ifdef __LITTLE_ENDIAN__
38691 __ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) {
38692   float32x2_t __ret;
38693   __ret = (float32x2_t)(__p0);
38694   return __ret;
38695 }
38696 #else
38697 __ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) {
38698   float32x2_t __ret;
38699   __ret = (float32x2_t)(__p0);
38700   return __ret;
38701 }
38702 #endif
38703 
38704 #ifdef __LITTLE_ENDIAN__
38705 __ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) {
38706   float32x2_t __ret;
38707   __ret = (float32x2_t)(__p0);
38708   return __ret;
38709 }
38710 #else
38711 __ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) {
38712   float32x2_t __ret;
38713   __ret = (float32x2_t)(__p0);
38714   return __ret;
38715 }
38716 #endif
38717 
38718 #ifdef __LITTLE_ENDIAN__
38719 __ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) {
38720   float32x2_t __ret;
38721   __ret = (float32x2_t)(__p0);
38722   return __ret;
38723 }
38724 #else
38725 __ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) {
38726   float32x2_t __ret;
38727   __ret = (float32x2_t)(__p0);
38728   return __ret;
38729 }
38730 #endif
38731 
38732 #ifdef __LITTLE_ENDIAN__
38733 __ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) {
38734   float32x2_t __ret;
38735   __ret = (float32x2_t)(__p0);
38736   return __ret;
38737 }
38738 #else
38739 __ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) {
38740   float32x2_t __ret;
38741   __ret = (float32x2_t)(__p0);
38742   return __ret;
38743 }
38744 #endif
38745 
38746 #ifdef __LITTLE_ENDIAN__
38747 __ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) {
38748   float32x2_t __ret;
38749   __ret = (float32x2_t)(__p0);
38750   return __ret;
38751 }
38752 #else
38753 __ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) {
38754   float32x2_t __ret;
38755   __ret = (float32x2_t)(__p0);
38756   return __ret;
38757 }
38758 #endif
38759 
38760 #ifdef __LITTLE_ENDIAN__
38761 __ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) {
38762   float32x2_t __ret;
38763   __ret = (float32x2_t)(__p0);
38764   return __ret;
38765 }
38766 #else
38767 __ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) {
38768   float32x2_t __ret;
38769   __ret = (float32x2_t)(__p0);
38770   return __ret;
38771 }
38772 #endif
38773 
38774 #ifdef __LITTLE_ENDIAN__
38775 __ai float32x2_t vreinterpret_f32_f64(float64x1_t __p0) {
38776   float32x2_t __ret;
38777   __ret = (float32x2_t)(__p0);
38778   return __ret;
38779 }
38780 #else
38781 __ai float32x2_t vreinterpret_f32_f64(float64x1_t __p0) {
38782   float32x2_t __ret;
38783   __ret = (float32x2_t)(__p0);
38784   return __ret;
38785 }
38786 #endif
38787 
38788 #ifdef __LITTLE_ENDIAN__
38789 __ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) {
38790   float32x2_t __ret;
38791   __ret = (float32x2_t)(__p0);
38792   return __ret;
38793 }
38794 #else
38795 __ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) {
38796   float32x2_t __ret;
38797   __ret = (float32x2_t)(__p0);
38798   return __ret;
38799 }
38800 #endif
38801 
38802 #ifdef __LITTLE_ENDIAN__
38803 __ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) {
38804   float32x2_t __ret;
38805   __ret = (float32x2_t)(__p0);
38806   return __ret;
38807 }
38808 #else
38809 __ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) {
38810   float32x2_t __ret;
38811   __ret = (float32x2_t)(__p0);
38812   return __ret;
38813 }
38814 #endif
38815 
38816 #ifdef __LITTLE_ENDIAN__
38817 __ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) {
38818   float32x2_t __ret;
38819   __ret = (float32x2_t)(__p0);
38820   return __ret;
38821 }
38822 #else
38823 __ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) {
38824   float32x2_t __ret;
38825   __ret = (float32x2_t)(__p0);
38826   return __ret;
38827 }
38828 #endif
38829 
38830 #ifdef __LITTLE_ENDIAN__
38831 __ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) {
38832   float32x2_t __ret;
38833   __ret = (float32x2_t)(__p0);
38834   return __ret;
38835 }
38836 #else
38837 __ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) {
38838   float32x2_t __ret;
38839   __ret = (float32x2_t)(__p0);
38840   return __ret;
38841 }
38842 #endif
38843 
38844 #ifdef __LITTLE_ENDIAN__
38845 __ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) {
38846   float16x4_t __ret;
38847   __ret = (float16x4_t)(__p0);
38848   return __ret;
38849 }
38850 #else
38851 __ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) {
38852   float16x4_t __ret;
38853   __ret = (float16x4_t)(__p0);
38854   return __ret;
38855 }
38856 #endif
38857 
38858 #ifdef __LITTLE_ENDIAN__
38859 __ai float16x4_t vreinterpret_f16_p64(poly64x1_t __p0) {
38860   float16x4_t __ret;
38861   __ret = (float16x4_t)(__p0);
38862   return __ret;
38863 }
38864 #else
38865 __ai float16x4_t vreinterpret_f16_p64(poly64x1_t __p0) {
38866   float16x4_t __ret;
38867   __ret = (float16x4_t)(__p0);
38868   return __ret;
38869 }
38870 #endif
38871 
38872 #ifdef __LITTLE_ENDIAN__
38873 __ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) {
38874   float16x4_t __ret;
38875   __ret = (float16x4_t)(__p0);
38876   return __ret;
38877 }
38878 #else
38879 __ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) {
38880   float16x4_t __ret;
38881   __ret = (float16x4_t)(__p0);
38882   return __ret;
38883 }
38884 #endif
38885 
38886 #ifdef __LITTLE_ENDIAN__
38887 __ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) {
38888   float16x4_t __ret;
38889   __ret = (float16x4_t)(__p0);
38890   return __ret;
38891 }
38892 #else
38893 __ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) {
38894   float16x4_t __ret;
38895   __ret = (float16x4_t)(__p0);
38896   return __ret;
38897 }
38898 #endif
38899 
38900 #ifdef __LITTLE_ENDIAN__
38901 __ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) {
38902   float16x4_t __ret;
38903   __ret = (float16x4_t)(__p0);
38904   return __ret;
38905 }
38906 #else
38907 __ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) {
38908   float16x4_t __ret;
38909   __ret = (float16x4_t)(__p0);
38910   return __ret;
38911 }
38912 #endif
38913 
38914 #ifdef __LITTLE_ENDIAN__
38915 __ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) {
38916   float16x4_t __ret;
38917   __ret = (float16x4_t)(__p0);
38918   return __ret;
38919 }
38920 #else
38921 __ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) {
38922   float16x4_t __ret;
38923   __ret = (float16x4_t)(__p0);
38924   return __ret;
38925 }
38926 #endif
38927 
38928 #ifdef __LITTLE_ENDIAN__
38929 __ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) {
38930   float16x4_t __ret;
38931   __ret = (float16x4_t)(__p0);
38932   return __ret;
38933 }
38934 #else
38935 __ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) {
38936   float16x4_t __ret;
38937   __ret = (float16x4_t)(__p0);
38938   return __ret;
38939 }
38940 #endif
38941 
38942 #ifdef __LITTLE_ENDIAN__
38943 __ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) {
38944   float16x4_t __ret;
38945   __ret = (float16x4_t)(__p0);
38946   return __ret;
38947 }
38948 #else
38949 __ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) {
38950   float16x4_t __ret;
38951   __ret = (float16x4_t)(__p0);
38952   return __ret;
38953 }
38954 #endif
38955 
38956 #ifdef __LITTLE_ENDIAN__
38957 __ai float16x4_t vreinterpret_f16_f64(float64x1_t __p0) {
38958   float16x4_t __ret;
38959   __ret = (float16x4_t)(__p0);
38960   return __ret;
38961 }
38962 #else
38963 __ai float16x4_t vreinterpret_f16_f64(float64x1_t __p0) {
38964   float16x4_t __ret;
38965   __ret = (float16x4_t)(__p0);
38966   return __ret;
38967 }
38968 #endif
38969 
38970 #ifdef __LITTLE_ENDIAN__
38971 __ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) {
38972   float16x4_t __ret;
38973   __ret = (float16x4_t)(__p0);
38974   return __ret;
38975 }
38976 #else
38977 __ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) {
38978   float16x4_t __ret;
38979   __ret = (float16x4_t)(__p0);
38980   return __ret;
38981 }
38982 #endif
38983 
38984 #ifdef __LITTLE_ENDIAN__
38985 __ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) {
38986   float16x4_t __ret;
38987   __ret = (float16x4_t)(__p0);
38988   return __ret;
38989 }
38990 #else
38991 __ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) {
38992   float16x4_t __ret;
38993   __ret = (float16x4_t)(__p0);
38994   return __ret;
38995 }
38996 #endif
38997 
38998 #ifdef __LITTLE_ENDIAN__
38999 __ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) {
39000   float16x4_t __ret;
39001   __ret = (float16x4_t)(__p0);
39002   return __ret;
39003 }
39004 #else
39005 __ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) {
39006   float16x4_t __ret;
39007   __ret = (float16x4_t)(__p0);
39008   return __ret;
39009 }
39010 #endif
39011 
39012 #ifdef __LITTLE_ENDIAN__
39013 __ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) {
39014   float16x4_t __ret;
39015   __ret = (float16x4_t)(__p0);
39016   return __ret;
39017 }
39018 #else
39019 __ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) {
39020   float16x4_t __ret;
39021   __ret = (float16x4_t)(__p0);
39022   return __ret;
39023 }
39024 #endif
39025 
39026 #ifdef __LITTLE_ENDIAN__
39027 __ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) {
39028   int32x2_t __ret;
39029   __ret = (int32x2_t)(__p0);
39030   return __ret;
39031 }
39032 #else
39033 __ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) {
39034   int32x2_t __ret;
39035   __ret = (int32x2_t)(__p0);
39036   return __ret;
39037 }
39038 #endif
39039 
39040 #ifdef __LITTLE_ENDIAN__
39041 __ai int32x2_t vreinterpret_s32_p64(poly64x1_t __p0) {
39042   int32x2_t __ret;
39043   __ret = (int32x2_t)(__p0);
39044   return __ret;
39045 }
39046 #else
39047 __ai int32x2_t vreinterpret_s32_p64(poly64x1_t __p0) {
39048   int32x2_t __ret;
39049   __ret = (int32x2_t)(__p0);
39050   return __ret;
39051 }
39052 #endif
39053 
39054 #ifdef __LITTLE_ENDIAN__
39055 __ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) {
39056   int32x2_t __ret;
39057   __ret = (int32x2_t)(__p0);
39058   return __ret;
39059 }
39060 #else
39061 __ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) {
39062   int32x2_t __ret;
39063   __ret = (int32x2_t)(__p0);
39064   return __ret;
39065 }
39066 #endif
39067 
39068 #ifdef __LITTLE_ENDIAN__
39069 __ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) {
39070   int32x2_t __ret;
39071   __ret = (int32x2_t)(__p0);
39072   return __ret;
39073 }
39074 #else
39075 __ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) {
39076   int32x2_t __ret;
39077   __ret = (int32x2_t)(__p0);
39078   return __ret;
39079 }
39080 #endif
39081 
39082 #ifdef __LITTLE_ENDIAN__
39083 __ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) {
39084   int32x2_t __ret;
39085   __ret = (int32x2_t)(__p0);
39086   return __ret;
39087 }
39088 #else
39089 __ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) {
39090   int32x2_t __ret;
39091   __ret = (int32x2_t)(__p0);
39092   return __ret;
39093 }
39094 #endif
39095 
39096 #ifdef __LITTLE_ENDIAN__
39097 __ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) {
39098   int32x2_t __ret;
39099   __ret = (int32x2_t)(__p0);
39100   return __ret;
39101 }
39102 #else
39103 __ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) {
39104   int32x2_t __ret;
39105   __ret = (int32x2_t)(__p0);
39106   return __ret;
39107 }
39108 #endif
39109 
39110 #ifdef __LITTLE_ENDIAN__
39111 __ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) {
39112   int32x2_t __ret;
39113   __ret = (int32x2_t)(__p0);
39114   return __ret;
39115 }
39116 #else
39117 __ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) {
39118   int32x2_t __ret;
39119   __ret = (int32x2_t)(__p0);
39120   return __ret;
39121 }
39122 #endif
39123 
39124 #ifdef __LITTLE_ENDIAN__
39125 __ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) {
39126   int32x2_t __ret;
39127   __ret = (int32x2_t)(__p0);
39128   return __ret;
39129 }
39130 #else
39131 __ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) {
39132   int32x2_t __ret;
39133   __ret = (int32x2_t)(__p0);
39134   return __ret;
39135 }
39136 #endif
39137 
39138 #ifdef __LITTLE_ENDIAN__
39139 __ai int32x2_t vreinterpret_s32_f64(float64x1_t __p0) {
39140   int32x2_t __ret;
39141   __ret = (int32x2_t)(__p0);
39142   return __ret;
39143 }
39144 #else
39145 __ai int32x2_t vreinterpret_s32_f64(float64x1_t __p0) {
39146   int32x2_t __ret;
39147   __ret = (int32x2_t)(__p0);
39148   return __ret;
39149 }
39150 #endif
39151 
39152 #ifdef __LITTLE_ENDIAN__
39153 __ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) {
39154   int32x2_t __ret;
39155   __ret = (int32x2_t)(__p0);
39156   return __ret;
39157 }
39158 #else
39159 __ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) {
39160   int32x2_t __ret;
39161   __ret = (int32x2_t)(__p0);
39162   return __ret;
39163 }
39164 #endif
39165 
39166 #ifdef __LITTLE_ENDIAN__
39167 __ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) {
39168   int32x2_t __ret;
39169   __ret = (int32x2_t)(__p0);
39170   return __ret;
39171 }
39172 #else
39173 __ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) {
39174   int32x2_t __ret;
39175   __ret = (int32x2_t)(__p0);
39176   return __ret;
39177 }
39178 #endif
39179 
39180 #ifdef __LITTLE_ENDIAN__
39181 __ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) {
39182   int32x2_t __ret;
39183   __ret = (int32x2_t)(__p0);
39184   return __ret;
39185 }
39186 #else
39187 __ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) {
39188   int32x2_t __ret;
39189   __ret = (int32x2_t)(__p0);
39190   return __ret;
39191 }
39192 #endif
39193 
39194 #ifdef __LITTLE_ENDIAN__
39195 __ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) {
39196   int32x2_t __ret;
39197   __ret = (int32x2_t)(__p0);
39198   return __ret;
39199 }
39200 #else
39201 __ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) {
39202   int32x2_t __ret;
39203   __ret = (int32x2_t)(__p0);
39204   return __ret;
39205 }
39206 #endif
39207 
39208 #ifdef __LITTLE_ENDIAN__
39209 __ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) {
39210   int64x1_t __ret;
39211   __ret = (int64x1_t)(__p0);
39212   return __ret;
39213 }
39214 #else
39215 __ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) {
39216   int64x1_t __ret;
39217   __ret = (int64x1_t)(__p0);
39218   return __ret;
39219 }
39220 #endif
39221 
39222 #ifdef __LITTLE_ENDIAN__
39223 __ai int64x1_t vreinterpret_s64_p64(poly64x1_t __p0) {
39224   int64x1_t __ret;
39225   __ret = (int64x1_t)(__p0);
39226   return __ret;
39227 }
39228 #else
39229 __ai int64x1_t vreinterpret_s64_p64(poly64x1_t __p0) {
39230   int64x1_t __ret;
39231   __ret = (int64x1_t)(__p0);
39232   return __ret;
39233 }
39234 #endif
39235 
39236 #ifdef __LITTLE_ENDIAN__
39237 __ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) {
39238   int64x1_t __ret;
39239   __ret = (int64x1_t)(__p0);
39240   return __ret;
39241 }
39242 #else
39243 __ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) {
39244   int64x1_t __ret;
39245   __ret = (int64x1_t)(__p0);
39246   return __ret;
39247 }
39248 #endif
39249 
39250 #ifdef __LITTLE_ENDIAN__
39251 __ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) {
39252   int64x1_t __ret;
39253   __ret = (int64x1_t)(__p0);
39254   return __ret;
39255 }
39256 #else
39257 __ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) {
39258   int64x1_t __ret;
39259   __ret = (int64x1_t)(__p0);
39260   return __ret;
39261 }
39262 #endif
39263 
39264 #ifdef __LITTLE_ENDIAN__
39265 __ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) {
39266   int64x1_t __ret;
39267   __ret = (int64x1_t)(__p0);
39268   return __ret;
39269 }
39270 #else
39271 __ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) {
39272   int64x1_t __ret;
39273   __ret = (int64x1_t)(__p0);
39274   return __ret;
39275 }
39276 #endif
39277 
39278 #ifdef __LITTLE_ENDIAN__
39279 __ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) {
39280   int64x1_t __ret;
39281   __ret = (int64x1_t)(__p0);
39282   return __ret;
39283 }
39284 #else
39285 __ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) {
39286   int64x1_t __ret;
39287   __ret = (int64x1_t)(__p0);
39288   return __ret;
39289 }
39290 #endif
39291 
39292 #ifdef __LITTLE_ENDIAN__
39293 __ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) {
39294   int64x1_t __ret;
39295   __ret = (int64x1_t)(__p0);
39296   return __ret;
39297 }
39298 #else
39299 __ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) {
39300   int64x1_t __ret;
39301   __ret = (int64x1_t)(__p0);
39302   return __ret;
39303 }
39304 #endif
39305 
39306 #ifdef __LITTLE_ENDIAN__
39307 __ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) {
39308   int64x1_t __ret;
39309   __ret = (int64x1_t)(__p0);
39310   return __ret;
39311 }
39312 #else
39313 __ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) {
39314   int64x1_t __ret;
39315   __ret = (int64x1_t)(__p0);
39316   return __ret;
39317 }
39318 #endif
39319 
39320 #ifdef __LITTLE_ENDIAN__
39321 __ai int64x1_t vreinterpret_s64_f64(float64x1_t __p0) {
39322   int64x1_t __ret;
39323   __ret = (int64x1_t)(__p0);
39324   return __ret;
39325 }
39326 #else
39327 __ai int64x1_t vreinterpret_s64_f64(float64x1_t __p0) {
39328   int64x1_t __ret;
39329   __ret = (int64x1_t)(__p0);
39330   return __ret;
39331 }
39332 #endif
39333 
39334 #ifdef __LITTLE_ENDIAN__
39335 __ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) {
39336   int64x1_t __ret;
39337   __ret = (int64x1_t)(__p0);
39338   return __ret;
39339 }
39340 #else
39341 __ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) {
39342   int64x1_t __ret;
39343   __ret = (int64x1_t)(__p0);
39344   return __ret;
39345 }
39346 #endif
39347 
39348 #ifdef __LITTLE_ENDIAN__
39349 __ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) {
39350   int64x1_t __ret;
39351   __ret = (int64x1_t)(__p0);
39352   return __ret;
39353 }
39354 #else
39355 __ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) {
39356   int64x1_t __ret;
39357   __ret = (int64x1_t)(__p0);
39358   return __ret;
39359 }
39360 #endif
39361 
39362 #ifdef __LITTLE_ENDIAN__
39363 __ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) {
39364   int64x1_t __ret;
39365   __ret = (int64x1_t)(__p0);
39366   return __ret;
39367 }
39368 #else
39369 __ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) {
39370   int64x1_t __ret;
39371   __ret = (int64x1_t)(__p0);
39372   return __ret;
39373 }
39374 #endif
39375 
39376 #ifdef __LITTLE_ENDIAN__
39377 __ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) {
39378   int64x1_t __ret;
39379   __ret = (int64x1_t)(__p0);
39380   return __ret;
39381 }
39382 #else
39383 __ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) {
39384   int64x1_t __ret;
39385   __ret = (int64x1_t)(__p0);
39386   return __ret;
39387 }
39388 #endif
39389 
39390 #ifdef __LITTLE_ENDIAN__
39391 __ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) {
39392   int16x4_t __ret;
39393   __ret = (int16x4_t)(__p0);
39394   return __ret;
39395 }
39396 #else
39397 __ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) {
39398   int16x4_t __ret;
39399   __ret = (int16x4_t)(__p0);
39400   return __ret;
39401 }
39402 #endif
39403 
39404 #ifdef __LITTLE_ENDIAN__
39405 __ai int16x4_t vreinterpret_s16_p64(poly64x1_t __p0) {
39406   int16x4_t __ret;
39407   __ret = (int16x4_t)(__p0);
39408   return __ret;
39409 }
39410 #else
39411 __ai int16x4_t vreinterpret_s16_p64(poly64x1_t __p0) {
39412   int16x4_t __ret;
39413   __ret = (int16x4_t)(__p0);
39414   return __ret;
39415 }
39416 #endif
39417 
39418 #ifdef __LITTLE_ENDIAN__
39419 __ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) {
39420   int16x4_t __ret;
39421   __ret = (int16x4_t)(__p0);
39422   return __ret;
39423 }
39424 #else
39425 __ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) {
39426   int16x4_t __ret;
39427   __ret = (int16x4_t)(__p0);
39428   return __ret;
39429 }
39430 #endif
39431 
39432 #ifdef __LITTLE_ENDIAN__
39433 __ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) {
39434   int16x4_t __ret;
39435   __ret = (int16x4_t)(__p0);
39436   return __ret;
39437 }
39438 #else
39439 __ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) {
39440   int16x4_t __ret;
39441   __ret = (int16x4_t)(__p0);
39442   return __ret;
39443 }
39444 #endif
39445 
39446 #ifdef __LITTLE_ENDIAN__
39447 __ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) {
39448   int16x4_t __ret;
39449   __ret = (int16x4_t)(__p0);
39450   return __ret;
39451 }
39452 #else
39453 __ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) {
39454   int16x4_t __ret;
39455   __ret = (int16x4_t)(__p0);
39456   return __ret;
39457 }
39458 #endif
39459 
39460 #ifdef __LITTLE_ENDIAN__
39461 __ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) {
39462   int16x4_t __ret;
39463   __ret = (int16x4_t)(__p0);
39464   return __ret;
39465 }
39466 #else
39467 __ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) {
39468   int16x4_t __ret;
39469   __ret = (int16x4_t)(__p0);
39470   return __ret;
39471 }
39472 #endif
39473 
39474 #ifdef __LITTLE_ENDIAN__
39475 __ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) {
39476   int16x4_t __ret;
39477   __ret = (int16x4_t)(__p0);
39478   return __ret;
39479 }
39480 #else
39481 __ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) {
39482   int16x4_t __ret;
39483   __ret = (int16x4_t)(__p0);
39484   return __ret;
39485 }
39486 #endif
39487 
39488 #ifdef __LITTLE_ENDIAN__
39489 __ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) {
39490   int16x4_t __ret;
39491   __ret = (int16x4_t)(__p0);
39492   return __ret;
39493 }
39494 #else
39495 __ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) {
39496   int16x4_t __ret;
39497   __ret = (int16x4_t)(__p0);
39498   return __ret;
39499 }
39500 #endif
39501 
39502 #ifdef __LITTLE_ENDIAN__
39503 __ai int16x4_t vreinterpret_s16_f64(float64x1_t __p0) {
39504   int16x4_t __ret;
39505   __ret = (int16x4_t)(__p0);
39506   return __ret;
39507 }
39508 #else
39509 __ai int16x4_t vreinterpret_s16_f64(float64x1_t __p0) {
39510   int16x4_t __ret;
39511   __ret = (int16x4_t)(__p0);
39512   return __ret;
39513 }
39514 #endif
39515 
39516 #ifdef __LITTLE_ENDIAN__
39517 __ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) {
39518   int16x4_t __ret;
39519   __ret = (int16x4_t)(__p0);
39520   return __ret;
39521 }
39522 #else
39523 __ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) {
39524   int16x4_t __ret;
39525   __ret = (int16x4_t)(__p0);
39526   return __ret;
39527 }
39528 #endif
39529 
39530 #ifdef __LITTLE_ENDIAN__
39531 __ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) {
39532   int16x4_t __ret;
39533   __ret = (int16x4_t)(__p0);
39534   return __ret;
39535 }
39536 #else
39537 __ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) {
39538   int16x4_t __ret;
39539   __ret = (int16x4_t)(__p0);
39540   return __ret;
39541 }
39542 #endif
39543 
39544 #ifdef __LITTLE_ENDIAN__
39545 __ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) {
39546   int16x4_t __ret;
39547   __ret = (int16x4_t)(__p0);
39548   return __ret;
39549 }
39550 #else
39551 __ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) {
39552   int16x4_t __ret;
39553   __ret = (int16x4_t)(__p0);
39554   return __ret;
39555 }
39556 #endif
39557 
39558 #ifdef __LITTLE_ENDIAN__
39559 __ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) {
39560   int16x4_t __ret;
39561   __ret = (int16x4_t)(__p0);
39562   return __ret;
39563 }
39564 #else
39565 __ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) {
39566   int16x4_t __ret;
39567   __ret = (int16x4_t)(__p0);
39568   return __ret;
39569 }
39570 #endif
39571 
39572 #endif
39573 #if __ARM_FEATURE_CRYPTO
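/* ARMv8 Crypto extension intrinsics: the AES wrappers (vaesdq_u8, vaeseq_u8,
 * vaesimcq_u8, vaesmcq_u8) and the SHA-1/SHA-256 wrappers (vsha1cq_u32 through
 * vsha256su1q_u32). Each forwards to the matching __builtin_neon_* builtin;
 * the big-endian (#else) variants reverse vector lane order with
 * __builtin_shufflevector before and after the call. */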
39574 #ifdef __LITTLE_ENDIAN__
39575 __ai uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
39576   uint8x16_t __ret;
39577   __ret = (uint8x16_t) __builtin_neon_vaesdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
39578   return __ret;
39579 }
39580 #else
39581 __ai uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
39582   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
39583   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
39584   uint8x16_t __ret;
39585   __ret = (uint8x16_t) __builtin_neon_vaesdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
39586   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
39587   return __ret;
39588 }
39589 #endif
39590 
39591 #ifdef __LITTLE_ENDIAN__
39592 __ai uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) {
39593   uint8x16_t __ret;
39594   __ret = (uint8x16_t) __builtin_neon_vaeseq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
39595   return __ret;
39596 }
39597 #else
39598 __ai uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) {
39599   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
39600   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
39601   uint8x16_t __ret;
39602   __ret = (uint8x16_t) __builtin_neon_vaeseq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
39603   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
39604   return __ret;
39605 }
39606 #endif
39607 
39608 #ifdef __LITTLE_ENDIAN__
39609 __ai uint8x16_t vaesimcq_u8(uint8x16_t __p0) {
39610   uint8x16_t __ret;
39611   __ret = (uint8x16_t) __builtin_neon_vaesimcq_v((int8x16_t)__p0, 48);
39612   return __ret;
39613 }
39614 #else
39615 __ai uint8x16_t vaesimcq_u8(uint8x16_t __p0) {
39616   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
39617   uint8x16_t __ret;
39618   __ret = (uint8x16_t) __builtin_neon_vaesimcq_v((int8x16_t)__rev0, 48);
39619   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
39620   return __ret;
39621 }
39622 #endif
39623 
39624 #ifdef __LITTLE_ENDIAN__
39625 __ai uint8x16_t vaesmcq_u8(uint8x16_t __p0) {
39626   uint8x16_t __ret;
39627   __ret = (uint8x16_t) __builtin_neon_vaesmcq_v((int8x16_t)__p0, 48);
39628   return __ret;
39629 }
39630 #else
39631 __ai uint8x16_t vaesmcq_u8(uint8x16_t __p0) {
39632   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
39633   uint8x16_t __ret;
39634   __ret = (uint8x16_t) __builtin_neon_vaesmcq_v((int8x16_t)__rev0, 48);
39635   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
39636   return __ret;
39637 }
39638 #endif
39639 
39640 #ifdef __LITTLE_ENDIAN__
39641 __ai uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
39642   uint32x4_t __ret;
39643   __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32((int8x16_t)__p0, __p1, (int8x16_t)__p2);
39644   return __ret;
39645 }
39646 #else
39647 __ai uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
39648   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
39649   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
39650   uint32x4_t __ret;
39651   __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32((int8x16_t)__rev0, __p1, (int8x16_t)__rev2);
39652   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
39653   return __ret;
39654 }
39655 #endif
39656 
39657 #ifdef __LITTLE_ENDIAN__
39658 __ai uint32_t vsha1h_u32(uint32_t __p0) {
39659   uint32_t __ret;
39660   __ret = (uint32_t) __builtin_neon_vsha1h_u32(__p0);
39661   return __ret;
39662 }
39663 #else
39664 __ai uint32_t vsha1h_u32(uint32_t __p0) {
39665   uint32_t __ret;
39666   __ret = (uint32_t) __builtin_neon_vsha1h_u32(__p0);
39667   return __ret;
39668 }
39669 #endif
39670 
39671 #ifdef __LITTLE_ENDIAN__
39672 __ai uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
39673   uint32x4_t __ret;
39674   __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32((int8x16_t)__p0, __p1, (int8x16_t)__p2);
39675   return __ret;
39676 }
39677 #else
39678 __ai uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
39679   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
39680   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
39681   uint32x4_t __ret;
39682   __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32((int8x16_t)__rev0, __p1, (int8x16_t)__rev2);
39683   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
39684   return __ret;
39685 }
39686 #endif
39687 
39688 #ifdef __LITTLE_ENDIAN__
39689 __ai uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
39690   uint32x4_t __ret;
39691   __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32((int8x16_t)__p0, __p1, (int8x16_t)__p2);
39692   return __ret;
39693 }
39694 #else
39695 __ai uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
39696   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
39697   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
39698   uint32x4_t __ret;
39699   __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32((int8x16_t)__rev0, __p1, (int8x16_t)__rev2);
39700   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
39701   return __ret;
39702 }
39703 #endif
39704 
39705 #ifdef __LITTLE_ENDIAN__
39706 __ai uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
39707   uint32x4_t __ret;
39708   __ret = (uint32x4_t) __builtin_neon_vsha1su0q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
39709   return __ret;
39710 }
39711 #else
39712 __ai uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
39713   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
39714   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
39715   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
39716   uint32x4_t __ret;
39717   __ret = (uint32x4_t) __builtin_neon_vsha1su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
39718   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
39719   return __ret;
39720 }
39721 #endif
39722 
39723 #ifdef __LITTLE_ENDIAN__
39724 __ai uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
39725   uint32x4_t __ret;
39726   __ret = (uint32x4_t) __builtin_neon_vsha1su1q_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
39727   return __ret;
39728 }
39729 #else
39730 __ai uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
39731   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
39732   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
39733   uint32x4_t __ret;
39734   __ret = (uint32x4_t) __builtin_neon_vsha1su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
39735   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
39736   return __ret;
39737 }
39738 #endif
39739 
39740 #ifdef __LITTLE_ENDIAN__
39741 __ai uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
39742   uint32x4_t __ret;
39743   __ret = (uint32x4_t) __builtin_neon_vsha256hq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
39744   return __ret;
39745 }
39746 #else
39747 __ai uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
39748   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
39749   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
39750   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
39751   uint32x4_t __ret;
39752   __ret = (uint32x4_t) __builtin_neon_vsha256hq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
39753   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
39754   return __ret;
39755 }
39756 #endif
39757 
39758 #ifdef __LITTLE_ENDIAN__
39759 __ai uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
39760   uint32x4_t __ret;
39761   __ret = (uint32x4_t) __builtin_neon_vsha256h2q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
39762   return __ret;
39763 }
39764 #else
39765 __ai uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
39766   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
39767   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
39768   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
39769   uint32x4_t __ret;
39770   __ret = (uint32x4_t) __builtin_neon_vsha256h2q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
39771   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
39772   return __ret;
39773 }
39774 #endif
39775 
39776 #ifdef __LITTLE_ENDIAN__
39777 __ai uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) {
39778   uint32x4_t __ret;
39779   __ret = (uint32x4_t) __builtin_neon_vsha256su0q_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
39780   return __ret;
39781 }
39782 #else
39783 __ai uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) {
39784   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
39785   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
39786   uint32x4_t __ret;
39787   __ret = (uint32x4_t) __builtin_neon_vsha256su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
39788   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
39789   return __ret;
39790 }
39791 #endif
39792 
39793 #ifdef __LITTLE_ENDIAN__
39794 __ai uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
39795   uint32x4_t __ret;
39796   __ret = (uint32x4_t) __builtin_neon_vsha256su1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
39797   return __ret;
39798 }
39799 #else
39800 __ai uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
39801   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
39802   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
39803   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
39804   uint32x4_t __ret;
39805   __ret = (uint32x4_t) __builtin_neon_vsha256su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
39806   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
39807   return __ret;
39808 }
39809 #endif
39810 
39811 #endif
39812 #if defined(__aarch64__)
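/* AArch64-only intrinsics: float64 vector arithmetic (vabdq_f64, vaddq_f64),
 * scalar forms (vabdd_f64, vaddd_s64, vceqd_u64), across-vector reductions
 * (vaddlv and vaddv families), and the poly64/float64 bit-select (vbsl) and
 * compare (vceq, vcage, vcagt, vcale, vcalt) operations defined in this block. */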
39813 #ifdef __LITTLE_ENDIAN__
39814 __ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) {
39815   float64x2_t __ret;
39816   __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
39817   return __ret;
39818 }
39819 #else
39820 __ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) {
39821   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
39822   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
39823   float64x2_t __ret;
39824   __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
39825   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
39826   return __ret;
39827 }
39828 #endif
39829 
39830 #ifdef __LITTLE_ENDIAN__
39831 __ai float64x1_t vabd_f64(float64x1_t __p0, float64x1_t __p1) {
39832   float64x1_t __ret;
39833   __ret = (float64x1_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
39834   return __ret;
39835 }
39836 #else
39837 __ai float64x1_t vabd_f64(float64x1_t __p0, float64x1_t __p1) {
39838   float64x1_t __ret;
39839   __ret = (float64x1_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
39840   return __ret;
39841 }
39842 #endif
39843 
39844 #ifdef __LITTLE_ENDIAN__
39845 __ai float64_t vabdd_f64(float64_t __p0, float64_t __p1) {
39846   float64_t __ret;
39847   __ret = (float64_t) __builtin_neon_vabdd_f64(__p0, __p1);
39848   return __ret;
39849 }
39850 #else
39851 __ai float64_t vabdd_f64(float64_t __p0, float64_t __p1) {
39852   float64_t __ret;
39853   __ret = (float64_t) __builtin_neon_vabdd_f64(__p0, __p1);
39854   return __ret;
39855 }
39856 #endif
39857 
39858 #ifdef __LITTLE_ENDIAN__
39859 __ai float32_t vabds_f32(float32_t __p0, float32_t __p1) {
39860   float32_t __ret;
39861   __ret = (float32_t) __builtin_neon_vabds_f32(__p0, __p1);
39862   return __ret;
39863 }
39864 #else
39865 __ai float32_t vabds_f32(float32_t __p0, float32_t __p1) {
39866   float32_t __ret;
39867   __ret = (float32_t) __builtin_neon_vabds_f32(__p0, __p1);
39868   return __ret;
39869 }
39870 #endif
39871 
39872 #ifdef __LITTLE_ENDIAN__
39873 __ai float64x2_t vabsq_f64(float64x2_t __p0) {
39874   float64x2_t __ret;
39875   __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 42);
39876   return __ret;
39877 }
39878 #else
39879 __ai float64x2_t vabsq_f64(float64x2_t __p0) {
39880   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
39881   float64x2_t __ret;
39882   __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 42);
39883   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
39884   return __ret;
39885 }
39886 #endif
39887 
39888 #ifdef __LITTLE_ENDIAN__
39889 __ai int64x2_t vabsq_s64(int64x2_t __p0) {
39890   int64x2_t __ret;
39891   __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 35);
39892   return __ret;
39893 }
39894 #else
39895 __ai int64x2_t vabsq_s64(int64x2_t __p0) {
39896   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
39897   int64x2_t __ret;
39898   __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 35);
39899   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
39900   return __ret;
39901 }
39902 #endif
39903 
39904 #ifdef __LITTLE_ENDIAN__
39905 __ai float64x1_t vabs_f64(float64x1_t __p0) {
39906   float64x1_t __ret;
39907   __ret = (float64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 10);
39908   return __ret;
39909 }
39910 #else
39911 __ai float64x1_t vabs_f64(float64x1_t __p0) {
39912   float64x1_t __ret;
39913   __ret = (float64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 10);
39914   return __ret;
39915 }
39916 #endif
39917 
39918 #ifdef __LITTLE_ENDIAN__
39919 __ai int64x1_t vabs_s64(int64x1_t __p0) {
39920   int64x1_t __ret;
39921   __ret = (int64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 3);
39922   return __ret;
39923 }
39924 #else
39925 __ai int64x1_t vabs_s64(int64x1_t __p0) {
39926   int64x1_t __ret;
39927   __ret = (int64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 3);
39928   return __ret;
39929 }
39930 #endif
39931 
39932 #ifdef __LITTLE_ENDIAN__
39933 __ai int64_t vabsd_s64(int64_t __p0) {
39934   int64_t __ret;
39935   __ret = (int64_t) __builtin_neon_vabsd_s64(__p0);
39936   return __ret;
39937 }
39938 #else
39939 __ai int64_t vabsd_s64(int64_t __p0) {
39940   int64_t __ret;
39941   __ret = (int64_t) __builtin_neon_vabsd_s64(__p0);
39942   return __ret;
39943 }
39944 #endif
39945 
39946 #ifdef __LITTLE_ENDIAN__
39947 __ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) {
39948   float64x2_t __ret;
39949   __ret = __p0 + __p1;
39950   return __ret;
39951 }
39952 #else
39953 __ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) {
39954   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
39955   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
39956   float64x2_t __ret;
39957   __ret = __rev0 + __rev1;
39958   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
39959   return __ret;
39960 }
39961 #endif
39962 
39963 #ifdef __LITTLE_ENDIAN__
39964 __ai float64x1_t vadd_f64(float64x1_t __p0, float64x1_t __p1) {
39965   float64x1_t __ret;
39966   __ret = __p0 + __p1;
39967   return __ret;
39968 }
39969 #else
39970 __ai float64x1_t vadd_f64(float64x1_t __p0, float64x1_t __p1) {
39971   float64x1_t __ret;
39972   __ret = __p0 + __p1;
39973   return __ret;
39974 }
39975 #endif
39976 
39977 #ifdef __LITTLE_ENDIAN__
39978 __ai uint64_t vaddd_u64(uint64_t __p0, uint64_t __p1) {
39979   uint64_t __ret;
39980   __ret = (uint64_t) __builtin_neon_vaddd_u64(__p0, __p1);
39981   return __ret;
39982 }
39983 #else
39984 __ai uint64_t vaddd_u64(uint64_t __p0, uint64_t __p1) {
39985   uint64_t __ret;
39986   __ret = (uint64_t) __builtin_neon_vaddd_u64(__p0, __p1);
39987   return __ret;
39988 }
39989 #endif
39990 
39991 #ifdef __LITTLE_ENDIAN__
39992 __ai int64_t vaddd_s64(int64_t __p0, int64_t __p1) {
39993   int64_t __ret;
39994   __ret = (int64_t) __builtin_neon_vaddd_s64(__p0, __p1);
39995   return __ret;
39996 }
39997 #else
39998 __ai int64_t vaddd_s64(int64_t __p0, int64_t __p1) {
39999   int64_t __ret;
40000   __ret = (int64_t) __builtin_neon_vaddd_s64(__p0, __p1);
40001   return __ret;
40002 }
40003 #endif
40004 
40005 #ifdef __LITTLE_ENDIAN__
40006 __ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
40007   uint16x8_t __ret;
40008   __ret = vcombine_u16(__p0, vaddhn_u32(__p1, __p2));
40009   return __ret;
40010 }
40011 #else
40012 __ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
40013   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
40014   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
40015   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
40016   uint16x8_t __ret;
40017   __ret = __noswap_vcombine_u16(__rev0, __noswap_vaddhn_u32(__rev1, __rev2));
40018   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
40019   return __ret;
40020 }
40021 #endif
40022 
40023 #ifdef __LITTLE_ENDIAN__
40024 __ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
40025   uint32x4_t __ret;
40026   __ret = vcombine_u32(__p0, vaddhn_u64(__p1, __p2));
40027   return __ret;
40028 }
40029 #else
40030 __ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
40031   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
40032   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
40033   uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
40034   uint32x4_t __ret;
40035   __ret = __noswap_vcombine_u32(__rev0, __noswap_vaddhn_u64(__rev1, __rev2));
40036   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
40037   return __ret;
40038 }
40039 #endif
40040 
40041 #ifdef __LITTLE_ENDIAN__
40042 __ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
40043   uint8x16_t __ret;
40044   __ret = vcombine_u8(__p0, vaddhn_u16(__p1, __p2));
40045   return __ret;
40046 }
40047 #else
40048 __ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
40049   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
40050   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
40051   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
40052   uint8x16_t __ret;
40053   __ret = __noswap_vcombine_u8(__rev0, __noswap_vaddhn_u16(__rev1, __rev2));
40054   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
40055   return __ret;
40056 }
40057 #endif
40058 
40059 #ifdef __LITTLE_ENDIAN__
40060 __ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
40061   int16x8_t __ret;
40062   __ret = vcombine_s16(__p0, vaddhn_s32(__p1, __p2));
40063   return __ret;
40064 }
40065 #else
40066 __ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
40067   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
40068   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
40069   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
40070   int16x8_t __ret;
40071   __ret = __noswap_vcombine_s16(__rev0, __noswap_vaddhn_s32(__rev1, __rev2));
40072   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
40073   return __ret;
40074 }
40075 #endif
40076 
40077 #ifdef __LITTLE_ENDIAN__
40078 __ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
40079   int32x4_t __ret;
40080   __ret = vcombine_s32(__p0, vaddhn_s64(__p1, __p2));
40081   return __ret;
40082 }
40083 #else
40084 __ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
40085   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
40086   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
40087   int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
40088   int32x4_t __ret;
40089   __ret = __noswap_vcombine_s32(__rev0, __noswap_vaddhn_s64(__rev1, __rev2));
40090   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
40091   return __ret;
40092 }
40093 #endif
40094 
40095 #ifdef __LITTLE_ENDIAN__
40096 __ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
40097   int8x16_t __ret;
40098   __ret = vcombine_s8(__p0, vaddhn_s16(__p1, __p2));
40099   return __ret;
40100 }
40101 #else
40102 __ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
40103   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
40104   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
40105   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
40106   int8x16_t __ret;
40107   __ret = __noswap_vcombine_s8(__rev0, __noswap_vaddhn_s16(__rev1, __rev2));
40108   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
40109   return __ret;
40110 }
40111 #endif
40112 
40113 #ifdef __LITTLE_ENDIAN__
40114 __ai uint16_t vaddlvq_u8(uint8x16_t __p0) {
40115   uint16_t __ret;
40116   __ret = (uint16_t) __builtin_neon_vaddlvq_u8((int8x16_t)__p0);
40117   return __ret;
40118 }
40119 #else
40120 __ai uint16_t vaddlvq_u8(uint8x16_t __p0) {
40121   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
40122   uint16_t __ret;
40123   __ret = (uint16_t) __builtin_neon_vaddlvq_u8((int8x16_t)__rev0);
40124   return __ret;
40125 }
40126 #endif
40127 
40128 #ifdef __LITTLE_ENDIAN__
40129 __ai uint64_t vaddlvq_u32(uint32x4_t __p0) {
40130   uint64_t __ret;
40131   __ret = (uint64_t) __builtin_neon_vaddlvq_u32((int8x16_t)__p0);
40132   return __ret;
40133 }
40134 #else
40135 __ai uint64_t vaddlvq_u32(uint32x4_t __p0) {
40136   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
40137   uint64_t __ret;
40138   __ret = (uint64_t) __builtin_neon_vaddlvq_u32((int8x16_t)__rev0);
40139   return __ret;
40140 }
40141 #endif
40142 
40143 #ifdef __LITTLE_ENDIAN__
40144 __ai uint32_t vaddlvq_u16(uint16x8_t __p0) {
40145   uint32_t __ret;
40146   __ret = (uint32_t) __builtin_neon_vaddlvq_u16((int8x16_t)__p0);
40147   return __ret;
40148 }
40149 #else
40150 __ai uint32_t vaddlvq_u16(uint16x8_t __p0) {
40151   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
40152   uint32_t __ret;
40153   __ret = (uint32_t) __builtin_neon_vaddlvq_u16((int8x16_t)__rev0);
40154   return __ret;
40155 }
40156 #endif
40157 
40158 #ifdef __LITTLE_ENDIAN__
40159 __ai int16_t vaddlvq_s8(int8x16_t __p0) {
40160   int16_t __ret;
40161   __ret = (int16_t) __builtin_neon_vaddlvq_s8((int8x16_t)__p0);
40162   return __ret;
40163 }
40164 #else
40165 __ai int16_t vaddlvq_s8(int8x16_t __p0) {
40166   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
40167   int16_t __ret;
40168   __ret = (int16_t) __builtin_neon_vaddlvq_s8((int8x16_t)__rev0);
40169   return __ret;
40170 }
40171 #endif
40172 
40173 #ifdef __LITTLE_ENDIAN__
40174 __ai int64_t vaddlvq_s32(int32x4_t __p0) {
40175   int64_t __ret;
40176   __ret = (int64_t) __builtin_neon_vaddlvq_s32((int8x16_t)__p0);
40177   return __ret;
40178 }
40179 #else
40180 __ai int64_t vaddlvq_s32(int32x4_t __p0) {
40181   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
40182   int64_t __ret;
40183   __ret = (int64_t) __builtin_neon_vaddlvq_s32((int8x16_t)__rev0);
40184   return __ret;
40185 }
40186 #endif
40187 
40188 #ifdef __LITTLE_ENDIAN__
40189 __ai int32_t vaddlvq_s16(int16x8_t __p0) {
40190   int32_t __ret;
40191   __ret = (int32_t) __builtin_neon_vaddlvq_s16((int8x16_t)__p0);
40192   return __ret;
40193 }
40194 #else
40195 __ai int32_t vaddlvq_s16(int16x8_t __p0) {
40196   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
40197   int32_t __ret;
40198   __ret = (int32_t) __builtin_neon_vaddlvq_s16((int8x16_t)__rev0);
40199   return __ret;
40200 }
40201 #endif
40202 
40203 #ifdef __LITTLE_ENDIAN__
40204 __ai uint16_t vaddlv_u8(uint8x8_t __p0) {
40205   uint16_t __ret;
40206   __ret = (uint16_t) __builtin_neon_vaddlv_u8((int8x8_t)__p0);
40207   return __ret;
40208 }
40209 #else
40210 __ai uint16_t vaddlv_u8(uint8x8_t __p0) {
40211   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
40212   uint16_t __ret;
40213   __ret = (uint16_t) __builtin_neon_vaddlv_u8((int8x8_t)__rev0);
40214   return __ret;
40215 }
40216 #endif
40217 
40218 #ifdef __LITTLE_ENDIAN__
40219 __ai uint64_t vaddlv_u32(uint32x2_t __p0) {
40220   uint64_t __ret;
40221   __ret = (uint64_t) __builtin_neon_vaddlv_u32((int8x8_t)__p0);
40222   return __ret;
40223 }
40224 #else
40225 __ai uint64_t vaddlv_u32(uint32x2_t __p0) {
40226   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
40227   uint64_t __ret;
40228   __ret = (uint64_t) __builtin_neon_vaddlv_u32((int8x8_t)__rev0);
40229   return __ret;
40230 }
40231 #endif
40232 
40233 #ifdef __LITTLE_ENDIAN__
40234 __ai uint32_t vaddlv_u16(uint16x4_t __p0) {
40235   uint32_t __ret;
40236   __ret = (uint32_t) __builtin_neon_vaddlv_u16((int8x8_t)__p0);
40237   return __ret;
40238 }
40239 #else
40240 __ai uint32_t vaddlv_u16(uint16x4_t __p0) {
40241   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
40242   uint32_t __ret;
40243   __ret = (uint32_t) __builtin_neon_vaddlv_u16((int8x8_t)__rev0);
40244   return __ret;
40245 }
40246 #endif
40247 
40248 #ifdef __LITTLE_ENDIAN__
40249 __ai int16_t vaddlv_s8(int8x8_t __p0) {
40250   int16_t __ret;
40251   __ret = (int16_t) __builtin_neon_vaddlv_s8((int8x8_t)__p0);
40252   return __ret;
40253 }
40254 #else
40255 __ai int16_t vaddlv_s8(int8x8_t __p0) {
40256   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
40257   int16_t __ret;
40258   __ret = (int16_t) __builtin_neon_vaddlv_s8((int8x8_t)__rev0);
40259   return __ret;
40260 }
40261 #endif
40262 
40263 #ifdef __LITTLE_ENDIAN__
40264 __ai int64_t vaddlv_s32(int32x2_t __p0) {
40265   int64_t __ret;
40266   __ret = (int64_t) __builtin_neon_vaddlv_s32((int8x8_t)__p0);
40267   return __ret;
40268 }
40269 #else
40270 __ai int64_t vaddlv_s32(int32x2_t __p0) {
40271   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
40272   int64_t __ret;
40273   __ret = (int64_t) __builtin_neon_vaddlv_s32((int8x8_t)__rev0);
40274   return __ret;
40275 }
40276 #endif
40277 
40278 #ifdef __LITTLE_ENDIAN__
40279 __ai int32_t vaddlv_s16(int16x4_t __p0) {
40280   int32_t __ret;
40281   __ret = (int32_t) __builtin_neon_vaddlv_s16((int8x8_t)__p0);
40282   return __ret;
40283 }
40284 #else
40285 __ai int32_t vaddlv_s16(int16x4_t __p0) {
40286   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
40287   int32_t __ret;
40288   __ret = (int32_t) __builtin_neon_vaddlv_s16((int8x8_t)__rev0);
40289   return __ret;
40290 }
40291 #endif
40292 
40293 #ifdef __LITTLE_ENDIAN__
40294 __ai uint8_t vaddvq_u8(uint8x16_t __p0) {
40295   uint8_t __ret;
40296   __ret = (uint8_t) __builtin_neon_vaddvq_u8((int8x16_t)__p0);
40297   return __ret;
40298 }
40299 #else
40300 __ai uint8_t vaddvq_u8(uint8x16_t __p0) {
40301   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
40302   uint8_t __ret;
40303   __ret = (uint8_t) __builtin_neon_vaddvq_u8((int8x16_t)__rev0);
40304   return __ret;
40305 }
40306 #endif
40307 
40308 #ifdef __LITTLE_ENDIAN__
40309 __ai uint32_t vaddvq_u32(uint32x4_t __p0) {
40310   uint32_t __ret;
40311   __ret = (uint32_t) __builtin_neon_vaddvq_u32((int8x16_t)__p0);
40312   return __ret;
40313 }
40314 #else
40315 __ai uint32_t vaddvq_u32(uint32x4_t __p0) {
40316   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
40317   uint32_t __ret;
40318   __ret = (uint32_t) __builtin_neon_vaddvq_u32((int8x16_t)__rev0);
40319   return __ret;
40320 }
40321 #endif
40322 
40323 #ifdef __LITTLE_ENDIAN__
40324 __ai uint64_t vaddvq_u64(uint64x2_t __p0) {
40325   uint64_t __ret;
40326   __ret = (uint64_t) __builtin_neon_vaddvq_u64((int8x16_t)__p0);
40327   return __ret;
40328 }
40329 #else
40330 __ai uint64_t vaddvq_u64(uint64x2_t __p0) {
40331   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
40332   uint64_t __ret;
40333   __ret = (uint64_t) __builtin_neon_vaddvq_u64((int8x16_t)__rev0);
40334   return __ret;
40335 }
40336 #endif
40337 
40338 #ifdef __LITTLE_ENDIAN__
40339 __ai uint16_t vaddvq_u16(uint16x8_t __p0) {
40340   uint16_t __ret;
40341   __ret = (uint16_t) __builtin_neon_vaddvq_u16((int8x16_t)__p0);
40342   return __ret;
40343 }
40344 #else
40345 __ai uint16_t vaddvq_u16(uint16x8_t __p0) {
40346   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
40347   uint16_t __ret;
40348   __ret = (uint16_t) __builtin_neon_vaddvq_u16((int8x16_t)__rev0);
40349   return __ret;
40350 }
40351 #endif
40352 
40353 #ifdef __LITTLE_ENDIAN__
40354 __ai int8_t vaddvq_s8(int8x16_t __p0) {
40355   int8_t __ret;
40356   __ret = (int8_t) __builtin_neon_vaddvq_s8((int8x16_t)__p0);
40357   return __ret;
40358 }
40359 #else
40360 __ai int8_t vaddvq_s8(int8x16_t __p0) {
40361   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
40362   int8_t __ret;
40363   __ret = (int8_t) __builtin_neon_vaddvq_s8((int8x16_t)__rev0);
40364   return __ret;
40365 }
40366 #endif
40367 
40368 #ifdef __LITTLE_ENDIAN__
40369 __ai float64_t vaddvq_f64(float64x2_t __p0) {
40370   float64_t __ret;
40371   __ret = (float64_t) __builtin_neon_vaddvq_f64((int8x16_t)__p0);
40372   return __ret;
40373 }
40374 #else
40375 __ai float64_t vaddvq_f64(float64x2_t __p0) {
40376   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
40377   float64_t __ret;
40378   __ret = (float64_t) __builtin_neon_vaddvq_f64((int8x16_t)__rev0);
40379   return __ret;
40380 }
40381 #endif
40382 
40383 #ifdef __LITTLE_ENDIAN__
40384 __ai float32_t vaddvq_f32(float32x4_t __p0) {
40385   float32_t __ret;
40386   __ret = (float32_t) __builtin_neon_vaddvq_f32((int8x16_t)__p0);
40387   return __ret;
40388 }
40389 #else
40390 __ai float32_t vaddvq_f32(float32x4_t __p0) {
40391   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
40392   float32_t __ret;
40393   __ret = (float32_t) __builtin_neon_vaddvq_f32((int8x16_t)__rev0);
40394   return __ret;
40395 }
40396 #endif
40397 
40398 #ifdef __LITTLE_ENDIAN__
40399 __ai int32_t vaddvq_s32(int32x4_t __p0) {
40400   int32_t __ret;
40401   __ret = (int32_t) __builtin_neon_vaddvq_s32((int8x16_t)__p0);
40402   return __ret;
40403 }
40404 #else
40405 __ai int32_t vaddvq_s32(int32x4_t __p0) {
40406   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
40407   int32_t __ret;
40408   __ret = (int32_t) __builtin_neon_vaddvq_s32((int8x16_t)__rev0);
40409   return __ret;
40410 }
40411 #endif
40412 
40413 #ifdef __LITTLE_ENDIAN__
40414 __ai int64_t vaddvq_s64(int64x2_t __p0) {
40415   int64_t __ret;
40416   __ret = (int64_t) __builtin_neon_vaddvq_s64((int8x16_t)__p0);
40417   return __ret;
40418 }
40419 #else
40420 __ai int64_t vaddvq_s64(int64x2_t __p0) {
40421   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
40422   int64_t __ret;
40423   __ret = (int64_t) __builtin_neon_vaddvq_s64((int8x16_t)__rev0);
40424   return __ret;
40425 }
40426 #endif
40427 
40428 #ifdef __LITTLE_ENDIAN__
40429 __ai int16_t vaddvq_s16(int16x8_t __p0) {
40430   int16_t __ret;
40431   __ret = (int16_t) __builtin_neon_vaddvq_s16((int8x16_t)__p0);
40432   return __ret;
40433 }
40434 #else
40435 __ai int16_t vaddvq_s16(int16x8_t __p0) {
40436   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
40437   int16_t __ret;
40438   __ret = (int16_t) __builtin_neon_vaddvq_s16((int8x16_t)__rev0);
40439   return __ret;
40440 }
40441 #endif
40442 
40443 #ifdef __LITTLE_ENDIAN__
40444 __ai uint8_t vaddv_u8(uint8x8_t __p0) {
40445   uint8_t __ret;
40446   __ret = (uint8_t) __builtin_neon_vaddv_u8((int8x8_t)__p0);
40447   return __ret;
40448 }
40449 #else
40450 __ai uint8_t vaddv_u8(uint8x8_t __p0) {
40451   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
40452   uint8_t __ret;
40453   __ret = (uint8_t) __builtin_neon_vaddv_u8((int8x8_t)__rev0);
40454   return __ret;
40455 }
40456 #endif
40457 
40458 #ifdef __LITTLE_ENDIAN__
40459 __ai uint32_t vaddv_u32(uint32x2_t __p0) {
40460   uint32_t __ret;
40461   __ret = (uint32_t) __builtin_neon_vaddv_u32((int8x8_t)__p0);
40462   return __ret;
40463 }
40464 #else
40465 __ai uint32_t vaddv_u32(uint32x2_t __p0) {
40466   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
40467   uint32_t __ret;
40468   __ret = (uint32_t) __builtin_neon_vaddv_u32((int8x8_t)__rev0);
40469   return __ret;
40470 }
40471 #endif
40472 
40473 #ifdef __LITTLE_ENDIAN__
40474 __ai uint16_t vaddv_u16(uint16x4_t __p0) {
40475   uint16_t __ret;
40476   __ret = (uint16_t) __builtin_neon_vaddv_u16((int8x8_t)__p0);
40477   return __ret;
40478 }
40479 #else
40480 __ai uint16_t vaddv_u16(uint16x4_t __p0) {
40481   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
40482   uint16_t __ret;
40483   __ret = (uint16_t) __builtin_neon_vaddv_u16((int8x8_t)__rev0);
40484   return __ret;
40485 }
40486 #endif
40487 
40488 #ifdef __LITTLE_ENDIAN__
40489 __ai int8_t vaddv_s8(int8x8_t __p0) {
40490   int8_t __ret;
40491   __ret = (int8_t) __builtin_neon_vaddv_s8((int8x8_t)__p0);
40492   return __ret;
40493 }
40494 #else
40495 __ai int8_t vaddv_s8(int8x8_t __p0) {
40496   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
40497   int8_t __ret;
40498   __ret = (int8_t) __builtin_neon_vaddv_s8((int8x8_t)__rev0);
40499   return __ret;
40500 }
40501 #endif
40502 
40503 #ifdef __LITTLE_ENDIAN__
40504 __ai float32_t vaddv_f32(float32x2_t __p0) {
40505   float32_t __ret;
40506   __ret = (float32_t) __builtin_neon_vaddv_f32((int8x8_t)__p0);
40507   return __ret;
40508 }
40509 #else
40510 __ai float32_t vaddv_f32(float32x2_t __p0) {
40511   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
40512   float32_t __ret;
40513   __ret = (float32_t) __builtin_neon_vaddv_f32((int8x8_t)__rev0);
40514   return __ret;
40515 }
40516 #endif
40517 
40518 #ifdef __LITTLE_ENDIAN__
40519 __ai int32_t vaddv_s32(int32x2_t __p0) {
40520   int32_t __ret;
40521   __ret = (int32_t) __builtin_neon_vaddv_s32((int8x8_t)__p0);
40522   return __ret;
40523 }
40524 #else
40525 __ai int32_t vaddv_s32(int32x2_t __p0) {
40526   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
40527   int32_t __ret;
40528   __ret = (int32_t) __builtin_neon_vaddv_s32((int8x8_t)__rev0);
40529   return __ret;
40530 }
40531 #endif
40532 
40533 #ifdef __LITTLE_ENDIAN__
40534 __ai int16_t vaddv_s16(int16x4_t __p0) {
40535   int16_t __ret;
40536   __ret = (int16_t) __builtin_neon_vaddv_s16((int8x8_t)__p0);
40537   return __ret;
40538 }
40539 #else
40540 __ai int16_t vaddv_s16(int16x4_t __p0) {
40541   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
40542   int16_t __ret;
40543   __ret = (int16_t) __builtin_neon_vaddv_s16((int8x8_t)__rev0);
40544   return __ret;
40545 }
40546 #endif
40547 
40548 #ifdef __LITTLE_ENDIAN__
40549 __ai poly64x1_t vbsl_p64(uint64x1_t __p0, poly64x1_t __p1, poly64x1_t __p2) {
40550   poly64x1_t __ret;
40551   __ret = (poly64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 6);
40552   return __ret;
40553 }
40554 #else
40555 __ai poly64x1_t vbsl_p64(uint64x1_t __p0, poly64x1_t __p1, poly64x1_t __p2) {
40556   poly64x1_t __ret;
40557   __ret = (poly64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 6);
40558   return __ret;
40559 }
40560 #endif
40561 
40562 #ifdef __LITTLE_ENDIAN__
40563 __ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) {
40564   poly64x2_t __ret;
40565   __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 38);
40566   return __ret;
40567 }
40568 #else
40569 __ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) {
40570   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
40571   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
40572   poly64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
40573   poly64x2_t __ret;
40574   __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 38);
40575   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
40576   return __ret;
40577 }
40578 #endif
40579 
40580 #ifdef __LITTLE_ENDIAN__
40581 __ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
40582   float64x2_t __ret;
40583   __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
40584   return __ret;
40585 }
40586 #else
40587 __ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
40588   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
40589   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
40590   float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
40591   float64x2_t __ret;
40592   __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
40593   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
40594   return __ret;
40595 }
40596 #endif
40597 
40598 #ifdef __LITTLE_ENDIAN__
40599 __ai float64x1_t vbsl_f64(uint64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
40600   float64x1_t __ret;
40601   __ret = (float64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
40602   return __ret;
40603 }
40604 #else
40605 __ai float64x1_t vbsl_f64(uint64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
40606   float64x1_t __ret;
40607   __ret = (float64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
40608   return __ret;
40609 }
40610 #endif
40611 
40612 #ifdef __LITTLE_ENDIAN__
40613 __ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) {
40614   uint64x2_t __ret;
40615   __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
40616   return __ret;
40617 }
40618 #else
40619 __ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) {
40620   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
40621   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
40622   uint64x2_t __ret;
40623   __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
40624   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
40625   return __ret;
40626 }
40627 #endif
40628 
40629 #ifdef __LITTLE_ENDIAN__
40630 __ai uint64x1_t vcage_f64(float64x1_t __p0, float64x1_t __p1) {
40631   uint64x1_t __ret;
40632   __ret = (uint64x1_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
40633   return __ret;
40634 }
40635 #else
40636 __ai uint64x1_t vcage_f64(float64x1_t __p0, float64x1_t __p1) {
40637   uint64x1_t __ret;
40638   __ret = (uint64x1_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
40639   return __ret;
40640 }
40641 #endif
40642 
40643 #ifdef __LITTLE_ENDIAN__
40644 __ai uint64_t vcaged_f64(float64_t __p0, float64_t __p1) {
40645   uint64_t __ret;
40646   __ret = (uint64_t) __builtin_neon_vcaged_f64(__p0, __p1);
40647   return __ret;
40648 }
40649 #else
40650 __ai uint64_t vcaged_f64(float64_t __p0, float64_t __p1) {
40651   uint64_t __ret;
40652   __ret = (uint64_t) __builtin_neon_vcaged_f64(__p0, __p1);
40653   return __ret;
40654 }
40655 #endif
40656 
40657 #ifdef __LITTLE_ENDIAN__
40658 __ai uint32_t vcages_f32(float32_t __p0, float32_t __p1) {
40659   uint32_t __ret;
40660   __ret = (uint32_t) __builtin_neon_vcages_f32(__p0, __p1);
40661   return __ret;
40662 }
40663 #else
40664 __ai uint32_t vcages_f32(float32_t __p0, float32_t __p1) {
40665   uint32_t __ret;
40666   __ret = (uint32_t) __builtin_neon_vcages_f32(__p0, __p1);
40667   return __ret;
40668 }
40669 #endif
40670 
40671 #ifdef __LITTLE_ENDIAN__
40672 __ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) {
40673   uint64x2_t __ret;
40674   __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
40675   return __ret;
40676 }
40677 #else
40678 __ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) {
40679   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
40680   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
40681   uint64x2_t __ret;
40682   __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
40683   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
40684   return __ret;
40685 }
40686 #endif
40687 
40688 #ifdef __LITTLE_ENDIAN__
40689 __ai uint64x1_t vcagt_f64(float64x1_t __p0, float64x1_t __p1) {
40690   uint64x1_t __ret;
40691   __ret = (uint64x1_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
40692   return __ret;
40693 }
40694 #else
40695 __ai uint64x1_t vcagt_f64(float64x1_t __p0, float64x1_t __p1) {
40696   uint64x1_t __ret;
40697   __ret = (uint64x1_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
40698   return __ret;
40699 }
40700 #endif
40701 
40702 #ifdef __LITTLE_ENDIAN__
40703 __ai uint64_t vcagtd_f64(float64_t __p0, float64_t __p1) {
40704   uint64_t __ret;
40705   __ret = (uint64_t) __builtin_neon_vcagtd_f64(__p0, __p1);
40706   return __ret;
40707 }
40708 #else
40709 __ai uint64_t vcagtd_f64(float64_t __p0, float64_t __p1) {
40710   uint64_t __ret;
40711   __ret = (uint64_t) __builtin_neon_vcagtd_f64(__p0, __p1);
40712   return __ret;
40713 }
40714 #endif
40715 
40716 #ifdef __LITTLE_ENDIAN__
40717 __ai uint32_t vcagts_f32(float32_t __p0, float32_t __p1) {
40718   uint32_t __ret;
40719   __ret = (uint32_t) __builtin_neon_vcagts_f32(__p0, __p1);
40720   return __ret;
40721 }
40722 #else
40723 __ai uint32_t vcagts_f32(float32_t __p0, float32_t __p1) {
40724   uint32_t __ret;
40725   __ret = (uint32_t) __builtin_neon_vcagts_f32(__p0, __p1);
40726   return __ret;
40727 }
40728 #endif
40729 
40730 #ifdef __LITTLE_ENDIAN__
40731 __ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) {
40732   uint64x2_t __ret;
40733   __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
40734   return __ret;
40735 }
40736 #else
40737 __ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) {
40738   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
40739   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
40740   uint64x2_t __ret;
40741   __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
40742   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
40743   return __ret;
40744 }
40745 #endif
40746 
40747 #ifdef __LITTLE_ENDIAN__
40748 __ai uint64x1_t vcale_f64(float64x1_t __p0, float64x1_t __p1) {
40749   uint64x1_t __ret;
40750   __ret = (uint64x1_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
40751   return __ret;
40752 }
40753 #else
40754 __ai uint64x1_t vcale_f64(float64x1_t __p0, float64x1_t __p1) {
40755   uint64x1_t __ret;
40756   __ret = (uint64x1_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
40757   return __ret;
40758 }
40759 #endif
40760 
40761 #ifdef __LITTLE_ENDIAN__
40762 __ai uint64_t vcaled_f64(float64_t __p0, float64_t __p1) {
40763   uint64_t __ret;
40764   __ret = (uint64_t) __builtin_neon_vcaled_f64(__p0, __p1);
40765   return __ret;
40766 }
40767 #else
40768 __ai uint64_t vcaled_f64(float64_t __p0, float64_t __p1) {
40769   uint64_t __ret;
40770   __ret = (uint64_t) __builtin_neon_vcaled_f64(__p0, __p1);
40771   return __ret;
40772 }
40773 #endif
40774 
40775 #ifdef __LITTLE_ENDIAN__
40776 __ai uint32_t vcales_f32(float32_t __p0, float32_t __p1) {
40777   uint32_t __ret;
40778   __ret = (uint32_t) __builtin_neon_vcales_f32(__p0, __p1);
40779   return __ret;
40780 }
40781 #else
40782 __ai uint32_t vcales_f32(float32_t __p0, float32_t __p1) {
40783   uint32_t __ret;
40784   __ret = (uint32_t) __builtin_neon_vcales_f32(__p0, __p1);
40785   return __ret;
40786 }
40787 #endif
40788 
40789 #ifdef __LITTLE_ENDIAN__
40790 __ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) {
40791   uint64x2_t __ret;
40792   __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
40793   return __ret;
40794 }
40795 #else
40796 __ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) {
40797   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
40798   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
40799   uint64x2_t __ret;
40800   __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
40801   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
40802   return __ret;
40803 }
40804 #endif
40805 
40806 #ifdef __LITTLE_ENDIAN__
40807 __ai uint64x1_t vcalt_f64(float64x1_t __p0, float64x1_t __p1) {
40808   uint64x1_t __ret;
40809   __ret = (uint64x1_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
40810   return __ret;
40811 }
40812 #else
40813 __ai uint64x1_t vcalt_f64(float64x1_t __p0, float64x1_t __p1) {
40814   uint64x1_t __ret;
40815   __ret = (uint64x1_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
40816   return __ret;
40817 }
40818 #endif
40819 
40820 #ifdef __LITTLE_ENDIAN__
40821 __ai uint64_t vcaltd_f64(float64_t __p0, float64_t __p1) {
40822   uint64_t __ret;
40823   __ret = (uint64_t) __builtin_neon_vcaltd_f64(__p0, __p1);
40824   return __ret;
40825 }
40826 #else
40827 __ai uint64_t vcaltd_f64(float64_t __p0, float64_t __p1) {
40828   uint64_t __ret;
40829   __ret = (uint64_t) __builtin_neon_vcaltd_f64(__p0, __p1);
40830   return __ret;
40831 }
40832 #endif
40833 
40834 #ifdef __LITTLE_ENDIAN__
40835 __ai uint32_t vcalts_f32(float32_t __p0, float32_t __p1) {
40836   uint32_t __ret;
40837   __ret = (uint32_t) __builtin_neon_vcalts_f32(__p0, __p1);
40838   return __ret;
40839 }
40840 #else
40841 __ai uint32_t vcalts_f32(float32_t __p0, float32_t __p1) {
40842   uint32_t __ret;
40843   __ret = (uint32_t) __builtin_neon_vcalts_f32(__p0, __p1);
40844   return __ret;
40845 }
40846 #endif
40847 
40848 #ifdef __LITTLE_ENDIAN__
40849 __ai uint64x1_t vceq_p64(poly64x1_t __p0, poly64x1_t __p1) {
40850   uint64x1_t __ret;
40851   __ret = (uint64x1_t)(__p0 == __p1);
40852   return __ret;
40853 }
40854 #else
40855 __ai uint64x1_t vceq_p64(poly64x1_t __p0, poly64x1_t __p1) {
40856   uint64x1_t __ret;
40857   __ret = (uint64x1_t)(__p0 == __p1);
40858   return __ret;
40859 }
40860 #endif
40861 
40862 #ifdef __LITTLE_ENDIAN__
40863 __ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) {
40864   uint64x2_t __ret;
40865   __ret = (uint64x2_t)(__p0 == __p1);
40866   return __ret;
40867 }
40868 #else
40869 __ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) {
40870   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
40871   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
40872   uint64x2_t __ret;
40873   __ret = (uint64x2_t)(__rev0 == __rev1);
40874   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
40875   return __ret;
40876 }
40877 #endif
40878 
40879 #ifdef __LITTLE_ENDIAN__
40880 __ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) {
40881   uint64x2_t __ret;
40882   __ret = (uint64x2_t)(__p0 == __p1);
40883   return __ret;
40884 }
40885 #else
40886 __ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) {
40887   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
40888   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
40889   uint64x2_t __ret;
40890   __ret = (uint64x2_t)(__rev0 == __rev1);
40891   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
40892   return __ret;
40893 }
40894 #endif
40895 
40896 #ifdef __LITTLE_ENDIAN__
40897 __ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) {
40898   uint64x2_t __ret;
40899   __ret = (uint64x2_t)(__p0 == __p1);
40900   return __ret;
40901 }
40902 #else
40903 __ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) {
40904   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
40905   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
40906   uint64x2_t __ret;
40907   __ret = (uint64x2_t)(__rev0 == __rev1);
40908   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
40909   return __ret;
40910 }
40911 #endif
40912 
40913 #ifdef __LITTLE_ENDIAN__
40914 __ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) {
40915   uint64x2_t __ret;
40916   __ret = (uint64x2_t)(__p0 == __p1);
40917   return __ret;
40918 }
40919 #else
40920 __ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) {
40921   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
40922   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
40923   uint64x2_t __ret;
40924   __ret = (uint64x2_t)(__rev0 == __rev1);
40925   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
40926   return __ret;
40927 }
40928 #endif
40929 
40930 #ifdef __LITTLE_ENDIAN__
40931 __ai uint64x1_t vceq_u64(uint64x1_t __p0, uint64x1_t __p1) {
40932   uint64x1_t __ret;
40933   __ret = (uint64x1_t)(__p0 == __p1);
40934   return __ret;
40935 }
40936 #else
40937 __ai uint64x1_t vceq_u64(uint64x1_t __p0, uint64x1_t __p1) {
40938   uint64x1_t __ret;
40939   __ret = (uint64x1_t)(__p0 == __p1);
40940   return __ret;
40941 }
40942 #endif
40943 
40944 #ifdef __LITTLE_ENDIAN__
40945 __ai uint64x1_t vceq_f64(float64x1_t __p0, float64x1_t __p1) {
40946   uint64x1_t __ret;
40947   __ret = (uint64x1_t)(__p0 == __p1);
40948   return __ret;
40949 }
40950 #else
40951 __ai uint64x1_t vceq_f64(float64x1_t __p0, float64x1_t __p1) {
40952   uint64x1_t __ret;
40953   __ret = (uint64x1_t)(__p0 == __p1);
40954   return __ret;
40955 }
40956 #endif
40957 
40958 #ifdef __LITTLE_ENDIAN__
40959 __ai uint64x1_t vceq_s64(int64x1_t __p0, int64x1_t __p1) {
40960   uint64x1_t __ret;
40961   __ret = (uint64x1_t)(__p0 == __p1);
40962   return __ret;
40963 }
40964 #else
40965 __ai uint64x1_t vceq_s64(int64x1_t __p0, int64x1_t __p1) {
40966   uint64x1_t __ret;
40967   __ret = (uint64x1_t)(__p0 == __p1);
40968   return __ret;
40969 }
40970 #endif
40971 
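/* Illustrative sketch (not part of the generated intrinsics): the vceq* vector
 * comparisons above return an element-wise mask, all ones in lanes that compare
 * equal and all zeros otherwise. Assuming AArch64 and vdupq_n_f64 from this header:
 *
 *   float64x2_t a = vdupq_n_f64(1.0);
 *   float64x2_t b = vdupq_n_f64(2.0);
 *   uint64x2_t  m = vceqq_f64(a, a);   // both lanes 0xFFFFFFFFFFFFFFFF
 *   uint64x2_t  n = vceqq_f64(a, b);   // both lanes 0x0000000000000000
 */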
40972 #ifdef __LITTLE_ENDIAN__
40973 __ai uint64_t vceqd_u64(uint64_t __p0, uint64_t __p1) {
40974   uint64_t __ret;
40975   __ret = (uint64_t) __builtin_neon_vceqd_u64(__p0, __p1);
40976   return __ret;
40977 }
40978 #else
40979 __ai uint64_t vceqd_u64(uint64_t __p0, uint64_t __p1) {
40980   uint64_t __ret;
40981   __ret = (uint64_t) __builtin_neon_vceqd_u64(__p0, __p1);
40982   return __ret;
40983 }
40984 #endif
40985 
40986 #ifdef __LITTLE_ENDIAN__
40987 __ai int64_t vceqd_s64(int64_t __p0, int64_t __p1) {
40988   int64_t __ret;
40989   __ret = (int64_t) __builtin_neon_vceqd_s64(__p0, __p1);
40990   return __ret;
40991 }
40992 #else
40993 __ai int64_t vceqd_s64(int64_t __p0, int64_t __p1) {
40994   int64_t __ret;
40995   __ret = (int64_t) __builtin_neon_vceqd_s64(__p0, __p1);
40996   return __ret;
40997 }
40998 #endif
40999 
41000 #ifdef __LITTLE_ENDIAN__
41001 __ai uint64_t vceqd_f64(float64_t __p0, float64_t __p1) {
41002   uint64_t __ret;
41003   __ret = (uint64_t) __builtin_neon_vceqd_f64(__p0, __p1);
41004   return __ret;
41005 }
41006 #else
41007 __ai uint64_t vceqd_f64(float64_t __p0, float64_t __p1) {
41008   uint64_t __ret;
41009   __ret = (uint64_t) __builtin_neon_vceqd_f64(__p0, __p1);
41010   return __ret;
41011 }
41012 #endif
41013 
41014 #ifdef __LITTLE_ENDIAN__
41015 __ai uint32_t vceqs_f32(float32_t __p0, float32_t __p1) {
41016   uint32_t __ret;
41017   __ret = (uint32_t) __builtin_neon_vceqs_f32(__p0, __p1);
41018   return __ret;
41019 }
41020 #else
41021 __ai uint32_t vceqs_f32(float32_t __p0, float32_t __p1) {
41022   uint32_t __ret;
41023   __ret = (uint32_t) __builtin_neon_vceqs_f32(__p0, __p1);
41024   return __ret;
41025 }
41026 #endif
41027 
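/* Illustrative sketch: the scalar forms (vceqd_ and vceqs_ above) compare a
 * single 64-bit or 32-bit value and return an all-ones or all-zero mask of the
 * same width. For example, assuming AArch64:
 *
 *   uint64_t eq = vceqd_u64(7u, 7u);     // expected: ~0ULL
 *   uint32_t ne = vceqs_f32(1.0f, 2.0f); // expected: 0
 */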
41028 #ifdef __LITTLE_ENDIAN__
41029 __ai uint8x8_t vceqz_p8(poly8x8_t __p0) {
41030   uint8x8_t __ret;
41031   __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
41032   return __ret;
41033 }
41034 #else
41035 __ai uint8x8_t vceqz_p8(poly8x8_t __p0) {
41036   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
41037   uint8x8_t __ret;
41038   __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
41039   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
41040   return __ret;
41041 }
41042 #endif
41043 
41044 #ifdef __LITTLE_ENDIAN__
41045 __ai uint64x1_t vceqz_p64(poly64x1_t __p0) {
41046   uint64x1_t __ret;
41047   __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
41048   return __ret;
41049 }
41050 #else
41051 __ai uint64x1_t vceqz_p64(poly64x1_t __p0) {
41052   uint64x1_t __ret;
41053   __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
41054   return __ret;
41055 }
41056 #endif
41057 
41058 #ifdef __LITTLE_ENDIAN__
41059 __ai uint16x4_t vceqz_p16(poly16x4_t __p0) {
41060   uint16x4_t __ret;
41061   __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
41062   return __ret;
41063 }
41064 #else
41065 __ai uint16x4_t vceqz_p16(poly16x4_t __p0) {
41066   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
41067   uint16x4_t __ret;
41068   __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
41069   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
41070   return __ret;
41071 }
41072 #endif
41073 
41074 #ifdef __LITTLE_ENDIAN__
41075 __ai uint8x16_t vceqzq_p8(poly8x16_t __p0) {
41076   uint8x16_t __ret;
41077   __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
41078   return __ret;
41079 }
41080 #else
41081 __ai uint8x16_t vceqzq_p8(poly8x16_t __p0) {
41082   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
41083   uint8x16_t __ret;
41084   __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
41085   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
41086   return __ret;
41087 }
41088 #endif
41089 
41090 #ifdef __LITTLE_ENDIAN__
41091 __ai uint64x2_t vceqzq_p64(poly64x2_t __p0) {
41092   uint64x2_t __ret;
41093   __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
41094   return __ret;
41095 }
41096 #else
41097 __ai uint64x2_t vceqzq_p64(poly64x2_t __p0) {
41098   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
41099   uint64x2_t __ret;
41100   __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
41101   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
41102   return __ret;
41103 }
41104 #endif
41105 
41106 #ifdef __LITTLE_ENDIAN__
41107 __ai uint16x8_t vceqzq_p16(poly16x8_t __p0) {
41108   uint16x8_t __ret;
41109   __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
41110   return __ret;
41111 }
41112 #else
41113 __ai uint16x8_t vceqzq_p16(poly16x8_t __p0) {
41114   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
41115   uint16x8_t __ret;
41116   __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
41117   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
41118   return __ret;
41119 }
41120 #endif
41121 
41122 #ifdef __LITTLE_ENDIAN__
41123 __ai uint8x16_t vceqzq_u8(uint8x16_t __p0) {
41124   uint8x16_t __ret;
41125   __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
41126   return __ret;
41127 }
41128 #else
41129 __ai uint8x16_t vceqzq_u8(uint8x16_t __p0) {
41130   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
41131   uint8x16_t __ret;
41132   __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
41133   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
41134   return __ret;
41135 }
41136 #endif
41137 
41138 #ifdef __LITTLE_ENDIAN__
41139 __ai uint32x4_t vceqzq_u32(uint32x4_t __p0) {
41140   uint32x4_t __ret;
41141   __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
41142   return __ret;
41143 }
41144 #else
41145 __ai uint32x4_t vceqzq_u32(uint32x4_t __p0) {
41146   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
41147   uint32x4_t __ret;
41148   __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
41149   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
41150   return __ret;
41151 }
41152 #endif
41153 
41154 #ifdef __LITTLE_ENDIAN__
41155 __ai uint64x2_t vceqzq_u64(uint64x2_t __p0) {
41156   uint64x2_t __ret;
41157   __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
41158   return __ret;
41159 }
41160 #else
41161 __ai uint64x2_t vceqzq_u64(uint64x2_t __p0) {
41162   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
41163   uint64x2_t __ret;
41164   __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
41165   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
41166   return __ret;
41167 }
41168 #endif
41169 
41170 #ifdef __LITTLE_ENDIAN__
41171 __ai uint16x8_t vceqzq_u16(uint16x8_t __p0) {
41172   uint16x8_t __ret;
41173   __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
41174   return __ret;
41175 }
41176 #else
41177 __ai uint16x8_t vceqzq_u16(uint16x8_t __p0) {
41178   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
41179   uint16x8_t __ret;
41180   __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
41181   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
41182   return __ret;
41183 }
41184 #endif
41185 
41186 #ifdef __LITTLE_ENDIAN__
41187 __ai uint8x16_t vceqzq_s8(int8x16_t __p0) {
41188   uint8x16_t __ret;
41189   __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
41190   return __ret;
41191 }
41192 #else
41193 __ai uint8x16_t vceqzq_s8(int8x16_t __p0) {
41194   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
41195   uint8x16_t __ret;
41196   __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
41197   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
41198   return __ret;
41199 }
41200 #endif
41201 
41202 #ifdef __LITTLE_ENDIAN__
41203 __ai uint64x2_t vceqzq_f64(float64x2_t __p0) {
41204   uint64x2_t __ret;
41205   __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
41206   return __ret;
41207 }
41208 #else
41209 __ai uint64x2_t vceqzq_f64(float64x2_t __p0) {
41210   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
41211   uint64x2_t __ret;
41212   __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
41213   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
41214   return __ret;
41215 }
41216 #endif
41217 
41218 #ifdef __LITTLE_ENDIAN__
41219 __ai uint32x4_t vceqzq_f32(float32x4_t __p0) {
41220   uint32x4_t __ret;
41221   __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
41222   return __ret;
41223 }
41224 #else
41225 __ai uint32x4_t vceqzq_f32(float32x4_t __p0) {
41226   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
41227   uint32x4_t __ret;
41228   __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
41229   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
41230   return __ret;
41231 }
41232 #endif
41233 
41234 #ifdef __LITTLE_ENDIAN__
41235 __ai uint32x4_t vceqzq_s32(int32x4_t __p0) {
41236   uint32x4_t __ret;
41237   __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
41238   return __ret;
41239 }
41240 #else
41241 __ai uint32x4_t vceqzq_s32(int32x4_t __p0) {
41242   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
41243   uint32x4_t __ret;
41244   __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
41245   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
41246   return __ret;
41247 }
41248 #endif
41249 
41250 #ifdef __LITTLE_ENDIAN__
41251 __ai uint64x2_t vceqzq_s64(int64x2_t __p0) {
41252   uint64x2_t __ret;
41253   __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
41254   return __ret;
41255 }
41256 #else
41257 __ai uint64x2_t vceqzq_s64(int64x2_t __p0) {
41258   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
41259   uint64x2_t __ret;
41260   __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
41261   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
41262   return __ret;
41263 }
41264 #endif
41265 
41266 #ifdef __LITTLE_ENDIAN__
41267 __ai uint16x8_t vceqzq_s16(int16x8_t __p0) {
41268   uint16x8_t __ret;
41269   __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
41270   return __ret;
41271 }
41272 #else
41273 __ai uint16x8_t vceqzq_s16(int16x8_t __p0) {
41274   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
41275   uint16x8_t __ret;
41276   __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
41277   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
41278   return __ret;
41279 }
41280 #endif
41281 
41282 #ifdef __LITTLE_ENDIAN__
41283 __ai uint8x8_t vceqz_u8(uint8x8_t __p0) {
41284   uint8x8_t __ret;
41285   __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
41286   return __ret;
41287 }
41288 #else
41289 __ai uint8x8_t vceqz_u8(uint8x8_t __p0) {
41290   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
41291   uint8x8_t __ret;
41292   __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
41293   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
41294   return __ret;
41295 }
41296 #endif
41297 
41298 #ifdef __LITTLE_ENDIAN__
41299 __ai uint32x2_t vceqz_u32(uint32x2_t __p0) {
41300   uint32x2_t __ret;
41301   __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
41302   return __ret;
41303 }
41304 #else
41305 __ai uint32x2_t vceqz_u32(uint32x2_t __p0) {
41306   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
41307   uint32x2_t __ret;
41308   __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
41309   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
41310   return __ret;
41311 }
41312 #endif
41313 
41314 #ifdef __LITTLE_ENDIAN__
41315 __ai uint64x1_t vceqz_u64(uint64x1_t __p0) {
41316   uint64x1_t __ret;
41317   __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
41318   return __ret;
41319 }
41320 #else
41321 __ai uint64x1_t vceqz_u64(uint64x1_t __p0) {
41322   uint64x1_t __ret;
41323   __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
41324   return __ret;
41325 }
41326 #endif
41327 
41328 #ifdef __LITTLE_ENDIAN__
41329 __ai uint16x4_t vceqz_u16(uint16x4_t __p0) {
41330   uint16x4_t __ret;
41331   __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
41332   return __ret;
41333 }
41334 #else
41335 __ai uint16x4_t vceqz_u16(uint16x4_t __p0) {
41336   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
41337   uint16x4_t __ret;
41338   __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
41339   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
41340   return __ret;
41341 }
41342 #endif
41343 
41344 #ifdef __LITTLE_ENDIAN__
41345 __ai uint8x8_t vceqz_s8(int8x8_t __p0) {
41346   uint8x8_t __ret;
41347   __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
41348   return __ret;
41349 }
41350 #else
41351 __ai uint8x8_t vceqz_s8(int8x8_t __p0) {
41352   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
41353   uint8x8_t __ret;
41354   __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
41355   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
41356   return __ret;
41357 }
41358 #endif
41359 
41360 #ifdef __LITTLE_ENDIAN__
41361 __ai uint64x1_t vceqz_f64(float64x1_t __p0) {
41362   uint64x1_t __ret;
41363   __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
41364   return __ret;
41365 }
41366 #else
41367 __ai uint64x1_t vceqz_f64(float64x1_t __p0) {
41368   uint64x1_t __ret;
41369   __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
41370   return __ret;
41371 }
41372 #endif
41373 
41374 #ifdef __LITTLE_ENDIAN__
41375 __ai uint32x2_t vceqz_f32(float32x2_t __p0) {
41376   uint32x2_t __ret;
41377   __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
41378   return __ret;
41379 }
41380 #else
41381 __ai uint32x2_t vceqz_f32(float32x2_t __p0) {
41382   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
41383   uint32x2_t __ret;
41384   __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
41385   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
41386   return __ret;
41387 }
41388 #endif
41389 
41390 #ifdef __LITTLE_ENDIAN__
41391 __ai uint32x2_t vceqz_s32(int32x2_t __p0) {
41392   uint32x2_t __ret;
41393   __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
41394   return __ret;
41395 }
41396 #else
41397 __ai uint32x2_t vceqz_s32(int32x2_t __p0) {
41398   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
41399   uint32x2_t __ret;
41400   __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
41401   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
41402   return __ret;
41403 }
41404 #endif
41405 
41406 #ifdef __LITTLE_ENDIAN__
41407 __ai uint64x1_t vceqz_s64(int64x1_t __p0) {
41408   uint64x1_t __ret;
41409   __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
41410   return __ret;
41411 }
41412 #else
41413 __ai uint64x1_t vceqz_s64(int64x1_t __p0) {
41414   uint64x1_t __ret;
41415   __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
41416   return __ret;
41417 }
41418 #endif
41419 
41420 #ifdef __LITTLE_ENDIAN__
41421 __ai uint16x4_t vceqz_s16(int16x4_t __p0) {
41422   uint16x4_t __ret;
41423   __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
41424   return __ret;
41425 }
41426 #else
41427 __ai uint16x4_t vceqz_s16(int16x4_t __p0) {
41428   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
41429   uint16x4_t __ret;
41430   __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
41431   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
41432   return __ret;
41433 }
41434 #endif
41435 
41436 #ifdef __LITTLE_ENDIAN__
41437 __ai uint64_t vceqzd_u64(uint64_t __p0) {
41438   uint64_t __ret;
41439   __ret = (uint64_t) __builtin_neon_vceqzd_u64(__p0);
41440   return __ret;
41441 }
41442 #else
41443 __ai uint64_t vceqzd_u64(uint64_t __p0) {
41444   uint64_t __ret;
41445   __ret = (uint64_t) __builtin_neon_vceqzd_u64(__p0);
41446   return __ret;
41447 }
41448 #endif
41449 
41450 #ifdef __LITTLE_ENDIAN__
41451 __ai int64_t vceqzd_s64(int64_t __p0) {
41452   int64_t __ret;
41453   __ret = (int64_t) __builtin_neon_vceqzd_s64(__p0);
41454   return __ret;
41455 }
41456 #else
41457 __ai int64_t vceqzd_s64(int64_t __p0) {
41458   int64_t __ret;
41459   __ret = (int64_t) __builtin_neon_vceqzd_s64(__p0);
41460   return __ret;
41461 }
41462 #endif
41463 
41464 #ifdef __LITTLE_ENDIAN__
41465 __ai uint64_t vceqzd_f64(float64_t __p0) {
41466   uint64_t __ret;
41467   __ret = (uint64_t) __builtin_neon_vceqzd_f64(__p0);
41468   return __ret;
41469 }
41470 #else
41471 __ai uint64_t vceqzd_f64(float64_t __p0) {
41472   uint64_t __ret;
41473   __ret = (uint64_t) __builtin_neon_vceqzd_f64(__p0);
41474   return __ret;
41475 }
41476 #endif
41477 
41478 #ifdef __LITTLE_ENDIAN__
41479 __ai uint32_t vceqzs_f32(float32_t __p0) {
41480   uint32_t __ret;
41481   __ret = (uint32_t) __builtin_neon_vceqzs_f32(__p0);
41482   return __ret;
41483 }
41484 #else
41485 __ai uint32_t vceqzs_f32(float32_t __p0) {
41486   uint32_t __ret;
41487   __ret = (uint32_t) __builtin_neon_vceqzs_f32(__p0);
41488   return __ret;
41489 }
41490 #endif
41491 
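/* Illustrative sketch: the vceqz family compares each element against zero, so
 * it behaves like the corresponding vceq form with a zero second operand. For
 * example, assuming AArch64 and vdup_n_s32 from this header:
 *
 *   int32x2_t  v = vdup_n_s32(0);
 *   uint32x2_t m = vceqz_s32(v);       // both lanes 0xFFFFFFFF
 */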
41492 #ifdef __LITTLE_ENDIAN__
41493 __ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) {
41494   uint64x2_t __ret;
41495   __ret = (uint64x2_t)(__p0 >= __p1);
41496   return __ret;
41497 }
41498 #else
41499 __ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) {
41500   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
41501   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
41502   uint64x2_t __ret;
41503   __ret = (uint64x2_t)(__rev0 >= __rev1);
41504   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
41505   return __ret;
41506 }
41507 #endif
41508 
41509 #ifdef __LITTLE_ENDIAN__
41510 __ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) {
41511   uint64x2_t __ret;
41512   __ret = (uint64x2_t)(__p0 >= __p1);
41513   return __ret;
41514 }
41515 #else
41516 __ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) {
41517   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
41518   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
41519   uint64x2_t __ret;
41520   __ret = (uint64x2_t)(__rev0 >= __rev1);
41521   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
41522   return __ret;
41523 }
41524 #endif
41525 
41526 #ifdef __LITTLE_ENDIAN__
41527 __ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) {
41528   uint64x2_t __ret;
41529   __ret = (uint64x2_t)(__p0 >= __p1);
41530   return __ret;
41531 }
41532 #else
41533 __ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) {
41534   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
41535   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
41536   uint64x2_t __ret;
41537   __ret = (uint64x2_t)(__rev0 >= __rev1);
41538   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
41539   return __ret;
41540 }
41541 #endif
41542 
41543 #ifdef __LITTLE_ENDIAN__
41544 __ai uint64x1_t vcge_u64(uint64x1_t __p0, uint64x1_t __p1) {
41545   uint64x1_t __ret;
41546   __ret = (uint64x1_t)(__p0 >= __p1);
41547   return __ret;
41548 }
41549 #else
41550 __ai uint64x1_t vcge_u64(uint64x1_t __p0, uint64x1_t __p1) {
41551   uint64x1_t __ret;
41552   __ret = (uint64x1_t)(__p0 >= __p1);
41553   return __ret;
41554 }
41555 #endif
41556 
41557 #ifdef __LITTLE_ENDIAN__
41558 __ai uint64x1_t vcge_f64(float64x1_t __p0, float64x1_t __p1) {
41559   uint64x1_t __ret;
41560   __ret = (uint64x1_t)(__p0 >= __p1);
41561   return __ret;
41562 }
41563 #else
41564 __ai uint64x1_t vcge_f64(float64x1_t __p0, float64x1_t __p1) {
41565   uint64x1_t __ret;
41566   __ret = (uint64x1_t)(__p0 >= __p1);
41567   return __ret;
41568 }
41569 #endif
41570 
41571 #ifdef __LITTLE_ENDIAN__
41572 __ai uint64x1_t vcge_s64(int64x1_t __p0, int64x1_t __p1) {
41573   uint64x1_t __ret;
41574   __ret = (uint64x1_t)(__p0 >= __p1);
41575   return __ret;
41576 }
41577 #else
41578 __ai uint64x1_t vcge_s64(int64x1_t __p0, int64x1_t __p1) {
41579   uint64x1_t __ret;
41580   __ret = (uint64x1_t)(__p0 >= __p1);
41581   return __ret;
41582 }
41583 #endif
41584 
41585 #ifdef __LITTLE_ENDIAN__
41586 __ai int64_t vcged_s64(int64_t __p0, int64_t __p1) {
41587   int64_t __ret;
41588   __ret = (int64_t) __builtin_neon_vcged_s64(__p0, __p1);
41589   return __ret;
41590 }
41591 #else
41592 __ai int64_t vcged_s64(int64_t __p0, int64_t __p1) {
41593   int64_t __ret;
41594   __ret = (int64_t) __builtin_neon_vcged_s64(__p0, __p1);
41595   return __ret;
41596 }
41597 #endif
41598 
41599 #ifdef __LITTLE_ENDIAN__
41600 __ai uint64_t vcged_u64(uint64_t __p0, uint64_t __p1) {
41601   uint64_t __ret;
41602   __ret = (uint64_t) __builtin_neon_vcged_u64(__p0, __p1);
41603   return __ret;
41604 }
41605 #else
41606 __ai uint64_t vcged_u64(uint64_t __p0, uint64_t __p1) {
41607   uint64_t __ret;
41608   __ret = (uint64_t) __builtin_neon_vcged_u64(__p0, __p1);
41609   return __ret;
41610 }
41611 #endif
41612 
41613 #ifdef __LITTLE_ENDIAN__
41614 __ai uint64_t vcged_f64(float64_t __p0, float64_t __p1) {
41615   uint64_t __ret;
41616   __ret = (uint64_t) __builtin_neon_vcged_f64(__p0, __p1);
41617   return __ret;
41618 }
41619 #else
41620 __ai uint64_t vcged_f64(float64_t __p0, float64_t __p1) {
41621   uint64_t __ret;
41622   __ret = (uint64_t) __builtin_neon_vcged_f64(__p0, __p1);
41623   return __ret;
41624 }
41625 #endif
41626 
41627 #ifdef __LITTLE_ENDIAN__
41628 __ai uint32_t vcges_f32(float32_t __p0, float32_t __p1) {
41629   uint32_t __ret;
41630   __ret = (uint32_t) __builtin_neon_vcges_f32(__p0, __p1);
41631   return __ret;
41632 }
41633 #else
41634 __ai uint32_t vcges_f32(float32_t __p0, float32_t __p1) {
41635   uint32_t __ret;
41636   __ret = (uint32_t) __builtin_neon_vcges_f32(__p0, __p1);
41637   return __ret;
41638 }
41639 #endif
41640 
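/* Illustrative sketch: the vcge, vcged and vcges forms implement ">=",
 * returning a per-lane mask (vector forms) or a single mask word (scalar
 * forms). For example, assuming AArch64:
 *
 *   uint64_t ge = vcged_f64(2.0, 1.0); // expected: ~0ULL
 *   uint64_t lt = vcged_f64(0.5, 1.0); // expected: 0
 */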
41641 #ifdef __LITTLE_ENDIAN__
41642 __ai uint8x16_t vcgezq_s8(int8x16_t __p0) {
41643   uint8x16_t __ret;
41644   __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 48);
41645   return __ret;
41646 }
41647 #else
41648 __ai uint8x16_t vcgezq_s8(int8x16_t __p0) {
41649   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
41650   uint8x16_t __ret;
41651   __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 48);
41652   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
41653   return __ret;
41654 }
41655 #endif
41656 
41657 #ifdef __LITTLE_ENDIAN__
41658 __ai uint64x2_t vcgezq_f64(float64x2_t __p0) {
41659   uint64x2_t __ret;
41660   __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51);
41661   return __ret;
41662 }
41663 #else
41664 __ai uint64x2_t vcgezq_f64(float64x2_t __p0) {
41665   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
41666   uint64x2_t __ret;
41667   __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51);
41668   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
41669   return __ret;
41670 }
41671 #endif
41672 
41673 #ifdef __LITTLE_ENDIAN__
41674 __ai uint32x4_t vcgezq_f32(float32x4_t __p0) {
41675   uint32x4_t __ret;
41676   __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50);
41677   return __ret;
41678 }
41679 #else
41680 __ai uint32x4_t vcgezq_f32(float32x4_t __p0) {
41681   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
41682   uint32x4_t __ret;
41683   __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50);
41684   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
41685   return __ret;
41686 }
41687 #endif
41688 
41689 #ifdef __LITTLE_ENDIAN__
41690 __ai uint32x4_t vcgezq_s32(int32x4_t __p0) {
41691   uint32x4_t __ret;
41692   __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50);
41693   return __ret;
41694 }
41695 #else
41696 __ai uint32x4_t vcgezq_s32(int32x4_t __p0) {
41697   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
41698   uint32x4_t __ret;
41699   __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50);
41700   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
41701   return __ret;
41702 }
41703 #endif
41704 
41705 #ifdef __LITTLE_ENDIAN__
41706 __ai uint64x2_t vcgezq_s64(int64x2_t __p0) {
41707   uint64x2_t __ret;
41708   __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51);
41709   return __ret;
41710 }
41711 #else
41712 __ai uint64x2_t vcgezq_s64(int64x2_t __p0) {
41713   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
41714   uint64x2_t __ret;
41715   __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51);
41716   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
41717   return __ret;
41718 }
41719 #endif
41720 
41721 #ifdef __LITTLE_ENDIAN__
41722 __ai uint16x8_t vcgezq_s16(int16x8_t __p0) {
41723   uint16x8_t __ret;
41724   __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49);
41725   return __ret;
41726 }
41727 #else
41728 __ai uint16x8_t vcgezq_s16(int16x8_t __p0) {
41729   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
41730   uint16x8_t __ret;
41731   __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49);
41732   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
41733   return __ret;
41734 }
41735 #endif
41736 
41737 #ifdef __LITTLE_ENDIAN__
41738 __ai uint8x8_t vcgez_s8(int8x8_t __p0) {
41739   uint8x8_t __ret;
41740   __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 16);
41741   return __ret;
41742 }
41743 #else
41744 __ai uint8x8_t vcgez_s8(int8x8_t __p0) {
41745   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
41746   uint8x8_t __ret;
41747   __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 16);
41748   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
41749   return __ret;
41750 }
41751 #endif
41752 
41753 #ifdef __LITTLE_ENDIAN__
41754 __ai uint64x1_t vcgez_f64(float64x1_t __p0) {
41755   uint64x1_t __ret;
41756   __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
41757   return __ret;
41758 }
41759 #else
41760 __ai uint64x1_t vcgez_f64(float64x1_t __p0) {
41761   uint64x1_t __ret;
41762   __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
41763   return __ret;
41764 }
41765 #endif
41766 
41767 #ifdef __LITTLE_ENDIAN__
41768 __ai uint32x2_t vcgez_f32(float32x2_t __p0) {
41769   uint32x2_t __ret;
41770   __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18);
41771   return __ret;
41772 }
41773 #else
41774 __ai uint32x2_t vcgez_f32(float32x2_t __p0) {
41775   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
41776   uint32x2_t __ret;
41777   __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18);
41778   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
41779   return __ret;
41780 }
41781 #endif
41782 
41783 #ifdef __LITTLE_ENDIAN__
41784 __ai uint32x2_t vcgez_s32(int32x2_t __p0) {
41785   uint32x2_t __ret;
41786   __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18);
41787   return __ret;
41788 }
41789 #else
41790 __ai uint32x2_t vcgez_s32(int32x2_t __p0) {
41791   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
41792   uint32x2_t __ret;
41793   __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18);
41794   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
41795   return __ret;
41796 }
41797 #endif
41798 
41799 #ifdef __LITTLE_ENDIAN__
41800 __ai uint64x1_t vcgez_s64(int64x1_t __p0) {
41801   uint64x1_t __ret;
41802   __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
41803   return __ret;
41804 }
41805 #else
41806 __ai uint64x1_t vcgez_s64(int64x1_t __p0) {
41807   uint64x1_t __ret;
41808   __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
41809   return __ret;
41810 }
41811 #endif
41812 
41813 #ifdef __LITTLE_ENDIAN__
41814 __ai uint16x4_t vcgez_s16(int16x4_t __p0) {
41815   uint16x4_t __ret;
41816   __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17);
41817   return __ret;
41818 }
41819 #else
41820 __ai uint16x4_t vcgez_s16(int16x4_t __p0) {
41821   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
41822   uint16x4_t __ret;
41823   __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17);
41824   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
41825   return __ret;
41826 }
41827 #endif
41828 
41829 #ifdef __LITTLE_ENDIAN__
41830 __ai int64_t vcgezd_s64(int64_t __p0) {
41831   int64_t __ret;
41832   __ret = (int64_t) __builtin_neon_vcgezd_s64(__p0);
41833   return __ret;
41834 }
41835 #else
41836 __ai int64_t vcgezd_s64(int64_t __p0) {
41837   int64_t __ret;
41838   __ret = (int64_t) __builtin_neon_vcgezd_s64(__p0);
41839   return __ret;
41840 }
41841 #endif
41842 
41843 #ifdef __LITTLE_ENDIAN__
41844 __ai uint64_t vcgezd_f64(float64_t __p0) {
41845   uint64_t __ret;
41846   __ret = (uint64_t) __builtin_neon_vcgezd_f64(__p0);
41847   return __ret;
41848 }
41849 #else
41850 __ai uint64_t vcgezd_f64(float64_t __p0) {
41851   uint64_t __ret;
41852   __ret = (uint64_t) __builtin_neon_vcgezd_f64(__p0);
41853   return __ret;
41854 }
41855 #endif
41856 
41857 #ifdef __LITTLE_ENDIAN__
41858 __ai uint32_t vcgezs_f32(float32_t __p0) {
41859   uint32_t __ret;
41860   __ret = (uint32_t) __builtin_neon_vcgezs_f32(__p0);
41861   return __ret;
41862 }
41863 #else
41864 __ai uint32_t vcgezs_f32(float32_t __p0) {
41865   uint32_t __ret;
41866   __ret = (uint32_t) __builtin_neon_vcgezs_f32(__p0);
41867   return __ret;
41868 }
41869 #endif
41870 
41871 #ifdef __LITTLE_ENDIAN__
41872 __ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) {
41873   uint64x2_t __ret;
41874   __ret = (uint64x2_t)(__p0 > __p1);
41875   return __ret;
41876 }
41877 #else
41878 __ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) {
41879   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
41880   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
41881   uint64x2_t __ret;
41882   __ret = (uint64x2_t)(__rev0 > __rev1);
41883   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
41884   return __ret;
41885 }
41886 #endif
41887 
41888 #ifdef __LITTLE_ENDIAN__
41889 __ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) {
41890   uint64x2_t __ret;
41891   __ret = (uint64x2_t)(__p0 > __p1);
41892   return __ret;
41893 }
41894 #else
41895 __ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) {
41896   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
41897   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
41898   uint64x2_t __ret;
41899   __ret = (uint64x2_t)(__rev0 > __rev1);
41900   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
41901   return __ret;
41902 }
41903 #endif
41904 
41905 #ifdef __LITTLE_ENDIAN__
41906 __ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) {
41907   uint64x2_t __ret;
41908   __ret = (uint64x2_t)(__p0 > __p1);
41909   return __ret;
41910 }
41911 #else
41912 __ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) {
41913   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
41914   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
41915   uint64x2_t __ret;
41916   __ret = (uint64x2_t)(__rev0 > __rev1);
41917   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
41918   return __ret;
41919 }
41920 #endif
41921 
41922 #ifdef __LITTLE_ENDIAN__
41923 __ai uint64x1_t vcgt_u64(uint64x1_t __p0, uint64x1_t __p1) {
41924   uint64x1_t __ret;
41925   __ret = (uint64x1_t)(__p0 > __p1);
41926   return __ret;
41927 }
41928 #else
41929 __ai uint64x1_t vcgt_u64(uint64x1_t __p0, uint64x1_t __p1) {
41930   uint64x1_t __ret;
41931   __ret = (uint64x1_t)(__p0 > __p1);
41932   return __ret;
41933 }
41934 #endif
41935 
41936 #ifdef __LITTLE_ENDIAN__
41937 __ai uint64x1_t vcgt_f64(float64x1_t __p0, float64x1_t __p1) {
41938   uint64x1_t __ret;
41939   __ret = (uint64x1_t)(__p0 > __p1);
41940   return __ret;
41941 }
41942 #else
41943 __ai uint64x1_t vcgt_f64(float64x1_t __p0, float64x1_t __p1) {
41944   uint64x1_t __ret;
41945   __ret = (uint64x1_t)(__p0 > __p1);
41946   return __ret;
41947 }
41948 #endif
41949 
41950 #ifdef __LITTLE_ENDIAN__
41951 __ai uint64x1_t vcgt_s64(int64x1_t __p0, int64x1_t __p1) {
41952   uint64x1_t __ret;
41953   __ret = (uint64x1_t)(__p0 > __p1);
41954   return __ret;
41955 }
41956 #else
41957 __ai uint64x1_t vcgt_s64(int64x1_t __p0, int64x1_t __p1) {
41958   uint64x1_t __ret;
41959   __ret = (uint64x1_t)(__p0 > __p1);
41960   return __ret;
41961 }
41962 #endif
41963 
41964 #ifdef __LITTLE_ENDIAN__
41965 __ai int64_t vcgtd_s64(int64_t __p0, int64_t __p1) {
41966   int64_t __ret;
41967   __ret = (int64_t) __builtin_neon_vcgtd_s64(__p0, __p1);
41968   return __ret;
41969 }
41970 #else
41971 __ai int64_t vcgtd_s64(int64_t __p0, int64_t __p1) {
41972   int64_t __ret;
41973   __ret = (int64_t) __builtin_neon_vcgtd_s64(__p0, __p1);
41974   return __ret;
41975 }
41976 #endif
41977 
41978 #ifdef __LITTLE_ENDIAN__
41979 __ai uint64_t vcgtd_u64(uint64_t __p0, uint64_t __p1) {
41980   uint64_t __ret;
41981   __ret = (uint64_t) __builtin_neon_vcgtd_u64(__p0, __p1);
41982   return __ret;
41983 }
41984 #else
41985 __ai uint64_t vcgtd_u64(uint64_t __p0, uint64_t __p1) {
41986   uint64_t __ret;
41987   __ret = (uint64_t) __builtin_neon_vcgtd_u64(__p0, __p1);
41988   return __ret;
41989 }
41990 #endif
41991 
41992 #ifdef __LITTLE_ENDIAN__
41993 __ai uint64_t vcgtd_f64(float64_t __p0, float64_t __p1) {
41994   uint64_t __ret;
41995   __ret = (uint64_t) __builtin_neon_vcgtd_f64(__p0, __p1);
41996   return __ret;
41997 }
41998 #else
41999 __ai uint64_t vcgtd_f64(float64_t __p0, float64_t __p1) {
42000   uint64_t __ret;
42001   __ret = (uint64_t) __builtin_neon_vcgtd_f64(__p0, __p1);
42002   return __ret;
42003 }
42004 #endif
42005 
42006 #ifdef __LITTLE_ENDIAN__
42007 __ai uint32_t vcgts_f32(float32_t __p0, float32_t __p1) {
42008   uint32_t __ret;
42009   __ret = (uint32_t) __builtin_neon_vcgts_f32(__p0, __p1);
42010   return __ret;
42011 }
42012 #else
42013 __ai uint32_t vcgts_f32(float32_t __p0, float32_t __p1) {
42014   uint32_t __ret;
42015   __ret = (uint32_t) __builtin_neon_vcgts_f32(__p0, __p1);
42016   return __ret;
42017 }
42018 #endif
42019 
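/* Illustrative sketch: a ">" mask from vcgtq_f64 can drive a per-lane select.
 * This is only an example of intended use; it assumes vbslq_f64 is provided
 * elsewhere in this header and that float64x2_t values a and b are in scope:
 *
 *   uint64x2_t  gt  = vcgtq_f64(a, b);      // lanes where a > b
 *   float64x2_t max = vbslq_f64(gt, a, b);  // per-lane maximum of a and b
 */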
42020 #ifdef __LITTLE_ENDIAN__
42021 __ai uint8x16_t vcgtzq_s8(int8x16_t __p0) {
42022   uint8x16_t __ret;
42023   __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 48);
42024   return __ret;
42025 }
42026 #else
42027 __ai uint8x16_t vcgtzq_s8(int8x16_t __p0) {
42028   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
42029   uint8x16_t __ret;
42030   __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 48);
42031   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
42032   return __ret;
42033 }
42034 #endif
42035 
42036 #ifdef __LITTLE_ENDIAN__
42037 __ai uint64x2_t vcgtzq_f64(float64x2_t __p0) {
42038   uint64x2_t __ret;
42039   __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51);
42040   return __ret;
42041 }
42042 #else
42043 __ai uint64x2_t vcgtzq_f64(float64x2_t __p0) {
42044   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
42045   uint64x2_t __ret;
42046   __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51);
42047   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
42048   return __ret;
42049 }
42050 #endif
42051 
42052 #ifdef __LITTLE_ENDIAN__
42053 __ai uint32x4_t vcgtzq_f32(float32x4_t __p0) {
42054   uint32x4_t __ret;
42055   __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50);
42056   return __ret;
42057 }
42058 #else
42059 __ai uint32x4_t vcgtzq_f32(float32x4_t __p0) {
42060   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
42061   uint32x4_t __ret;
42062   __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50);
42063   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
42064   return __ret;
42065 }
42066 #endif
42067 
42068 #ifdef __LITTLE_ENDIAN__
42069 __ai uint32x4_t vcgtzq_s32(int32x4_t __p0) {
42070   uint32x4_t __ret;
42071   __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50);
42072   return __ret;
42073 }
42074 #else
42075 __ai uint32x4_t vcgtzq_s32(int32x4_t __p0) {
42076   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
42077   uint32x4_t __ret;
42078   __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50);
42079   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
42080   return __ret;
42081 }
42082 #endif
42083 
42084 #ifdef __LITTLE_ENDIAN__
42085 __ai uint64x2_t vcgtzq_s64(int64x2_t __p0) {
42086   uint64x2_t __ret;
42087   __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51);
42088   return __ret;
42089 }
42090 #else
42091 __ai uint64x2_t vcgtzq_s64(int64x2_t __p0) {
42092   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
42093   uint64x2_t __ret;
42094   __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51);
42095   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
42096   return __ret;
42097 }
42098 #endif
42099 
42100 #ifdef __LITTLE_ENDIAN__
42101 __ai uint16x8_t vcgtzq_s16(int16x8_t __p0) {
42102   uint16x8_t __ret;
42103   __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49);
42104   return __ret;
42105 }
42106 #else
42107 __ai uint16x8_t vcgtzq_s16(int16x8_t __p0) {
42108   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
42109   uint16x8_t __ret;
42110   __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49);
42111   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
42112   return __ret;
42113 }
42114 #endif
42115 
42116 #ifdef __LITTLE_ENDIAN__
42117 __ai uint8x8_t vcgtz_s8(int8x8_t __p0) {
42118   uint8x8_t __ret;
42119   __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 16);
42120   return __ret;
42121 }
42122 #else
42123 __ai uint8x8_t vcgtz_s8(int8x8_t __p0) {
42124   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
42125   uint8x8_t __ret;
42126   __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 16);
42127   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
42128   return __ret;
42129 }
42130 #endif
42131 
42132 #ifdef __LITTLE_ENDIAN__
42133 __ai uint64x1_t vcgtz_f64(float64x1_t __p0) {
42134   uint64x1_t __ret;
42135   __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
42136   return __ret;
42137 }
42138 #else
42139 __ai uint64x1_t vcgtz_f64(float64x1_t __p0) {
42140   uint64x1_t __ret;
42141   __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
42142   return __ret;
42143 }
42144 #endif
42145 
42146 #ifdef __LITTLE_ENDIAN__
42147 __ai uint32x2_t vcgtz_f32(float32x2_t __p0) {
42148   uint32x2_t __ret;
42149   __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18);
42150   return __ret;
42151 }
42152 #else
42153 __ai uint32x2_t vcgtz_f32(float32x2_t __p0) {
42154   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
42155   uint32x2_t __ret;
42156   __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18);
42157   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
42158   return __ret;
42159 }
42160 #endif
42161 
42162 #ifdef __LITTLE_ENDIAN__
42163 __ai uint32x2_t vcgtz_s32(int32x2_t __p0) {
42164   uint32x2_t __ret;
42165   __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18);
42166   return __ret;
42167 }
42168 #else
42169 __ai uint32x2_t vcgtz_s32(int32x2_t __p0) {
42170   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
42171   uint32x2_t __ret;
42172   __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18);
42173   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
42174   return __ret;
42175 }
42176 #endif
42177 
42178 #ifdef __LITTLE_ENDIAN__
42179 __ai uint64x1_t vcgtz_s64(int64x1_t __p0) {
42180   uint64x1_t __ret;
42181   __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
42182   return __ret;
42183 }
42184 #else
42185 __ai uint64x1_t vcgtz_s64(int64x1_t __p0) {
42186   uint64x1_t __ret;
42187   __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
42188   return __ret;
42189 }
42190 #endif
42191 
42192 #ifdef __LITTLE_ENDIAN__
42193 __ai uint16x4_t vcgtz_s16(int16x4_t __p0) {
42194   uint16x4_t __ret;
42195   __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17);
42196   return __ret;
42197 }
42198 #else
42199 __ai uint16x4_t vcgtz_s16(int16x4_t __p0) {
42200   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
42201   uint16x4_t __ret;
42202   __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17);
42203   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
42204   return __ret;
42205 }
42206 #endif
42207 
42208 #ifdef __LITTLE_ENDIAN__
42209 __ai int64_t vcgtzd_s64(int64_t __p0) {
42210   int64_t __ret;
42211   __ret = (int64_t) __builtin_neon_vcgtzd_s64(__p0);
42212   return __ret;
42213 }
42214 #else
42215 __ai int64_t vcgtzd_s64(int64_t __p0) {
42216   int64_t __ret;
42217   __ret = (int64_t) __builtin_neon_vcgtzd_s64(__p0);
42218   return __ret;
42219 }
42220 #endif
42221 
42222 #ifdef __LITTLE_ENDIAN__
42223 __ai uint64_t vcgtzd_f64(float64_t __p0) {
42224   uint64_t __ret;
42225   __ret = (uint64_t) __builtin_neon_vcgtzd_f64(__p0);
42226   return __ret;
42227 }
42228 #else
42229 __ai uint64_t vcgtzd_f64(float64_t __p0) {
42230   uint64_t __ret;
42231   __ret = (uint64_t) __builtin_neon_vcgtzd_f64(__p0);
42232   return __ret;
42233 }
42234 #endif
42235 
42236 #ifdef __LITTLE_ENDIAN__
42237 __ai uint32_t vcgtzs_f32(float32_t __p0) {
42238   uint32_t __ret;
42239   __ret = (uint32_t) __builtin_neon_vcgtzs_f32(__p0);
42240   return __ret;
42241 }
42242 #else
42243 __ai uint32_t vcgtzs_f32(float32_t __p0) {
42244   uint32_t __ret;
42245   __ret = (uint32_t) __builtin_neon_vcgtzs_f32(__p0);
42246   return __ret;
42247 }
42248 #endif
42249 
42250 #ifdef __LITTLE_ENDIAN__
42251 __ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) {
42252   uint64x2_t __ret;
42253   __ret = (uint64x2_t)(__p0 <= __p1);
42254   return __ret;
42255 }
42256 #else
42257 __ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) {
42258   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
42259   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
42260   uint64x2_t __ret;
42261   __ret = (uint64x2_t)(__rev0 <= __rev1);
42262   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
42263   return __ret;
42264 }
42265 #endif
42266 
42267 #ifdef __LITTLE_ENDIAN__
42268 __ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) {
42269   uint64x2_t __ret;
42270   __ret = (uint64x2_t)(__p0 <= __p1);
42271   return __ret;
42272 }
42273 #else
42274 __ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) {
42275   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
42276   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
42277   uint64x2_t __ret;
42278   __ret = (uint64x2_t)(__rev0 <= __rev1);
42279   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
42280   return __ret;
42281 }
42282 #endif
42283 
42284 #ifdef __LITTLE_ENDIAN__
42285 __ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) {
42286   uint64x2_t __ret;
42287   __ret = (uint64x2_t)(__p0 <= __p1);
42288   return __ret;
42289 }
42290 #else
42291 __ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) {
42292   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
42293   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
42294   uint64x2_t __ret;
42295   __ret = (uint64x2_t)(__rev0 <= __rev1);
42296   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
42297   return __ret;
42298 }
42299 #endif
42300 
42301 #ifdef __LITTLE_ENDIAN__
42302 __ai uint64x1_t vcle_u64(uint64x1_t __p0, uint64x1_t __p1) {
42303   uint64x1_t __ret;
42304   __ret = (uint64x1_t)(__p0 <= __p1);
42305   return __ret;
42306 }
42307 #else
42308 __ai uint64x1_t vcle_u64(uint64x1_t __p0, uint64x1_t __p1) {
42309   uint64x1_t __ret;
42310   __ret = (uint64x1_t)(__p0 <= __p1);
42311   return __ret;
42312 }
42313 #endif
42314 
42315 #ifdef __LITTLE_ENDIAN__
42316 __ai uint64x1_t vcle_f64(float64x1_t __p0, float64x1_t __p1) {
42317   uint64x1_t __ret;
42318   __ret = (uint64x1_t)(__p0 <= __p1);
42319   return __ret;
42320 }
42321 #else
42322 __ai uint64x1_t vcle_f64(float64x1_t __p0, float64x1_t __p1) {
42323   uint64x1_t __ret;
42324   __ret = (uint64x1_t)(__p0 <= __p1);
42325   return __ret;
42326 }
42327 #endif
42328 
42329 #ifdef __LITTLE_ENDIAN__
42330 __ai uint64x1_t vcle_s64(int64x1_t __p0, int64x1_t __p1) {
42331   uint64x1_t __ret;
42332   __ret = (uint64x1_t)(__p0 <= __p1);
42333   return __ret;
42334 }
42335 #else
42336 __ai uint64x1_t vcle_s64(int64x1_t __p0, int64x1_t __p1) {
42337   uint64x1_t __ret;
42338   __ret = (uint64x1_t)(__p0 <= __p1);
42339   return __ret;
42340 }
42341 #endif
42342 
42343 #ifdef __LITTLE_ENDIAN__
42344 __ai uint64_t vcled_u64(uint64_t __p0, uint64_t __p1) {
42345   uint64_t __ret;
42346   __ret = (uint64_t) __builtin_neon_vcled_u64(__p0, __p1);
42347   return __ret;
42348 }
42349 #else
42350 __ai uint64_t vcled_u64(uint64_t __p0, uint64_t __p1) {
42351   uint64_t __ret;
42352   __ret = (uint64_t) __builtin_neon_vcled_u64(__p0, __p1);
42353   return __ret;
42354 }
42355 #endif
42356 
42357 #ifdef __LITTLE_ENDIAN__
42358 __ai int64_t vcled_s64(int64_t __p0, int64_t __p1) {
42359   int64_t __ret;
42360   __ret = (int64_t) __builtin_neon_vcled_s64(__p0, __p1);
42361   return __ret;
42362 }
42363 #else
42364 __ai int64_t vcled_s64(int64_t __p0, int64_t __p1) {
42365   int64_t __ret;
42366   __ret = (int64_t) __builtin_neon_vcled_s64(__p0, __p1);
42367   return __ret;
42368 }
42369 #endif
42370 
42371 #ifdef __LITTLE_ENDIAN__
42372 __ai uint64_t vcled_f64(float64_t __p0, float64_t __p1) {
42373   uint64_t __ret;
42374   __ret = (uint64_t) __builtin_neon_vcled_f64(__p0, __p1);
42375   return __ret;
42376 }
42377 #else
42378 __ai uint64_t vcled_f64(float64_t __p0, float64_t __p1) {
42379   uint64_t __ret;
42380   __ret = (uint64_t) __builtin_neon_vcled_f64(__p0, __p1);
42381   return __ret;
42382 }
42383 #endif
42384 
42385 #ifdef __LITTLE_ENDIAN__
42386 __ai uint32_t vcles_f32(float32_t __p0, float32_t __p1) {
42387   uint32_t __ret;
42388   __ret = (uint32_t) __builtin_neon_vcles_f32(__p0, __p1);
42389   return __ret;
42390 }
42391 #else
42392 __ai uint32_t vcles_f32(float32_t __p0, float32_t __p1) {
42393   uint32_t __ret;
42394   __ret = (uint32_t) __builtin_neon_vcles_f32(__p0, __p1);
42395   return __ret;
42396 }
42397 #endif
42398 
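/* Illustrative sketch: vcle(a, b) tests "a <= b", the same relation as
 * vcge(b, a) with the operands swapped; both produce the usual all-ones or
 * all-zeros masks. For example, assuming AArch64:
 *
 *   int64_t le = vcled_s64(-1, 0);   // expected: -1 (all bits set)
 */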
42399 #ifdef __LITTLE_ENDIAN__
42400 __ai uint8x16_t vclezq_s8(int8x16_t __p0) {
42401   uint8x16_t __ret;
42402   __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 48);
42403   return __ret;
42404 }
42405 #else
42406 __ai uint8x16_t vclezq_s8(int8x16_t __p0) {
42407   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
42408   uint8x16_t __ret;
42409   __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 48);
42410   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
42411   return __ret;
42412 }
42413 #endif
42414 
42415 #ifdef __LITTLE_ENDIAN__
42416 __ai uint64x2_t vclezq_f64(float64x2_t __p0) {
42417   uint64x2_t __ret;
42418   __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51);
42419   return __ret;
42420 }
42421 #else
42422 __ai uint64x2_t vclezq_f64(float64x2_t __p0) {
42423   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
42424   uint64x2_t __ret;
42425   __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51);
42426   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
42427   return __ret;
42428 }
42429 #endif
42430 
42431 #ifdef __LITTLE_ENDIAN__
42432 __ai uint32x4_t vclezq_f32(float32x4_t __p0) {
42433   uint32x4_t __ret;
42434   __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50);
42435   return __ret;
42436 }
42437 #else
42438 __ai uint32x4_t vclezq_f32(float32x4_t __p0) {
42439   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
42440   uint32x4_t __ret;
42441   __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50);
42442   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
42443   return __ret;
42444 }
42445 #endif
42446 
42447 #ifdef __LITTLE_ENDIAN__
42448 __ai uint32x4_t vclezq_s32(int32x4_t __p0) {
42449   uint32x4_t __ret;
42450   __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50);
42451   return __ret;
42452 }
42453 #else
42454 __ai uint32x4_t vclezq_s32(int32x4_t __p0) {
42455   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
42456   uint32x4_t __ret;
42457   __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50);
42458   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
42459   return __ret;
42460 }
42461 #endif
42462 
42463 #ifdef __LITTLE_ENDIAN__
42464 __ai uint64x2_t vclezq_s64(int64x2_t __p0) {
42465   uint64x2_t __ret;
42466   __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51);
42467   return __ret;
42468 }
42469 #else
42470 __ai uint64x2_t vclezq_s64(int64x2_t __p0) {
42471   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
42472   uint64x2_t __ret;
42473   __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51);
42474   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
42475   return __ret;
42476 }
42477 #endif
42478 
42479 #ifdef __LITTLE_ENDIAN__
42480 __ai uint16x8_t vclezq_s16(int16x8_t __p0) {
42481   uint16x8_t __ret;
42482   __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49);
42483   return __ret;
42484 }
42485 #else
42486 __ai uint16x8_t vclezq_s16(int16x8_t __p0) {
42487   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
42488   uint16x8_t __ret;
42489   __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49);
42490   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
42491   return __ret;
42492 }
42493 #endif
42494 
42495 #ifdef __LITTLE_ENDIAN__
42496 __ai uint8x8_t vclez_s8(int8x8_t __p0) {
42497   uint8x8_t __ret;
42498   __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__p0, 16);
42499   return __ret;
42500 }
42501 #else
42502 __ai uint8x8_t vclez_s8(int8x8_t __p0) {
42503   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
42504   uint8x8_t __ret;
42505   __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 16);
42506   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
42507   return __ret;
42508 }
42509 #endif
42510 
42511 #ifdef __LITTLE_ENDIAN__
42512 __ai uint64x1_t vclez_f64(float64x1_t __p0) {
42513   uint64x1_t __ret;
42514   __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
42515   return __ret;
42516 }
42517 #else
42518 __ai uint64x1_t vclez_f64(float64x1_t __p0) {
42519   uint64x1_t __ret;
42520   __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
42521   return __ret;
42522 }
42523 #endif
42524 
42525 #ifdef __LITTLE_ENDIAN__
42526 __ai uint32x2_t vclez_f32(float32x2_t __p0) {
42527   uint32x2_t __ret;
42528   __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18);
42529   return __ret;
42530 }
42531 #else
42532 __ai uint32x2_t vclez_f32(float32x2_t __p0) {
42533   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
42534   uint32x2_t __ret;
42535   __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18);
42536   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
42537   return __ret;
42538 }
42539 #endif
42540 
42541 #ifdef __LITTLE_ENDIAN__
42542 __ai uint32x2_t vclez_s32(int32x2_t __p0) {
42543   uint32x2_t __ret;
42544   __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18);
42545   return __ret;
42546 }
42547 #else
42548 __ai uint32x2_t vclez_s32(int32x2_t __p0) {
42549   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
42550   uint32x2_t __ret;
42551   __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18);
42552   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
42553   return __ret;
42554 }
42555 #endif
42556 
42557 #ifdef __LITTLE_ENDIAN__
42558 __ai uint64x1_t vclez_s64(int64x1_t __p0) {
42559   uint64x1_t __ret;
42560   __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
42561   return __ret;
42562 }
42563 #else
42564 __ai uint64x1_t vclez_s64(int64x1_t __p0) {
42565   uint64x1_t __ret;
42566   __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
42567   return __ret;
42568 }
42569 #endif
42570 
42571 #ifdef __LITTLE_ENDIAN__
42572 __ai uint16x4_t vclez_s16(int16x4_t __p0) {
42573   uint16x4_t __ret;
42574   __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17);
42575   return __ret;
42576 }
42577 #else
42578 __ai uint16x4_t vclez_s16(int16x4_t __p0) {
42579   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
42580   uint16x4_t __ret;
42581   __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17);
42582   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
42583   return __ret;
42584 }
42585 #endif
42586 
42587 #ifdef __LITTLE_ENDIAN__
42588 __ai int64_t vclezd_s64(int64_t __p0) {
42589   int64_t __ret;
42590   __ret = (int64_t) __builtin_neon_vclezd_s64(__p0);
42591   return __ret;
42592 }
42593 #else
42594 __ai int64_t vclezd_s64(int64_t __p0) {
42595   int64_t __ret;
42596   __ret = (int64_t) __builtin_neon_vclezd_s64(__p0);
42597   return __ret;
42598 }
42599 #endif
42600 
42601 #ifdef __LITTLE_ENDIAN__
42602 __ai uint64_t vclezd_f64(float64_t __p0) {
42603   uint64_t __ret;
42604   __ret = (uint64_t) __builtin_neon_vclezd_f64(__p0);
42605   return __ret;
42606 }
42607 #else
42608 __ai uint64_t vclezd_f64(float64_t __p0) {
42609   uint64_t __ret;
42610   __ret = (uint64_t) __builtin_neon_vclezd_f64(__p0);
42611   return __ret;
42612 }
42613 #endif
42614 
42615 #ifdef __LITTLE_ENDIAN__
42616 __ai uint32_t vclezs_f32(float32_t __p0) {
42617   uint32_t __ret;
42618   __ret = (uint32_t) __builtin_neon_vclezs_f32(__p0);
42619   return __ret;
42620 }
42621 #else
42622 __ai uint32_t vclezs_f32(float32_t __p0) {
42623   uint32_t __ret;
42624   __ret = (uint32_t) __builtin_neon_vclezs_f32(__p0);
42625   return __ret;
42626 }
42627 #endif
42628 
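/* vclt family: lane-wise and scalar "compare less than". The 64-bit vector
 * forms below are written directly with the C '<' operator on vector types,
 * which yields an all-ones or all-zeros lane per comparison; the cast to the
 * corresponding unsigned vector type reinterprets that result as the mask
 * type the intrinsic returns. Big-endian variants reverse operand and result
 * lanes around the comparison, exactly as in the families above. */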
42629 #ifdef __LITTLE_ENDIAN__
42630 __ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) {
42631   uint64x2_t __ret;
42632   __ret = (uint64x2_t)(__p0 < __p1);
42633   return __ret;
42634 }
42635 #else
42636 __ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) {
42637   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
42638   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
42639   uint64x2_t __ret;
42640   __ret = (uint64x2_t)(__rev0 < __rev1);
42641   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
42642   return __ret;
42643 }
42644 #endif
42645 
42646 #ifdef __LITTLE_ENDIAN__
42647 __ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) {
42648   uint64x2_t __ret;
42649   __ret = (uint64x2_t)(__p0 < __p1);
42650   return __ret;
42651 }
42652 #else
42653 __ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) {
42654   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
42655   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
42656   uint64x2_t __ret;
42657   __ret = (uint64x2_t)(__rev0 < __rev1);
42658   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
42659   return __ret;
42660 }
42661 #endif
42662 
42663 #ifdef __LITTLE_ENDIAN__
42664 __ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) {
42665   uint64x2_t __ret;
42666   __ret = (uint64x2_t)(__p0 < __p1);
42667   return __ret;
42668 }
42669 #else
42670 __ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) {
42671   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
42672   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
42673   uint64x2_t __ret;
42674   __ret = (uint64x2_t)(__rev0 < __rev1);
42675   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
42676   return __ret;
42677 }
42678 #endif
42679 
42680 #ifdef __LITTLE_ENDIAN__
42681 __ai uint64x1_t vclt_u64(uint64x1_t __p0, uint64x1_t __p1) {
42682   uint64x1_t __ret;
42683   __ret = (uint64x1_t)(__p0 < __p1);
42684   return __ret;
42685 }
42686 #else
42687 __ai uint64x1_t vclt_u64(uint64x1_t __p0, uint64x1_t __p1) {
42688   uint64x1_t __ret;
42689   __ret = (uint64x1_t)(__p0 < __p1);
42690   return __ret;
42691 }
42692 #endif
42693 
42694 #ifdef __LITTLE_ENDIAN__
42695 __ai uint64x1_t vclt_f64(float64x1_t __p0, float64x1_t __p1) {
42696   uint64x1_t __ret;
42697   __ret = (uint64x1_t)(__p0 < __p1);
42698   return __ret;
42699 }
42700 #else
42701 __ai uint64x1_t vclt_f64(float64x1_t __p0, float64x1_t __p1) {
42702   uint64x1_t __ret;
42703   __ret = (uint64x1_t)(__p0 < __p1);
42704   return __ret;
42705 }
42706 #endif
42707 
42708 #ifdef __LITTLE_ENDIAN__
42709 __ai uint64x1_t vclt_s64(int64x1_t __p0, int64x1_t __p1) {
42710   uint64x1_t __ret;
42711   __ret = (uint64x1_t)(__p0 < __p1);
42712   return __ret;
42713 }
42714 #else
42715 __ai uint64x1_t vclt_s64(int64x1_t __p0, int64x1_t __p1) {
42716   uint64x1_t __ret;
42717   __ret = (uint64x1_t)(__p0 < __p1);
42718   return __ret;
42719 }
42720 #endif
42721 
42722 #ifdef __LITTLE_ENDIAN__
42723 __ai uint64_t vcltd_u64(uint64_t __p0, uint64_t __p1) {
42724   uint64_t __ret;
42725   __ret = (uint64_t) __builtin_neon_vcltd_u64(__p0, __p1);
42726   return __ret;
42727 }
42728 #else
42729 __ai uint64_t vcltd_u64(uint64_t __p0, uint64_t __p1) {
42730   uint64_t __ret;
42731   __ret = (uint64_t) __builtin_neon_vcltd_u64(__p0, __p1);
42732   return __ret;
42733 }
42734 #endif
42735 
42736 #ifdef __LITTLE_ENDIAN__
42737 __ai int64_t vcltd_s64(int64_t __p0, int64_t __p1) {
42738   int64_t __ret;
42739   __ret = (int64_t) __builtin_neon_vcltd_s64(__p0, __p1);
42740   return __ret;
42741 }
42742 #else
42743 __ai int64_t vcltd_s64(int64_t __p0, int64_t __p1) {
42744   int64_t __ret;
42745   __ret = (int64_t) __builtin_neon_vcltd_s64(__p0, __p1);
42746   return __ret;
42747 }
42748 #endif
42749 
42750 #ifdef __LITTLE_ENDIAN__
42751 __ai uint64_t vcltd_f64(float64_t __p0, float64_t __p1) {
42752   uint64_t __ret;
42753   __ret = (uint64_t) __builtin_neon_vcltd_f64(__p0, __p1);
42754   return __ret;
42755 }
42756 #else
42757 __ai uint64_t vcltd_f64(float64_t __p0, float64_t __p1) {
42758   uint64_t __ret;
42759   __ret = (uint64_t) __builtin_neon_vcltd_f64(__p0, __p1);
42760   return __ret;
42761 }
42762 #endif
42763 
42764 #ifdef __LITTLE_ENDIAN__
42765 __ai uint32_t vclts_f32(float32_t __p0, float32_t __p1) {
42766   uint32_t __ret;
42767   __ret = (uint32_t) __builtin_neon_vclts_f32(__p0, __p1);
42768   return __ret;
42769 }
42770 #else
42771 __ai uint32_t vclts_f32(float32_t __p0, float32_t __p1) {
42772   uint32_t __ret;
42773   __ret = (uint32_t) __builtin_neon_vclts_f32(__p0, __p1);
42774   return __ret;
42775 }
42776 #endif
42777 
42778 #ifdef __LITTLE_ENDIAN__
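/* vcltz family: lane-wise "compare less than zero", built the same way as the
 * vclez functions above but lowered through __builtin_neon_vcltzq_v and
 * __builtin_neon_vcltz_v. The vcltzd and vcltzs forms are the scalar
 * single-element variants for 64-bit and 32-bit inputs respectively. */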
42779 __ai uint8x16_t vcltzq_s8(int8x16_t __p0) {
42780   uint8x16_t __ret;
42781   __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 48);
42782   return __ret;
42783 }
42784 #else
42785 __ai uint8x16_t vcltzq_s8(int8x16_t __p0) {
42786   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
42787   uint8x16_t __ret;
42788   __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 48);
42789   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
42790   return __ret;
42791 }
42792 #endif
42793 
42794 #ifdef __LITTLE_ENDIAN__
42795 __ai uint64x2_t vcltzq_f64(float64x2_t __p0) {
42796   uint64x2_t __ret;
42797   __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51);
42798   return __ret;
42799 }
42800 #else
42801 __ai uint64x2_t vcltzq_f64(float64x2_t __p0) {
42802   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
42803   uint64x2_t __ret;
42804   __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51);
42805   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
42806   return __ret;
42807 }
42808 #endif
42809 
42810 #ifdef __LITTLE_ENDIAN__
42811 __ai uint32x4_t vcltzq_f32(float32x4_t __p0) {
42812   uint32x4_t __ret;
42813   __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50);
42814   return __ret;
42815 }
42816 #else
42817 __ai uint32x4_t vcltzq_f32(float32x4_t __p0) {
42818   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
42819   uint32x4_t __ret;
42820   __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50);
42821   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
42822   return __ret;
42823 }
42824 #endif
42825 
42826 #ifdef __LITTLE_ENDIAN__
42827 __ai uint32x4_t vcltzq_s32(int32x4_t __p0) {
42828   uint32x4_t __ret;
42829   __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50);
42830   return __ret;
42831 }
42832 #else
42833 __ai uint32x4_t vcltzq_s32(int32x4_t __p0) {
42834   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
42835   uint32x4_t __ret;
42836   __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50);
42837   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
42838   return __ret;
42839 }
42840 #endif
42841 
42842 #ifdef __LITTLE_ENDIAN__
42843 __ai uint64x2_t vcltzq_s64(int64x2_t __p0) {
42844   uint64x2_t __ret;
42845   __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51);
42846   return __ret;
42847 }
42848 #else
42849 __ai uint64x2_t vcltzq_s64(int64x2_t __p0) {
42850   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
42851   uint64x2_t __ret;
42852   __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51);
42853   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
42854   return __ret;
42855 }
42856 #endif
42857 
42858 #ifdef __LITTLE_ENDIAN__
42859 __ai uint16x8_t vcltzq_s16(int16x8_t __p0) {
42860   uint16x8_t __ret;
42861   __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49);
42862   return __ret;
42863 }
42864 #else
42865 __ai uint16x8_t vcltzq_s16(int16x8_t __p0) {
42866   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
42867   uint16x8_t __ret;
42868   __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49);
42869   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
42870   return __ret;
42871 }
42872 #endif
42873 
42874 #ifdef __LITTLE_ENDIAN__
42875 __ai uint8x8_t vcltz_s8(int8x8_t __p0) {
42876   uint8x8_t __ret;
42877   __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 16);
42878   return __ret;
42879 }
42880 #else
42881 __ai uint8x8_t vcltz_s8(int8x8_t __p0) {
42882   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
42883   uint8x8_t __ret;
42884   __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 16);
42885   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
42886   return __ret;
42887 }
42888 #endif
42889 
42890 #ifdef __LITTLE_ENDIAN__
42891 __ai uint64x1_t vcltz_f64(float64x1_t __p0) {
42892   uint64x1_t __ret;
42893   __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
42894   return __ret;
42895 }
42896 #else
42897 __ai uint64x1_t vcltz_f64(float64x1_t __p0) {
42898   uint64x1_t __ret;
42899   __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
42900   return __ret;
42901 }
42902 #endif
42903 
42904 #ifdef __LITTLE_ENDIAN__
42905 __ai uint32x2_t vcltz_f32(float32x2_t __p0) {
42906   uint32x2_t __ret;
42907   __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18);
42908   return __ret;
42909 }
42910 #else
42911 __ai uint32x2_t vcltz_f32(float32x2_t __p0) {
42912   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
42913   uint32x2_t __ret;
42914   __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18);
42915   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
42916   return __ret;
42917 }
42918 #endif
42919 
42920 #ifdef __LITTLE_ENDIAN__
42921 __ai uint32x2_t vcltz_s32(int32x2_t __p0) {
42922   uint32x2_t __ret;
42923   __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18);
42924   return __ret;
42925 }
42926 #else
42927 __ai uint32x2_t vcltz_s32(int32x2_t __p0) {
42928   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
42929   uint32x2_t __ret;
42930   __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18);
42931   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
42932   return __ret;
42933 }
42934 #endif
42935 
42936 #ifdef __LITTLE_ENDIAN__
42937 __ai uint64x1_t vcltz_s64(int64x1_t __p0) {
42938   uint64x1_t __ret;
42939   __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
42940   return __ret;
42941 }
42942 #else
42943 __ai uint64x1_t vcltz_s64(int64x1_t __p0) {
42944   uint64x1_t __ret;
42945   __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
42946   return __ret;
42947 }
42948 #endif
42949 
42950 #ifdef __LITTLE_ENDIAN__
42951 __ai uint16x4_t vcltz_s16(int16x4_t __p0) {
42952   uint16x4_t __ret;
42953   __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17);
42954   return __ret;
42955 }
42956 #else
42957 __ai uint16x4_t vcltz_s16(int16x4_t __p0) {
42958   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
42959   uint16x4_t __ret;
42960   __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17);
42961   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
42962   return __ret;
42963 }
42964 #endif
42965 
42966 #ifdef __LITTLE_ENDIAN__
42967 __ai int64_t vcltzd_s64(int64_t __p0) {
42968   int64_t __ret;
42969   __ret = (int64_t) __builtin_neon_vcltzd_s64(__p0);
42970   return __ret;
42971 }
42972 #else
42973 __ai int64_t vcltzd_s64(int64_t __p0) {
42974   int64_t __ret;
42975   __ret = (int64_t) __builtin_neon_vcltzd_s64(__p0);
42976   return __ret;
42977 }
42978 #endif
42979 
42980 #ifdef __LITTLE_ENDIAN__
42981 __ai uint64_t vcltzd_f64(float64_t __p0) {
42982   uint64_t __ret;
42983   __ret = (uint64_t) __builtin_neon_vcltzd_f64(__p0);
42984   return __ret;
42985 }
42986 #else
42987 __ai uint64_t vcltzd_f64(float64_t __p0) {
42988   uint64_t __ret;
42989   __ret = (uint64_t) __builtin_neon_vcltzd_f64(__p0);
42990   return __ret;
42991 }
42992 #endif
42993 
42994 #ifdef __LITTLE_ENDIAN__
42995 __ai uint32_t vcltzs_f32(float32_t __p0) {
42996   uint32_t __ret;
42997   __ret = (uint32_t) __builtin_neon_vcltzs_f32(__p0);
42998   return __ret;
42999 }
43000 #else
43001 __ai uint32_t vcltzs_f32(float32_t __p0) {
43002   uint32_t __ret;
43003   __ret = (uint32_t) __builtin_neon_vcltzs_f32(__p0);
43004   return __ret;
43005 }
43006 #endif
43007 
43008 #ifdef __LITTLE_ENDIAN__
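/* vcombine joins two 64-bit (one-lane for these element types) vectors into a
 * single 128-bit vector: the first argument becomes the low half (lane 0) and
 * the second argument the high half (lane 1). Illustrative use only:
 *
 *   float64x1_t lo = vdup_n_f64(1.0);
 *   float64x1_t hi = vdup_n_f64(2.0);
 *   float64x2_t both = vcombine_f64(lo, hi);   // lanes {1.0, 2.0}
 *
 * The big-endian variant builds the vector in little-endian lane order and
 * then reverses it so the user-visible lane indices match. */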
43009 __ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) {
43010   poly64x2_t __ret;
43011   __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
43012   return __ret;
43013 }
43014 #else
43015 __ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) {
43016   poly64x2_t __ret;
43017   __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
43018   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
43019   return __ret;
43020 }
43021 #endif
43022 
43023 #ifdef __LITTLE_ENDIAN__
43024 __ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) {
43025   float64x2_t __ret;
43026   __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
43027   return __ret;
43028 }
43029 #else
43030 __ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) {
43031   float64x2_t __ret;
43032   __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
43033   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
43034   return __ret;
43035 }
43036 #endif
43037 
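/* vcopy lane macros: copy one lane of the source vector (third argument,
 * selected by the fourth argument) into one lane of the destination vector
 * (first argument, selected by the second argument), leaving all other
 * destination lanes unchanged. Each expansion is a vget or vgetq of the
 * source lane followed by a vset or vsetq into the destination; the numeric
 * suffixes on the temporaries (__s0_0, __ret_1, ...) only keep each
 * expansion's locals unique. Big-endian expansions use the __noswap_ helpers
 * on lane-reversed copies so that user-visible lane indices stay
 * endian-independent. */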
43038 #ifdef __LITTLE_ENDIAN__
43039 #define vcopyq_lane_p8(__p0_0, __p1_0, __p2_0, __p3_0) __extension__ ({ \
43040   poly8x16_t __s0_0 = __p0_0; \
43041   poly8x8_t __s2_0 = __p2_0; \
43042   poly8x16_t __ret_0; \
43043   __ret_0 = vsetq_lane_p8(vget_lane_p8(__s2_0, __p3_0), __s0_0, __p1_0); \
43044   __ret_0; \
43045 })
43046 #else
43047 #define vcopyq_lane_p8(__p0_1, __p1_1, __p2_1, __p3_1) __extension__ ({ \
43048   poly8x16_t __s0_1 = __p0_1; \
43049   poly8x8_t __s2_1 = __p2_1; \
43050   poly8x16_t __rev0_1;  __rev0_1 = __builtin_shufflevector(__s0_1, __s0_1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
43051   poly8x8_t __rev2_1;  __rev2_1 = __builtin_shufflevector(__s2_1, __s2_1, 7, 6, 5, 4, 3, 2, 1, 0); \
43052   poly8x16_t __ret_1; \
43053   __ret_1 = __noswap_vsetq_lane_p8(__noswap_vget_lane_p8(__rev2_1, __p3_1), __rev0_1, __p1_1); \
43054   __ret_1 = __builtin_shufflevector(__ret_1, __ret_1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
43055   __ret_1; \
43056 })
43057 #endif
43058 
43059 #ifdef __LITTLE_ENDIAN__
43060 #define vcopyq_lane_p16(__p0_2, __p1_2, __p2_2, __p3_2) __extension__ ({ \
43061   poly16x8_t __s0_2 = __p0_2; \
43062   poly16x4_t __s2_2 = __p2_2; \
43063   poly16x8_t __ret_2; \
43064   __ret_2 = vsetq_lane_p16(vget_lane_p16(__s2_2, __p3_2), __s0_2, __p1_2); \
43065   __ret_2; \
43066 })
43067 #else
43068 #define vcopyq_lane_p16(__p0_3, __p1_3, __p2_3, __p3_3) __extension__ ({ \
43069   poly16x8_t __s0_3 = __p0_3; \
43070   poly16x4_t __s2_3 = __p2_3; \
43071   poly16x8_t __rev0_3;  __rev0_3 = __builtin_shufflevector(__s0_3, __s0_3, 7, 6, 5, 4, 3, 2, 1, 0); \
43072   poly16x4_t __rev2_3;  __rev2_3 = __builtin_shufflevector(__s2_3, __s2_3, 3, 2, 1, 0); \
43073   poly16x8_t __ret_3; \
43074   __ret_3 = __noswap_vsetq_lane_p16(__noswap_vget_lane_p16(__rev2_3, __p3_3), __rev0_3, __p1_3); \
43075   __ret_3 = __builtin_shufflevector(__ret_3, __ret_3, 7, 6, 5, 4, 3, 2, 1, 0); \
43076   __ret_3; \
43077 })
43078 #endif
43079 
43080 #ifdef __LITTLE_ENDIAN__
43081 #define vcopyq_lane_u8(__p0_4, __p1_4, __p2_4, __p3_4) __extension__ ({ \
43082   uint8x16_t __s0_4 = __p0_4; \
43083   uint8x8_t __s2_4 = __p2_4; \
43084   uint8x16_t __ret_4; \
43085   __ret_4 = vsetq_lane_u8(vget_lane_u8(__s2_4, __p3_4), __s0_4, __p1_4); \
43086   __ret_4; \
43087 })
43088 #else
43089 #define vcopyq_lane_u8(__p0_5, __p1_5, __p2_5, __p3_5) __extension__ ({ \
43090   uint8x16_t __s0_5 = __p0_5; \
43091   uint8x8_t __s2_5 = __p2_5; \
43092   uint8x16_t __rev0_5;  __rev0_5 = __builtin_shufflevector(__s0_5, __s0_5, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
43093   uint8x8_t __rev2_5;  __rev2_5 = __builtin_shufflevector(__s2_5, __s2_5, 7, 6, 5, 4, 3, 2, 1, 0); \
43094   uint8x16_t __ret_5; \
43095   __ret_5 = __noswap_vsetq_lane_u8(__noswap_vget_lane_u8(__rev2_5, __p3_5), __rev0_5, __p1_5); \
43096   __ret_5 = __builtin_shufflevector(__ret_5, __ret_5, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
43097   __ret_5; \
43098 })
43099 #endif
43100 
43101 #ifdef __LITTLE_ENDIAN__
43102 #define vcopyq_lane_u32(__p0_6, __p1_6, __p2_6, __p3_6) __extension__ ({ \
43103   uint32x4_t __s0_6 = __p0_6; \
43104   uint32x2_t __s2_6 = __p2_6; \
43105   uint32x4_t __ret_6; \
43106   __ret_6 = vsetq_lane_u32(vget_lane_u32(__s2_6, __p3_6), __s0_6, __p1_6); \
43107   __ret_6; \
43108 })
43109 #else
43110 #define vcopyq_lane_u32(__p0_7, __p1_7, __p2_7, __p3_7) __extension__ ({ \
43111   uint32x4_t __s0_7 = __p0_7; \
43112   uint32x2_t __s2_7 = __p2_7; \
43113   uint32x4_t __rev0_7;  __rev0_7 = __builtin_shufflevector(__s0_7, __s0_7, 3, 2, 1, 0); \
43114   uint32x2_t __rev2_7;  __rev2_7 = __builtin_shufflevector(__s2_7, __s2_7, 1, 0); \
43115   uint32x4_t __ret_7; \
43116   __ret_7 = __noswap_vsetq_lane_u32(__noswap_vget_lane_u32(__rev2_7, __p3_7), __rev0_7, __p1_7); \
43117   __ret_7 = __builtin_shufflevector(__ret_7, __ret_7, 3, 2, 1, 0); \
43118   __ret_7; \
43119 })
43120 #endif
43121 
43122 #ifdef __LITTLE_ENDIAN__
43123 #define vcopyq_lane_u64(__p0_8, __p1_8, __p2_8, __p3_8) __extension__ ({ \
43124   uint64x2_t __s0_8 = __p0_8; \
43125   uint64x1_t __s2_8 = __p2_8; \
43126   uint64x2_t __ret_8; \
43127   __ret_8 = vsetq_lane_u64(vget_lane_u64(__s2_8, __p3_8), __s0_8, __p1_8); \
43128   __ret_8; \
43129 })
43130 #else
43131 #define vcopyq_lane_u64(__p0_9, __p1_9, __p2_9, __p3_9) __extension__ ({ \
43132   uint64x2_t __s0_9 = __p0_9; \
43133   uint64x1_t __s2_9 = __p2_9; \
43134   uint64x2_t __rev0_9;  __rev0_9 = __builtin_shufflevector(__s0_9, __s0_9, 1, 0); \
43135   uint64x2_t __ret_9; \
43136   __ret_9 = __noswap_vsetq_lane_u64(__noswap_vget_lane_u64(__s2_9, __p3_9), __rev0_9, __p1_9); \
43137   __ret_9 = __builtin_shufflevector(__ret_9, __ret_9, 1, 0); \
43138   __ret_9; \
43139 })
43140 #endif
43141 
43142 #ifdef __LITTLE_ENDIAN__
43143 #define vcopyq_lane_u16(__p0_10, __p1_10, __p2_10, __p3_10) __extension__ ({ \
43144   uint16x8_t __s0_10 = __p0_10; \
43145   uint16x4_t __s2_10 = __p2_10; \
43146   uint16x8_t __ret_10; \
43147   __ret_10 = vsetq_lane_u16(vget_lane_u16(__s2_10, __p3_10), __s0_10, __p1_10); \
43148   __ret_10; \
43149 })
43150 #else
43151 #define vcopyq_lane_u16(__p0_11, __p1_11, __p2_11, __p3_11) __extension__ ({ \
43152   uint16x8_t __s0_11 = __p0_11; \
43153   uint16x4_t __s2_11 = __p2_11; \
43154   uint16x8_t __rev0_11;  __rev0_11 = __builtin_shufflevector(__s0_11, __s0_11, 7, 6, 5, 4, 3, 2, 1, 0); \
43155   uint16x4_t __rev2_11;  __rev2_11 = __builtin_shufflevector(__s2_11, __s2_11, 3, 2, 1, 0); \
43156   uint16x8_t __ret_11; \
43157   __ret_11 = __noswap_vsetq_lane_u16(__noswap_vget_lane_u16(__rev2_11, __p3_11), __rev0_11, __p1_11); \
43158   __ret_11 = __builtin_shufflevector(__ret_11, __ret_11, 7, 6, 5, 4, 3, 2, 1, 0); \
43159   __ret_11; \
43160 })
43161 #endif
43162 
43163 #ifdef __LITTLE_ENDIAN__
43164 #define vcopyq_lane_s8(__p0_12, __p1_12, __p2_12, __p3_12) __extension__ ({ \
43165   int8x16_t __s0_12 = __p0_12; \
43166   int8x8_t __s2_12 = __p2_12; \
43167   int8x16_t __ret_12; \
43168   __ret_12 = vsetq_lane_s8(vget_lane_s8(__s2_12, __p3_12), __s0_12, __p1_12); \
43169   __ret_12; \
43170 })
43171 #else
43172 #define vcopyq_lane_s8(__p0_13, __p1_13, __p2_13, __p3_13) __extension__ ({ \
43173   int8x16_t __s0_13 = __p0_13; \
43174   int8x8_t __s2_13 = __p2_13; \
43175   int8x16_t __rev0_13;  __rev0_13 = __builtin_shufflevector(__s0_13, __s0_13, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
43176   int8x8_t __rev2_13;  __rev2_13 = __builtin_shufflevector(__s2_13, __s2_13, 7, 6, 5, 4, 3, 2, 1, 0); \
43177   int8x16_t __ret_13; \
43178   __ret_13 = __noswap_vsetq_lane_s8(__noswap_vget_lane_s8(__rev2_13, __p3_13), __rev0_13, __p1_13); \
43179   __ret_13 = __builtin_shufflevector(__ret_13, __ret_13, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
43180   __ret_13; \
43181 })
43182 #endif
43183 
43184 #ifdef __LITTLE_ENDIAN__
43185 #define vcopyq_lane_f32(__p0_14, __p1_14, __p2_14, __p3_14) __extension__ ({ \
43186   float32x4_t __s0_14 = __p0_14; \
43187   float32x2_t __s2_14 = __p2_14; \
43188   float32x4_t __ret_14; \
43189   __ret_14 = vsetq_lane_f32(vget_lane_f32(__s2_14, __p3_14), __s0_14, __p1_14); \
43190   __ret_14; \
43191 })
43192 #else
43193 #define vcopyq_lane_f32(__p0_15, __p1_15, __p2_15, __p3_15) __extension__ ({ \
43194   float32x4_t __s0_15 = __p0_15; \
43195   float32x2_t __s2_15 = __p2_15; \
43196   float32x4_t __rev0_15;  __rev0_15 = __builtin_shufflevector(__s0_15, __s0_15, 3, 2, 1, 0); \
43197   float32x2_t __rev2_15;  __rev2_15 = __builtin_shufflevector(__s2_15, __s2_15, 1, 0); \
43198   float32x4_t __ret_15; \
43199   __ret_15 = __noswap_vsetq_lane_f32(__noswap_vget_lane_f32(__rev2_15, __p3_15), __rev0_15, __p1_15); \
43200   __ret_15 = __builtin_shufflevector(__ret_15, __ret_15, 3, 2, 1, 0); \
43201   __ret_15; \
43202 })
43203 #endif
43204 
43205 #ifdef __LITTLE_ENDIAN__
43206 #define vcopyq_lane_s32(__p0_16, __p1_16, __p2_16, __p3_16) __extension__ ({ \
43207   int32x4_t __s0_16 = __p0_16; \
43208   int32x2_t __s2_16 = __p2_16; \
43209   int32x4_t __ret_16; \
43210   __ret_16 = vsetq_lane_s32(vget_lane_s32(__s2_16, __p3_16), __s0_16, __p1_16); \
43211   __ret_16; \
43212 })
43213 #else
43214 #define vcopyq_lane_s32(__p0_17, __p1_17, __p2_17, __p3_17) __extension__ ({ \
43215   int32x4_t __s0_17 = __p0_17; \
43216   int32x2_t __s2_17 = __p2_17; \
43217   int32x4_t __rev0_17;  __rev0_17 = __builtin_shufflevector(__s0_17, __s0_17, 3, 2, 1, 0); \
43218   int32x2_t __rev2_17;  __rev2_17 = __builtin_shufflevector(__s2_17, __s2_17, 1, 0); \
43219   int32x4_t __ret_17; \
43220   __ret_17 = __noswap_vsetq_lane_s32(__noswap_vget_lane_s32(__rev2_17, __p3_17), __rev0_17, __p1_17); \
43221   __ret_17 = __builtin_shufflevector(__ret_17, __ret_17, 3, 2, 1, 0); \
43222   __ret_17; \
43223 })
43224 #endif
43225 
43226 #ifdef __LITTLE_ENDIAN__
43227 #define vcopyq_lane_s64(__p0_18, __p1_18, __p2_18, __p3_18) __extension__ ({ \
43228   int64x2_t __s0_18 = __p0_18; \
43229   int64x1_t __s2_18 = __p2_18; \
43230   int64x2_t __ret_18; \
43231   __ret_18 = vsetq_lane_s64(vget_lane_s64(__s2_18, __p3_18), __s0_18, __p1_18); \
43232   __ret_18; \
43233 })
43234 #else
43235 #define vcopyq_lane_s64(__p0_19, __p1_19, __p2_19, __p3_19) __extension__ ({ \
43236   int64x2_t __s0_19 = __p0_19; \
43237   int64x1_t __s2_19 = __p2_19; \
43238   int64x2_t __rev0_19;  __rev0_19 = __builtin_shufflevector(__s0_19, __s0_19, 1, 0); \
43239   int64x2_t __ret_19; \
43240   __ret_19 = __noswap_vsetq_lane_s64(__noswap_vget_lane_s64(__s2_19, __p3_19), __rev0_19, __p1_19); \
43241   __ret_19 = __builtin_shufflevector(__ret_19, __ret_19, 1, 0); \
43242   __ret_19; \
43243 })
43244 #endif
43245 
43246 #ifdef __LITTLE_ENDIAN__
43247 #define vcopyq_lane_s16(__p0_20, __p1_20, __p2_20, __p3_20) __extension__ ({ \
43248   int16x8_t __s0_20 = __p0_20; \
43249   int16x4_t __s2_20 = __p2_20; \
43250   int16x8_t __ret_20; \
43251   __ret_20 = vsetq_lane_s16(vget_lane_s16(__s2_20, __p3_20), __s0_20, __p1_20); \
43252   __ret_20; \
43253 })
43254 #else
43255 #define vcopyq_lane_s16(__p0_21, __p1_21, __p2_21, __p3_21) __extension__ ({ \
43256   int16x8_t __s0_21 = __p0_21; \
43257   int16x4_t __s2_21 = __p2_21; \
43258   int16x8_t __rev0_21;  __rev0_21 = __builtin_shufflevector(__s0_21, __s0_21, 7, 6, 5, 4, 3, 2, 1, 0); \
43259   int16x4_t __rev2_21;  __rev2_21 = __builtin_shufflevector(__s2_21, __s2_21, 3, 2, 1, 0); \
43260   int16x8_t __ret_21; \
43261   __ret_21 = __noswap_vsetq_lane_s16(__noswap_vget_lane_s16(__rev2_21, __p3_21), __rev0_21, __p1_21); \
43262   __ret_21 = __builtin_shufflevector(__ret_21, __ret_21, 7, 6, 5, 4, 3, 2, 1, 0); \
43263   __ret_21; \
43264 })
43265 #endif
43266 
43267 #ifdef __LITTLE_ENDIAN__
43268 #define vcopy_lane_p8(__p0_22, __p1_22, __p2_22, __p3_22) __extension__ ({ \
43269   poly8x8_t __s0_22 = __p0_22; \
43270   poly8x8_t __s2_22 = __p2_22; \
43271   poly8x8_t __ret_22; \
43272   __ret_22 = vset_lane_p8(vget_lane_p8(__s2_22, __p3_22), __s0_22, __p1_22); \
43273   __ret_22; \
43274 })
43275 #else
43276 #define vcopy_lane_p8(__p0_23, __p1_23, __p2_23, __p3_23) __extension__ ({ \
43277   poly8x8_t __s0_23 = __p0_23; \
43278   poly8x8_t __s2_23 = __p2_23; \
43279   poly8x8_t __rev0_23;  __rev0_23 = __builtin_shufflevector(__s0_23, __s0_23, 7, 6, 5, 4, 3, 2, 1, 0); \
43280   poly8x8_t __rev2_23;  __rev2_23 = __builtin_shufflevector(__s2_23, __s2_23, 7, 6, 5, 4, 3, 2, 1, 0); \
43281   poly8x8_t __ret_23; \
43282   __ret_23 = __noswap_vset_lane_p8(__noswap_vget_lane_p8(__rev2_23, __p3_23), __rev0_23, __p1_23); \
43283   __ret_23 = __builtin_shufflevector(__ret_23, __ret_23, 7, 6, 5, 4, 3, 2, 1, 0); \
43284   __ret_23; \
43285 })
43286 #endif
43287 
43288 #ifdef __LITTLE_ENDIAN__
43289 #define vcopy_lane_p16(__p0_24, __p1_24, __p2_24, __p3_24) __extension__ ({ \
43290   poly16x4_t __s0_24 = __p0_24; \
43291   poly16x4_t __s2_24 = __p2_24; \
43292   poly16x4_t __ret_24; \
43293   __ret_24 = vset_lane_p16(vget_lane_p16(__s2_24, __p3_24), __s0_24, __p1_24); \
43294   __ret_24; \
43295 })
43296 #else
43297 #define vcopy_lane_p16(__p0_25, __p1_25, __p2_25, __p3_25) __extension__ ({ \
43298   poly16x4_t __s0_25 = __p0_25; \
43299   poly16x4_t __s2_25 = __p2_25; \
43300   poly16x4_t __rev0_25;  __rev0_25 = __builtin_shufflevector(__s0_25, __s0_25, 3, 2, 1, 0); \
43301   poly16x4_t __rev2_25;  __rev2_25 = __builtin_shufflevector(__s2_25, __s2_25, 3, 2, 1, 0); \
43302   poly16x4_t __ret_25; \
43303   __ret_25 = __noswap_vset_lane_p16(__noswap_vget_lane_p16(__rev2_25, __p3_25), __rev0_25, __p1_25); \
43304   __ret_25 = __builtin_shufflevector(__ret_25, __ret_25, 3, 2, 1, 0); \
43305   __ret_25; \
43306 })
43307 #endif
43308 
43309 #ifdef __LITTLE_ENDIAN__
43310 #define vcopy_lane_u8(__p0_26, __p1_26, __p2_26, __p3_26) __extension__ ({ \
43311   uint8x8_t __s0_26 = __p0_26; \
43312   uint8x8_t __s2_26 = __p2_26; \
43313   uint8x8_t __ret_26; \
43314   __ret_26 = vset_lane_u8(vget_lane_u8(__s2_26, __p3_26), __s0_26, __p1_26); \
43315   __ret_26; \
43316 })
43317 #else
43318 #define vcopy_lane_u8(__p0_27, __p1_27, __p2_27, __p3_27) __extension__ ({ \
43319   uint8x8_t __s0_27 = __p0_27; \
43320   uint8x8_t __s2_27 = __p2_27; \
43321   uint8x8_t __rev0_27;  __rev0_27 = __builtin_shufflevector(__s0_27, __s0_27, 7, 6, 5, 4, 3, 2, 1, 0); \
43322   uint8x8_t __rev2_27;  __rev2_27 = __builtin_shufflevector(__s2_27, __s2_27, 7, 6, 5, 4, 3, 2, 1, 0); \
43323   uint8x8_t __ret_27; \
43324   __ret_27 = __noswap_vset_lane_u8(__noswap_vget_lane_u8(__rev2_27, __p3_27), __rev0_27, __p1_27); \
43325   __ret_27 = __builtin_shufflevector(__ret_27, __ret_27, 7, 6, 5, 4, 3, 2, 1, 0); \
43326   __ret_27; \
43327 })
43328 #endif
43329 
43330 #ifdef __LITTLE_ENDIAN__
43331 #define vcopy_lane_u32(__p0_28, __p1_28, __p2_28, __p3_28) __extension__ ({ \
43332   uint32x2_t __s0_28 = __p0_28; \
43333   uint32x2_t __s2_28 = __p2_28; \
43334   uint32x2_t __ret_28; \
43335   __ret_28 = vset_lane_u32(vget_lane_u32(__s2_28, __p3_28), __s0_28, __p1_28); \
43336   __ret_28; \
43337 })
43338 #else
43339 #define vcopy_lane_u32(__p0_29, __p1_29, __p2_29, __p3_29) __extension__ ({ \
43340   uint32x2_t __s0_29 = __p0_29; \
43341   uint32x2_t __s2_29 = __p2_29; \
43342   uint32x2_t __rev0_29;  __rev0_29 = __builtin_shufflevector(__s0_29, __s0_29, 1, 0); \
43343   uint32x2_t __rev2_29;  __rev2_29 = __builtin_shufflevector(__s2_29, __s2_29, 1, 0); \
43344   uint32x2_t __ret_29; \
43345   __ret_29 = __noswap_vset_lane_u32(__noswap_vget_lane_u32(__rev2_29, __p3_29), __rev0_29, __p1_29); \
43346   __ret_29 = __builtin_shufflevector(__ret_29, __ret_29, 1, 0); \
43347   __ret_29; \
43348 })
43349 #endif
43350 
43351 #ifdef __LITTLE_ENDIAN__
43352 #define vcopy_lane_u64(__p0_30, __p1_30, __p2_30, __p3_30) __extension__ ({ \
43353   uint64x1_t __s0_30 = __p0_30; \
43354   uint64x1_t __s2_30 = __p2_30; \
43355   uint64x1_t __ret_30; \
43356   __ret_30 = vset_lane_u64(vget_lane_u64(__s2_30, __p3_30), __s0_30, __p1_30); \
43357   __ret_30; \
43358 })
43359 #else
43360 #define vcopy_lane_u64(__p0_31, __p1_31, __p2_31, __p3_31) __extension__ ({ \
43361   uint64x1_t __s0_31 = __p0_31; \
43362   uint64x1_t __s2_31 = __p2_31; \
43363   uint64x1_t __ret_31; \
43364   __ret_31 = __noswap_vset_lane_u64(__noswap_vget_lane_u64(__s2_31, __p3_31), __s0_31, __p1_31); \
43365   __ret_31; \
43366 })
43367 #endif
43368 
43369 #ifdef __LITTLE_ENDIAN__
43370 #define vcopy_lane_u16(__p0_32, __p1_32, __p2_32, __p3_32) __extension__ ({ \
43371   uint16x4_t __s0_32 = __p0_32; \
43372   uint16x4_t __s2_32 = __p2_32; \
43373   uint16x4_t __ret_32; \
43374   __ret_32 = vset_lane_u16(vget_lane_u16(__s2_32, __p3_32), __s0_32, __p1_32); \
43375   __ret_32; \
43376 })
43377 #else
43378 #define vcopy_lane_u16(__p0_33, __p1_33, __p2_33, __p3_33) __extension__ ({ \
43379   uint16x4_t __s0_33 = __p0_33; \
43380   uint16x4_t __s2_33 = __p2_33; \
43381   uint16x4_t __rev0_33;  __rev0_33 = __builtin_shufflevector(__s0_33, __s0_33, 3, 2, 1, 0); \
43382   uint16x4_t __rev2_33;  __rev2_33 = __builtin_shufflevector(__s2_33, __s2_33, 3, 2, 1, 0); \
43383   uint16x4_t __ret_33; \
43384   __ret_33 = __noswap_vset_lane_u16(__noswap_vget_lane_u16(__rev2_33, __p3_33), __rev0_33, __p1_33); \
43385   __ret_33 = __builtin_shufflevector(__ret_33, __ret_33, 3, 2, 1, 0); \
43386   __ret_33; \
43387 })
43388 #endif
43389 
43390 #ifdef __LITTLE_ENDIAN__
43391 #define vcopy_lane_s8(__p0_34, __p1_34, __p2_34, __p3_34) __extension__ ({ \
43392   int8x8_t __s0_34 = __p0_34; \
43393   int8x8_t __s2_34 = __p2_34; \
43394   int8x8_t __ret_34; \
43395   __ret_34 = vset_lane_s8(vget_lane_s8(__s2_34, __p3_34), __s0_34, __p1_34); \
43396   __ret_34; \
43397 })
43398 #else
43399 #define vcopy_lane_s8(__p0_35, __p1_35, __p2_35, __p3_35) __extension__ ({ \
43400   int8x8_t __s0_35 = __p0_35; \
43401   int8x8_t __s2_35 = __p2_35; \
43402   int8x8_t __rev0_35;  __rev0_35 = __builtin_shufflevector(__s0_35, __s0_35, 7, 6, 5, 4, 3, 2, 1, 0); \
43403   int8x8_t __rev2_35;  __rev2_35 = __builtin_shufflevector(__s2_35, __s2_35, 7, 6, 5, 4, 3, 2, 1, 0); \
43404   int8x8_t __ret_35; \
43405   __ret_35 = __noswap_vset_lane_s8(__noswap_vget_lane_s8(__rev2_35, __p3_35), __rev0_35, __p1_35); \
43406   __ret_35 = __builtin_shufflevector(__ret_35, __ret_35, 7, 6, 5, 4, 3, 2, 1, 0); \
43407   __ret_35; \
43408 })
43409 #endif
43410 
43411 #ifdef __LITTLE_ENDIAN__
43412 #define vcopy_lane_f32(__p0_36, __p1_36, __p2_36, __p3_36) __extension__ ({ \
43413   float32x2_t __s0_36 = __p0_36; \
43414   float32x2_t __s2_36 = __p2_36; \
43415   float32x2_t __ret_36; \
43416   __ret_36 = vset_lane_f32(vget_lane_f32(__s2_36, __p3_36), __s0_36, __p1_36); \
43417   __ret_36; \
43418 })
43419 #else
43420 #define vcopy_lane_f32(__p0_37, __p1_37, __p2_37, __p3_37) __extension__ ({ \
43421   float32x2_t __s0_37 = __p0_37; \
43422   float32x2_t __s2_37 = __p2_37; \
43423   float32x2_t __rev0_37;  __rev0_37 = __builtin_shufflevector(__s0_37, __s0_37, 1, 0); \
43424   float32x2_t __rev2_37;  __rev2_37 = __builtin_shufflevector(__s2_37, __s2_37, 1, 0); \
43425   float32x2_t __ret_37; \
43426   __ret_37 = __noswap_vset_lane_f32(__noswap_vget_lane_f32(__rev2_37, __p3_37), __rev0_37, __p1_37); \
43427   __ret_37 = __builtin_shufflevector(__ret_37, __ret_37, 1, 0); \
43428   __ret_37; \
43429 })
43430 #endif
43431 
43432 #ifdef __LITTLE_ENDIAN__
43433 #define vcopy_lane_s32(__p0_38, __p1_38, __p2_38, __p3_38) __extension__ ({ \
43434   int32x2_t __s0_38 = __p0_38; \
43435   int32x2_t __s2_38 = __p2_38; \
43436   int32x2_t __ret_38; \
43437   __ret_38 = vset_lane_s32(vget_lane_s32(__s2_38, __p3_38), __s0_38, __p1_38); \
43438   __ret_38; \
43439 })
43440 #else
43441 #define vcopy_lane_s32(__p0_39, __p1_39, __p2_39, __p3_39) __extension__ ({ \
43442   int32x2_t __s0_39 = __p0_39; \
43443   int32x2_t __s2_39 = __p2_39; \
43444   int32x2_t __rev0_39;  __rev0_39 = __builtin_shufflevector(__s0_39, __s0_39, 1, 0); \
43445   int32x2_t __rev2_39;  __rev2_39 = __builtin_shufflevector(__s2_39, __s2_39, 1, 0); \
43446   int32x2_t __ret_39; \
43447   __ret_39 = __noswap_vset_lane_s32(__noswap_vget_lane_s32(__rev2_39, __p3_39), __rev0_39, __p1_39); \
43448   __ret_39 = __builtin_shufflevector(__ret_39, __ret_39, 1, 0); \
43449   __ret_39; \
43450 })
43451 #endif
43452 
43453 #ifdef __LITTLE_ENDIAN__
43454 #define vcopy_lane_s64(__p0_40, __p1_40, __p2_40, __p3_40) __extension__ ({ \
43455   int64x1_t __s0_40 = __p0_40; \
43456   int64x1_t __s2_40 = __p2_40; \
43457   int64x1_t __ret_40; \
43458   __ret_40 = vset_lane_s64(vget_lane_s64(__s2_40, __p3_40), __s0_40, __p1_40); \
43459   __ret_40; \
43460 })
43461 #else
43462 #define vcopy_lane_s64(__p0_41, __p1_41, __p2_41, __p3_41) __extension__ ({ \
43463   int64x1_t __s0_41 = __p0_41; \
43464   int64x1_t __s2_41 = __p2_41; \
43465   int64x1_t __ret_41; \
43466   __ret_41 = __noswap_vset_lane_s64(__noswap_vget_lane_s64(__s2_41, __p3_41), __s0_41, __p1_41); \
43467   __ret_41; \
43468 })
43469 #endif
43470 
43471 #ifdef __LITTLE_ENDIAN__
43472 #define vcopy_lane_s16(__p0_42, __p1_42, __p2_42, __p3_42) __extension__ ({ \
43473   int16x4_t __s0_42 = __p0_42; \
43474   int16x4_t __s2_42 = __p2_42; \
43475   int16x4_t __ret_42; \
43476   __ret_42 = vset_lane_s16(vget_lane_s16(__s2_42, __p3_42), __s0_42, __p1_42); \
43477   __ret_42; \
43478 })
43479 #else
43480 #define vcopy_lane_s16(__p0_43, __p1_43, __p2_43, __p3_43) __extension__ ({ \
43481   int16x4_t __s0_43 = __p0_43; \
43482   int16x4_t __s2_43 = __p2_43; \
43483   int16x4_t __rev0_43;  __rev0_43 = __builtin_shufflevector(__s0_43, __s0_43, 3, 2, 1, 0); \
43484   int16x4_t __rev2_43;  __rev2_43 = __builtin_shufflevector(__s2_43, __s2_43, 3, 2, 1, 0); \
43485   int16x4_t __ret_43; \
43486   __ret_43 = __noswap_vset_lane_s16(__noswap_vget_lane_s16(__rev2_43, __p3_43), __rev0_43, __p1_43); \
43487   __ret_43 = __builtin_shufflevector(__ret_43, __ret_43, 3, 2, 1, 0); \
43488   __ret_43; \
43489 })
43490 #endif
43491 
43492 #ifdef __LITTLE_ENDIAN__
43493 #define vcopyq_laneq_p8(__p0_44, __p1_44, __p2_44, __p3_44) __extension__ ({ \
43494   poly8x16_t __s0_44 = __p0_44; \
43495   poly8x16_t __s2_44 = __p2_44; \
43496   poly8x16_t __ret_44; \
43497   __ret_44 = vsetq_lane_p8(vgetq_lane_p8(__s2_44, __p3_44), __s0_44, __p1_44); \
43498   __ret_44; \
43499 })
43500 #else
43501 #define vcopyq_laneq_p8(__p0_45, __p1_45, __p2_45, __p3_45) __extension__ ({ \
43502   poly8x16_t __s0_45 = __p0_45; \
43503   poly8x16_t __s2_45 = __p2_45; \
43504   poly8x16_t __rev0_45;  __rev0_45 = __builtin_shufflevector(__s0_45, __s0_45, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
43505   poly8x16_t __rev2_45;  __rev2_45 = __builtin_shufflevector(__s2_45, __s2_45, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
43506   poly8x16_t __ret_45; \
43507   __ret_45 = __noswap_vsetq_lane_p8(__noswap_vgetq_lane_p8(__rev2_45, __p3_45), __rev0_45, __p1_45); \
43508   __ret_45 = __builtin_shufflevector(__ret_45, __ret_45, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
43509   __ret_45; \
43510 })
43511 #endif
43512 
43513 #ifdef __LITTLE_ENDIAN__
43514 #define vcopyq_laneq_p16(__p0_46, __p1_46, __p2_46, __p3_46) __extension__ ({ \
43515   poly16x8_t __s0_46 = __p0_46; \
43516   poly16x8_t __s2_46 = __p2_46; \
43517   poly16x8_t __ret_46; \
43518   __ret_46 = vsetq_lane_p16(vgetq_lane_p16(__s2_46, __p3_46), __s0_46, __p1_46); \
43519   __ret_46; \
43520 })
43521 #else
43522 #define vcopyq_laneq_p16(__p0_47, __p1_47, __p2_47, __p3_47) __extension__ ({ \
43523   poly16x8_t __s0_47 = __p0_47; \
43524   poly16x8_t __s2_47 = __p2_47; \
43525   poly16x8_t __rev0_47;  __rev0_47 = __builtin_shufflevector(__s0_47, __s0_47, 7, 6, 5, 4, 3, 2, 1, 0); \
43526   poly16x8_t __rev2_47;  __rev2_47 = __builtin_shufflevector(__s2_47, __s2_47, 7, 6, 5, 4, 3, 2, 1, 0); \
43527   poly16x8_t __ret_47; \
43528   __ret_47 = __noswap_vsetq_lane_p16(__noswap_vgetq_lane_p16(__rev2_47, __p3_47), __rev0_47, __p1_47); \
43529   __ret_47 = __builtin_shufflevector(__ret_47, __ret_47, 7, 6, 5, 4, 3, 2, 1, 0); \
43530   __ret_47; \
43531 })
43532 #endif
43533 
43534 #ifdef __LITTLE_ENDIAN__
43535 #define vcopyq_laneq_u8(__p0_48, __p1_48, __p2_48, __p3_48) __extension__ ({ \
43536   uint8x16_t __s0_48 = __p0_48; \
43537   uint8x16_t __s2_48 = __p2_48; \
43538   uint8x16_t __ret_48; \
43539   __ret_48 = vsetq_lane_u8(vgetq_lane_u8(__s2_48, __p3_48), __s0_48, __p1_48); \
43540   __ret_48; \
43541 })
43542 #else
43543 #define vcopyq_laneq_u8(__p0_49, __p1_49, __p2_49, __p3_49) __extension__ ({ \
43544   uint8x16_t __s0_49 = __p0_49; \
43545   uint8x16_t __s2_49 = __p2_49; \
43546   uint8x16_t __rev0_49;  __rev0_49 = __builtin_shufflevector(__s0_49, __s0_49, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
43547   uint8x16_t __rev2_49;  __rev2_49 = __builtin_shufflevector(__s2_49, __s2_49, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
43548   uint8x16_t __ret_49; \
43549   __ret_49 = __noswap_vsetq_lane_u8(__noswap_vgetq_lane_u8(__rev2_49, __p3_49), __rev0_49, __p1_49); \
43550   __ret_49 = __builtin_shufflevector(__ret_49, __ret_49, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
43551   __ret_49; \
43552 })
43553 #endif
43554 
43555 #ifdef __LITTLE_ENDIAN__
43556 #define vcopyq_laneq_u32(__p0_50, __p1_50, __p2_50, __p3_50) __extension__ ({ \
43557   uint32x4_t __s0_50 = __p0_50; \
43558   uint32x4_t __s2_50 = __p2_50; \
43559   uint32x4_t __ret_50; \
43560   __ret_50 = vsetq_lane_u32(vgetq_lane_u32(__s2_50, __p3_50), __s0_50, __p1_50); \
43561   __ret_50; \
43562 })
43563 #else
43564 #define vcopyq_laneq_u32(__p0_51, __p1_51, __p2_51, __p3_51) __extension__ ({ \
43565   uint32x4_t __s0_51 = __p0_51; \
43566   uint32x4_t __s2_51 = __p2_51; \
43567   uint32x4_t __rev0_51;  __rev0_51 = __builtin_shufflevector(__s0_51, __s0_51, 3, 2, 1, 0); \
43568   uint32x4_t __rev2_51;  __rev2_51 = __builtin_shufflevector(__s2_51, __s2_51, 3, 2, 1, 0); \
43569   uint32x4_t __ret_51; \
43570   __ret_51 = __noswap_vsetq_lane_u32(__noswap_vgetq_lane_u32(__rev2_51, __p3_51), __rev0_51, __p1_51); \
43571   __ret_51 = __builtin_shufflevector(__ret_51, __ret_51, 3, 2, 1, 0); \
43572   __ret_51; \
43573 })
43574 #endif
43575 
43576 #ifdef __LITTLE_ENDIAN__
43577 #define vcopyq_laneq_u64(__p0_52, __p1_52, __p2_52, __p3_52) __extension__ ({ \
43578   uint64x2_t __s0_52 = __p0_52; \
43579   uint64x2_t __s2_52 = __p2_52; \
43580   uint64x2_t __ret_52; \
43581   __ret_52 = vsetq_lane_u64(vgetq_lane_u64(__s2_52, __p3_52), __s0_52, __p1_52); \
43582   __ret_52; \
43583 })
43584 #else
43585 #define vcopyq_laneq_u64(__p0_53, __p1_53, __p2_53, __p3_53) __extension__ ({ \
43586   uint64x2_t __s0_53 = __p0_53; \
43587   uint64x2_t __s2_53 = __p2_53; \
43588   uint64x2_t __rev0_53;  __rev0_53 = __builtin_shufflevector(__s0_53, __s0_53, 1, 0); \
43589   uint64x2_t __rev2_53;  __rev2_53 = __builtin_shufflevector(__s2_53, __s2_53, 1, 0); \
43590   uint64x2_t __ret_53; \
43591   __ret_53 = __noswap_vsetq_lane_u64(__noswap_vgetq_lane_u64(__rev2_53, __p3_53), __rev0_53, __p1_53); \
43592   __ret_53 = __builtin_shufflevector(__ret_53, __ret_53, 1, 0); \
43593   __ret_53; \
43594 })
43595 #endif
43596 
43597 #ifdef __LITTLE_ENDIAN__
43598 #define vcopyq_laneq_u16(__p0_54, __p1_54, __p2_54, __p3_54) __extension__ ({ \
43599   uint16x8_t __s0_54 = __p0_54; \
43600   uint16x8_t __s2_54 = __p2_54; \
43601   uint16x8_t __ret_54; \
43602   __ret_54 = vsetq_lane_u16(vgetq_lane_u16(__s2_54, __p3_54), __s0_54, __p1_54); \
43603   __ret_54; \
43604 })
43605 #else
43606 #define vcopyq_laneq_u16(__p0_55, __p1_55, __p2_55, __p3_55) __extension__ ({ \
43607   uint16x8_t __s0_55 = __p0_55; \
43608   uint16x8_t __s2_55 = __p2_55; \
43609   uint16x8_t __rev0_55;  __rev0_55 = __builtin_shufflevector(__s0_55, __s0_55, 7, 6, 5, 4, 3, 2, 1, 0); \
43610   uint16x8_t __rev2_55;  __rev2_55 = __builtin_shufflevector(__s2_55, __s2_55, 7, 6, 5, 4, 3, 2, 1, 0); \
43611   uint16x8_t __ret_55; \
43612   __ret_55 = __noswap_vsetq_lane_u16(__noswap_vgetq_lane_u16(__rev2_55, __p3_55), __rev0_55, __p1_55); \
43613   __ret_55 = __builtin_shufflevector(__ret_55, __ret_55, 7, 6, 5, 4, 3, 2, 1, 0); \
43614   __ret_55; \
43615 })
43616 #endif
43617 
43618 #ifdef __LITTLE_ENDIAN__
43619 #define vcopyq_laneq_s8(__p0_56, __p1_56, __p2_56, __p3_56) __extension__ ({ \
43620   int8x16_t __s0_56 = __p0_56; \
43621   int8x16_t __s2_56 = __p2_56; \
43622   int8x16_t __ret_56; \
43623   __ret_56 = vsetq_lane_s8(vgetq_lane_s8(__s2_56, __p3_56), __s0_56, __p1_56); \
43624   __ret_56; \
43625 })
43626 #else
43627 #define vcopyq_laneq_s8(__p0_57, __p1_57, __p2_57, __p3_57) __extension__ ({ \
43628   int8x16_t __s0_57 = __p0_57; \
43629   int8x16_t __s2_57 = __p2_57; \
43630   int8x16_t __rev0_57;  __rev0_57 = __builtin_shufflevector(__s0_57, __s0_57, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
43631   int8x16_t __rev2_57;  __rev2_57 = __builtin_shufflevector(__s2_57, __s2_57, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
43632   int8x16_t __ret_57; \
43633   __ret_57 = __noswap_vsetq_lane_s8(__noswap_vgetq_lane_s8(__rev2_57, __p3_57), __rev0_57, __p1_57); \
43634   __ret_57 = __builtin_shufflevector(__ret_57, __ret_57, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
43635   __ret_57; \
43636 })
43637 #endif
43638 
43639 #ifdef __LITTLE_ENDIAN__
43640 #define vcopyq_laneq_f32(__p0_58, __p1_58, __p2_58, __p3_58) __extension__ ({ \
43641   float32x4_t __s0_58 = __p0_58; \
43642   float32x4_t __s2_58 = __p2_58; \
43643   float32x4_t __ret_58; \
43644   __ret_58 = vsetq_lane_f32(vgetq_lane_f32(__s2_58, __p3_58), __s0_58, __p1_58); \
43645   __ret_58; \
43646 })
43647 #else
43648 #define vcopyq_laneq_f32(__p0_59, __p1_59, __p2_59, __p3_59) __extension__ ({ \
43649   float32x4_t __s0_59 = __p0_59; \
43650   float32x4_t __s2_59 = __p2_59; \
43651   float32x4_t __rev0_59;  __rev0_59 = __builtin_shufflevector(__s0_59, __s0_59, 3, 2, 1, 0); \
43652   float32x4_t __rev2_59;  __rev2_59 = __builtin_shufflevector(__s2_59, __s2_59, 3, 2, 1, 0); \
43653   float32x4_t __ret_59; \
43654   __ret_59 = __noswap_vsetq_lane_f32(__noswap_vgetq_lane_f32(__rev2_59, __p3_59), __rev0_59, __p1_59); \
43655   __ret_59 = __builtin_shufflevector(__ret_59, __ret_59, 3, 2, 1, 0); \
43656   __ret_59; \
43657 })
43658 #endif
43659 
43660 #ifdef __LITTLE_ENDIAN__
43661 #define vcopyq_laneq_s32(__p0_60, __p1_60, __p2_60, __p3_60) __extension__ ({ \
43662   int32x4_t __s0_60 = __p0_60; \
43663   int32x4_t __s2_60 = __p2_60; \
43664   int32x4_t __ret_60; \
43665   __ret_60 = vsetq_lane_s32(vgetq_lane_s32(__s2_60, __p3_60), __s0_60, __p1_60); \
43666   __ret_60; \
43667 })
43668 #else
43669 #define vcopyq_laneq_s32(__p0_61, __p1_61, __p2_61, __p3_61) __extension__ ({ \
43670   int32x4_t __s0_61 = __p0_61; \
43671   int32x4_t __s2_61 = __p2_61; \
43672   int32x4_t __rev0_61;  __rev0_61 = __builtin_shufflevector(__s0_61, __s0_61, 3, 2, 1, 0); \
43673   int32x4_t __rev2_61;  __rev2_61 = __builtin_shufflevector(__s2_61, __s2_61, 3, 2, 1, 0); \
43674   int32x4_t __ret_61; \
43675   __ret_61 = __noswap_vsetq_lane_s32(__noswap_vgetq_lane_s32(__rev2_61, __p3_61), __rev0_61, __p1_61); \
43676   __ret_61 = __builtin_shufflevector(__ret_61, __ret_61, 3, 2, 1, 0); \
43677   __ret_61; \
43678 })
43679 #endif
43680 
43681 #ifdef __LITTLE_ENDIAN__
43682 #define vcopyq_laneq_s64(__p0_62, __p1_62, __p2_62, __p3_62) __extension__ ({ \
43683   int64x2_t __s0_62 = __p0_62; \
43684   int64x2_t __s2_62 = __p2_62; \
43685   int64x2_t __ret_62; \
43686   __ret_62 = vsetq_lane_s64(vgetq_lane_s64(__s2_62, __p3_62), __s0_62, __p1_62); \
43687   __ret_62; \
43688 })
43689 #else
43690 #define vcopyq_laneq_s64(__p0_63, __p1_63, __p2_63, __p3_63) __extension__ ({ \
43691   int64x2_t __s0_63 = __p0_63; \
43692   int64x2_t __s2_63 = __p2_63; \
43693   int64x2_t __rev0_63;  __rev0_63 = __builtin_shufflevector(__s0_63, __s0_63, 1, 0); \
43694   int64x2_t __rev2_63;  __rev2_63 = __builtin_shufflevector(__s2_63, __s2_63, 1, 0); \
43695   int64x2_t __ret_63; \
43696   __ret_63 = __noswap_vsetq_lane_s64(__noswap_vgetq_lane_s64(__rev2_63, __p3_63), __rev0_63, __p1_63); \
43697   __ret_63 = __builtin_shufflevector(__ret_63, __ret_63, 1, 0); \
43698   __ret_63; \
43699 })
43700 #endif
43701 
43702 #ifdef __LITTLE_ENDIAN__
43703 #define vcopyq_laneq_s16(__p0_64, __p1_64, __p2_64, __p3_64) __extension__ ({ \
43704   int16x8_t __s0_64 = __p0_64; \
43705   int16x8_t __s2_64 = __p2_64; \
43706   int16x8_t __ret_64; \
43707   __ret_64 = vsetq_lane_s16(vgetq_lane_s16(__s2_64, __p3_64), __s0_64, __p1_64); \
43708   __ret_64; \
43709 })
43710 #else
43711 #define vcopyq_laneq_s16(__p0_65, __p1_65, __p2_65, __p3_65) __extension__ ({ \
43712   int16x8_t __s0_65 = __p0_65; \
43713   int16x8_t __s2_65 = __p2_65; \
43714   int16x8_t __rev0_65;  __rev0_65 = __builtin_shufflevector(__s0_65, __s0_65, 7, 6, 5, 4, 3, 2, 1, 0); \
43715   int16x8_t __rev2_65;  __rev2_65 = __builtin_shufflevector(__s2_65, __s2_65, 7, 6, 5, 4, 3, 2, 1, 0); \
43716   int16x8_t __ret_65; \
43717   __ret_65 = __noswap_vsetq_lane_s16(__noswap_vgetq_lane_s16(__rev2_65, __p3_65), __rev0_65, __p1_65); \
43718   __ret_65 = __builtin_shufflevector(__ret_65, __ret_65, 7, 6, 5, 4, 3, 2, 1, 0); \
43719   __ret_65; \
43720 })
43721 #endif
43722 
43723 #ifdef __LITTLE_ENDIAN__
43724 #define vcopy_laneq_p8(__p0_66, __p1_66, __p2_66, __p3_66) __extension__ ({ \
43725   poly8x8_t __s0_66 = __p0_66; \
43726   poly8x16_t __s2_66 = __p2_66; \
43727   poly8x8_t __ret_66; \
43728   __ret_66 = vset_lane_p8(vgetq_lane_p8(__s2_66, __p3_66), __s0_66, __p1_66); \
43729   __ret_66; \
43730 })
43731 #else
43732 #define vcopy_laneq_p8(__p0_67, __p1_67, __p2_67, __p3_67) __extension__ ({ \
43733   poly8x8_t __s0_67 = __p0_67; \
43734   poly8x16_t __s2_67 = __p2_67; \
43735   poly8x8_t __rev0_67;  __rev0_67 = __builtin_shufflevector(__s0_67, __s0_67, 7, 6, 5, 4, 3, 2, 1, 0); \
43736   poly8x16_t __rev2_67;  __rev2_67 = __builtin_shufflevector(__s2_67, __s2_67, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
43737   poly8x8_t __ret_67; \
43738   __ret_67 = __noswap_vset_lane_p8(__noswap_vgetq_lane_p8(__rev2_67, __p3_67), __rev0_67, __p1_67); \
43739   __ret_67 = __builtin_shufflevector(__ret_67, __ret_67, 7, 6, 5, 4, 3, 2, 1, 0); \
43740   __ret_67; \
43741 })
43742 #endif
43743 
43744 #ifdef __LITTLE_ENDIAN__
43745 #define vcopy_laneq_p16(__p0_68, __p1_68, __p2_68, __p3_68) __extension__ ({ \
43746   poly16x4_t __s0_68 = __p0_68; \
43747   poly16x8_t __s2_68 = __p2_68; \
43748   poly16x4_t __ret_68; \
43749   __ret_68 = vset_lane_p16(vgetq_lane_p16(__s2_68, __p3_68), __s0_68, __p1_68); \
43750   __ret_68; \
43751 })
43752 #else
43753 #define vcopy_laneq_p16(__p0_69, __p1_69, __p2_69, __p3_69) __extension__ ({ \
43754   poly16x4_t __s0_69 = __p0_69; \
43755   poly16x8_t __s2_69 = __p2_69; \
43756   poly16x4_t __rev0_69;  __rev0_69 = __builtin_shufflevector(__s0_69, __s0_69, 3, 2, 1, 0); \
43757   poly16x8_t __rev2_69;  __rev2_69 = __builtin_shufflevector(__s2_69, __s2_69, 7, 6, 5, 4, 3, 2, 1, 0); \
43758   poly16x4_t __ret_69; \
43759   __ret_69 = __noswap_vset_lane_p16(__noswap_vgetq_lane_p16(__rev2_69, __p3_69), __rev0_69, __p1_69); \
43760   __ret_69 = __builtin_shufflevector(__ret_69, __ret_69, 3, 2, 1, 0); \
43761   __ret_69; \
43762 })
43763 #endif
43764 
43765 #ifdef __LITTLE_ENDIAN__
43766 #define vcopy_laneq_u8(__p0_70, __p1_70, __p2_70, __p3_70) __extension__ ({ \
43767   uint8x8_t __s0_70 = __p0_70; \
43768   uint8x16_t __s2_70 = __p2_70; \
43769   uint8x8_t __ret_70; \
43770   __ret_70 = vset_lane_u8(vgetq_lane_u8(__s2_70, __p3_70), __s0_70, __p1_70); \
43771   __ret_70; \
43772 })
43773 #else
43774 #define vcopy_laneq_u8(__p0_71, __p1_71, __p2_71, __p3_71) __extension__ ({ \
43775   uint8x8_t __s0_71 = __p0_71; \
43776   uint8x16_t __s2_71 = __p2_71; \
43777   uint8x8_t __rev0_71;  __rev0_71 = __builtin_shufflevector(__s0_71, __s0_71, 7, 6, 5, 4, 3, 2, 1, 0); \
43778   uint8x16_t __rev2_71;  __rev2_71 = __builtin_shufflevector(__s2_71, __s2_71, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
43779   uint8x8_t __ret_71; \
43780   __ret_71 = __noswap_vset_lane_u8(__noswap_vgetq_lane_u8(__rev2_71, __p3_71), __rev0_71, __p1_71); \
43781   __ret_71 = __builtin_shufflevector(__ret_71, __ret_71, 7, 6, 5, 4, 3, 2, 1, 0); \
43782   __ret_71; \
43783 })
43784 #endif
43785 
43786 #ifdef __LITTLE_ENDIAN__
43787 #define vcopy_laneq_u32(__p0_72, __p1_72, __p2_72, __p3_72) __extension__ ({ \
43788   uint32x2_t __s0_72 = __p0_72; \
43789   uint32x4_t __s2_72 = __p2_72; \
43790   uint32x2_t __ret_72; \
43791   __ret_72 = vset_lane_u32(vgetq_lane_u32(__s2_72, __p3_72), __s0_72, __p1_72); \
43792   __ret_72; \
43793 })
43794 #else
43795 #define vcopy_laneq_u32(__p0_73, __p1_73, __p2_73, __p3_73) __extension__ ({ \
43796   uint32x2_t __s0_73 = __p0_73; \
43797   uint32x4_t __s2_73 = __p2_73; \
43798   uint32x2_t __rev0_73;  __rev0_73 = __builtin_shufflevector(__s0_73, __s0_73, 1, 0); \
43799   uint32x4_t __rev2_73;  __rev2_73 = __builtin_shufflevector(__s2_73, __s2_73, 3, 2, 1, 0); \
43800   uint32x2_t __ret_73; \
43801   __ret_73 = __noswap_vset_lane_u32(__noswap_vgetq_lane_u32(__rev2_73, __p3_73), __rev0_73, __p1_73); \
43802   __ret_73 = __builtin_shufflevector(__ret_73, __ret_73, 1, 0); \
43803   __ret_73; \
43804 })
43805 #endif
43806 
43807 #ifdef __LITTLE_ENDIAN__
43808 #define vcopy_laneq_u64(__p0_74, __p1_74, __p2_74, __p3_74) __extension__ ({ \
43809   uint64x1_t __s0_74 = __p0_74; \
43810   uint64x2_t __s2_74 = __p2_74; \
43811   uint64x1_t __ret_74; \
43812   __ret_74 = vset_lane_u64(vgetq_lane_u64(__s2_74, __p3_74), __s0_74, __p1_74); \
43813   __ret_74; \
43814 })
43815 #else
43816 #define vcopy_laneq_u64(__p0_75, __p1_75, __p2_75, __p3_75) __extension__ ({ \
43817   uint64x1_t __s0_75 = __p0_75; \
43818   uint64x2_t __s2_75 = __p2_75; \
43819   uint64x2_t __rev2_75;  __rev2_75 = __builtin_shufflevector(__s2_75, __s2_75, 1, 0); \
43820   uint64x1_t __ret_75; \
43821   __ret_75 = __noswap_vset_lane_u64(__noswap_vgetq_lane_u64(__rev2_75, __p3_75), __s0_75, __p1_75); \
43822   __ret_75; \
43823 })
43824 #endif
43825 
43826 #ifdef __LITTLE_ENDIAN__
43827 #define vcopy_laneq_u16(__p0_76, __p1_76, __p2_76, __p3_76) __extension__ ({ \
43828   uint16x4_t __s0_76 = __p0_76; \
43829   uint16x8_t __s2_76 = __p2_76; \
43830   uint16x4_t __ret_76; \
43831   __ret_76 = vset_lane_u16(vgetq_lane_u16(__s2_76, __p3_76), __s0_76, __p1_76); \
43832   __ret_76; \
43833 })
43834 #else
43835 #define vcopy_laneq_u16(__p0_77, __p1_77, __p2_77, __p3_77) __extension__ ({ \
43836   uint16x4_t __s0_77 = __p0_77; \
43837   uint16x8_t __s2_77 = __p2_77; \
43838   uint16x4_t __rev0_77;  __rev0_77 = __builtin_shufflevector(__s0_77, __s0_77, 3, 2, 1, 0); \
43839   uint16x8_t __rev2_77;  __rev2_77 = __builtin_shufflevector(__s2_77, __s2_77, 7, 6, 5, 4, 3, 2, 1, 0); \
43840   uint16x4_t __ret_77; \
43841   __ret_77 = __noswap_vset_lane_u16(__noswap_vgetq_lane_u16(__rev2_77, __p3_77), __rev0_77, __p1_77); \
43842   __ret_77 = __builtin_shufflevector(__ret_77, __ret_77, 3, 2, 1, 0); \
43843   __ret_77; \
43844 })
43845 #endif
43846 
43847 #ifdef __LITTLE_ENDIAN__
43848 #define vcopy_laneq_s8(__p0_78, __p1_78, __p2_78, __p3_78) __extension__ ({ \
43849   int8x8_t __s0_78 = __p0_78; \
43850   int8x16_t __s2_78 = __p2_78; \
43851   int8x8_t __ret_78; \
43852   __ret_78 = vset_lane_s8(vgetq_lane_s8(__s2_78, __p3_78), __s0_78, __p1_78); \
43853   __ret_78; \
43854 })
43855 #else
43856 #define vcopy_laneq_s8(__p0_79, __p1_79, __p2_79, __p3_79) __extension__ ({ \
43857   int8x8_t __s0_79 = __p0_79; \
43858   int8x16_t __s2_79 = __p2_79; \
43859   int8x8_t __rev0_79;  __rev0_79 = __builtin_shufflevector(__s0_79, __s0_79, 7, 6, 5, 4, 3, 2, 1, 0); \
43860   int8x16_t __rev2_79;  __rev2_79 = __builtin_shufflevector(__s2_79, __s2_79, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
43861   int8x8_t __ret_79; \
43862   __ret_79 = __noswap_vset_lane_s8(__noswap_vgetq_lane_s8(__rev2_79, __p3_79), __rev0_79, __p1_79); \
43863   __ret_79 = __builtin_shufflevector(__ret_79, __ret_79, 7, 6, 5, 4, 3, 2, 1, 0); \
43864   __ret_79; \
43865 })
43866 #endif
43867 
43868 #ifdef __LITTLE_ENDIAN__
43869 #define vcopy_laneq_f32(__p0_80, __p1_80, __p2_80, __p3_80) __extension__ ({ \
43870   float32x2_t __s0_80 = __p0_80; \
43871   float32x4_t __s2_80 = __p2_80; \
43872   float32x2_t __ret_80; \
43873   __ret_80 = vset_lane_f32(vgetq_lane_f32(__s2_80, __p3_80), __s0_80, __p1_80); \
43874   __ret_80; \
43875 })
43876 #else
43877 #define vcopy_laneq_f32(__p0_81, __p1_81, __p2_81, __p3_81) __extension__ ({ \
43878   float32x2_t __s0_81 = __p0_81; \
43879   float32x4_t __s2_81 = __p2_81; \
43880   float32x2_t __rev0_81;  __rev0_81 = __builtin_shufflevector(__s0_81, __s0_81, 1, 0); \
43881   float32x4_t __rev2_81;  __rev2_81 = __builtin_shufflevector(__s2_81, __s2_81, 3, 2, 1, 0); \
43882   float32x2_t __ret_81; \
43883   __ret_81 = __noswap_vset_lane_f32(__noswap_vgetq_lane_f32(__rev2_81, __p3_81), __rev0_81, __p1_81); \
43884   __ret_81 = __builtin_shufflevector(__ret_81, __ret_81, 1, 0); \
43885   __ret_81; \
43886 })
43887 #endif
43888 
43889 #ifdef __LITTLE_ENDIAN__
43890 #define vcopy_laneq_s32(__p0_82, __p1_82, __p2_82, __p3_82) __extension__ ({ \
43891   int32x2_t __s0_82 = __p0_82; \
43892   int32x4_t __s2_82 = __p2_82; \
43893   int32x2_t __ret_82; \
43894   __ret_82 = vset_lane_s32(vgetq_lane_s32(__s2_82, __p3_82), __s0_82, __p1_82); \
43895   __ret_82; \
43896 })
43897 #else
43898 #define vcopy_laneq_s32(__p0_83, __p1_83, __p2_83, __p3_83) __extension__ ({ \
43899   int32x2_t __s0_83 = __p0_83; \
43900   int32x4_t __s2_83 = __p2_83; \
43901   int32x2_t __rev0_83;  __rev0_83 = __builtin_shufflevector(__s0_83, __s0_83, 1, 0); \
43902   int32x4_t __rev2_83;  __rev2_83 = __builtin_shufflevector(__s2_83, __s2_83, 3, 2, 1, 0); \
43903   int32x2_t __ret_83; \
43904   __ret_83 = __noswap_vset_lane_s32(__noswap_vgetq_lane_s32(__rev2_83, __p3_83), __rev0_83, __p1_83); \
43905   __ret_83 = __builtin_shufflevector(__ret_83, __ret_83, 1, 0); \
43906   __ret_83; \
43907 })
43908 #endif
43909 
43910 #ifdef __LITTLE_ENDIAN__
43911 #define vcopy_laneq_s64(__p0_84, __p1_84, __p2_84, __p3_84) __extension__ ({ \
43912   int64x1_t __s0_84 = __p0_84; \
43913   int64x2_t __s2_84 = __p2_84; \
43914   int64x1_t __ret_84; \
43915   __ret_84 = vset_lane_s64(vgetq_lane_s64(__s2_84, __p3_84), __s0_84, __p1_84); \
43916   __ret_84; \
43917 })
43918 #else
43919 #define vcopy_laneq_s64(__p0_85, __p1_85, __p2_85, __p3_85) __extension__ ({ \
43920   int64x1_t __s0_85 = __p0_85; \
43921   int64x2_t __s2_85 = __p2_85; \
43922   int64x2_t __rev2_85;  __rev2_85 = __builtin_shufflevector(__s2_85, __s2_85, 1, 0); \
43923   int64x1_t __ret_85; \
43924   __ret_85 = __noswap_vset_lane_s64(__noswap_vgetq_lane_s64(__rev2_85, __p3_85), __s0_85, __p1_85); \
43925   __ret_85; \
43926 })
43927 #endif
43928 
43929 #ifdef __LITTLE_ENDIAN__
43930 #define vcopy_laneq_s16(__p0_86, __p1_86, __p2_86, __p3_86) __extension__ ({ \
43931   int16x4_t __s0_86 = __p0_86; \
43932   int16x8_t __s2_86 = __p2_86; \
43933   int16x4_t __ret_86; \
43934   __ret_86 = vset_lane_s16(vgetq_lane_s16(__s2_86, __p3_86), __s0_86, __p1_86); \
43935   __ret_86; \
43936 })
43937 #else
43938 #define vcopy_laneq_s16(__p0_87, __p1_87, __p2_87, __p3_87) __extension__ ({ \
43939   int16x4_t __s0_87 = __p0_87; \
43940   int16x8_t __s2_87 = __p2_87; \
43941   int16x4_t __rev0_87;  __rev0_87 = __builtin_shufflevector(__s0_87, __s0_87, 3, 2, 1, 0); \
43942   int16x8_t __rev2_87;  __rev2_87 = __builtin_shufflevector(__s2_87, __s2_87, 7, 6, 5, 4, 3, 2, 1, 0); \
43943   int16x4_t __ret_87; \
43944   __ret_87 = __noswap_vset_lane_s16(__noswap_vgetq_lane_s16(__rev2_87, __p3_87), __rev0_87, __p1_87); \
43945   __ret_87 = __builtin_shufflevector(__ret_87, __ret_87, 3, 2, 1, 0); \
43946   __ret_87; \
43947 })
43948 #endif
43949 
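/* vcreate_p64 and vcreate_f64 reinterpret the 64-bit pattern of a uint64_t
 * as a one-lane vector; no value conversion takes place.  For example,
 * vcreate_f64(0x3ff0000000000000ULL) yields a float64x1_t containing 1.0.
 */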
43950 #ifdef __LITTLE_ENDIAN__
43951 __ai poly64x1_t vcreate_p64(uint64_t __p0) {
43952   poly64x1_t __ret;
43953   __ret = (poly64x1_t)(__p0);
43954   return __ret;
43955 }
43956 #else
43957 __ai poly64x1_t vcreate_p64(uint64_t __p0) {
43958   poly64x1_t __ret;
43959   __ret = (poly64x1_t)(__p0);
43960   return __ret;
43961 }
43962 #endif
43963 
43964 #ifdef __LITTLE_ENDIAN__
43965 __ai float64x1_t vcreate_f64(uint64_t __p0) {
43966   float64x1_t __ret;
43967   __ret = (float64x1_t)(__p0);
43968   return __ret;
43969 }
43970 #else
43971 __ai float64x1_t vcreate_f64(uint64_t __p0) {
43972   float64x1_t __ret;
43973   __ret = (float64x1_t)(__p0);
43974   return __ret;
43975 }
43976 #endif
43977 
43978 #ifdef __LITTLE_ENDIAN__
43979 __ai float32_t vcvts_f32_s32(int32_t __p0) {
43980   float32_t __ret;
43981   __ret = (float32_t) __builtin_neon_vcvts_f32_s32(__p0);
43982   return __ret;
43983 }
43984 #else
43985 __ai float32_t vcvts_f32_s32(int32_t __p0) {
43986   float32_t __ret;
43987   __ret = (float32_t) __builtin_neon_vcvts_f32_s32(__p0);
43988   return __ret;
43989 }
43990 #endif
43991 
43992 #ifdef __LITTLE_ENDIAN__
43993 __ai float32_t vcvts_f32_u32(uint32_t __p0) {
43994   float32_t __ret;
43995   __ret = (float32_t) __builtin_neon_vcvts_f32_u32(__p0);
43996   return __ret;
43997 }
43998 #else
43999 __ai float32_t vcvts_f32_u32(uint32_t __p0) {
44000   float32_t __ret;
44001   __ret = (float32_t) __builtin_neon_vcvts_f32_u32(__p0);
44002   return __ret;
44003 }
44004 #endif
44005 
44006 #ifdef __LITTLE_ENDIAN__
44007 __ai float32x2_t vcvt_f32_f64(float64x2_t __p0) {
44008   float32x2_t __ret;
44009   __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9);
44010   return __ret;
44011 }
44012 #else
44013 __ai float32x2_t vcvt_f32_f64(float64x2_t __p0) {
44014   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
44015   float32x2_t __ret;
44016   __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__rev0, 9);
44017   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
44018   return __ret;
44019 }
44020 __ai float32x2_t __noswap_vcvt_f32_f64(float64x2_t __p0) {
44021   float32x2_t __ret;
44022   __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9);
44023   return __ret;
44024 }
44025 #endif
44026 
44027 #ifdef __LITTLE_ENDIAN__
44028 __ai float64_t vcvtd_f64_s64(int64_t __p0) {
44029   float64_t __ret;
44030   __ret = (float64_t) __builtin_neon_vcvtd_f64_s64(__p0);
44031   return __ret;
44032 }
44033 #else
44034 __ai float64_t vcvtd_f64_s64(int64_t __p0) {
44035   float64_t __ret;
44036   __ret = (float64_t) __builtin_neon_vcvtd_f64_s64(__p0);
44037   return __ret;
44038 }
44039 #endif
44040 
44041 #ifdef __LITTLE_ENDIAN__
44042 __ai float64_t vcvtd_f64_u64(uint64_t __p0) {
44043   float64_t __ret;
44044   __ret = (float64_t) __builtin_neon_vcvtd_f64_u64(__p0);
44045   return __ret;
44046 }
44047 #else
44048 __ai float64_t vcvtd_f64_u64(uint64_t __p0) {
44049   float64_t __ret;
44050   __ret = (float64_t) __builtin_neon_vcvtd_f64_u64(__p0);
44051   return __ret;
44052 }
44053 #endif
44054 
44055 #ifdef __LITTLE_ENDIAN__
44056 __ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) {
44057   float64x2_t __ret;
44058   __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 51);
44059   return __ret;
44060 }
44061 #else
44062 __ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) {
44063   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
44064   float64x2_t __ret;
44065   __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 51);
44066   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
44067   return __ret;
44068 }
44069 #endif
44070 
44071 #ifdef __LITTLE_ENDIAN__
44072 __ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) {
44073   float64x2_t __ret;
44074   __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 35);
44075   return __ret;
44076 }
44077 #else
44078 __ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) {
44079   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
44080   float64x2_t __ret;
44081   __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 35);
44082   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
44083   return __ret;
44084 }
44085 #endif
44086 
44087 #ifdef __LITTLE_ENDIAN__
44088 __ai float64x1_t vcvt_f64_u64(uint64x1_t __p0) {
44089   float64x1_t __ret;
44090   __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 19);
44091   return __ret;
44092 }
44093 #else
44094 __ai float64x1_t vcvt_f64_u64(uint64x1_t __p0) {
44095   float64x1_t __ret;
44096   __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 19);
44097   return __ret;
44098 }
44099 #endif
44100 
44101 #ifdef __LITTLE_ENDIAN__
44102 __ai float64x1_t vcvt_f64_s64(int64x1_t __p0) {
44103   float64x1_t __ret;
44104   __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 3);
44105   return __ret;
44106 }
44107 #else
44108 __ai float64x1_t vcvt_f64_s64(int64x1_t __p0) {
44109   float64x1_t __ret;
44110   __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 3);
44111   return __ret;
44112 }
44113 #endif
44114 
44115 #ifdef __LITTLE_ENDIAN__
44116 __ai float64x2_t vcvt_f64_f32(float32x2_t __p0) {
44117   float64x2_t __ret;
44118   __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42);
44119   return __ret;
44120 }
44121 #else
44122 __ai float64x2_t vcvt_f64_f32(float32x2_t __p0) {
44123   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
44124   float64x2_t __ret;
44125   __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__rev0, 42);
44126   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
44127   return __ret;
44128 }
44129 __ai float64x2_t __noswap_vcvt_f64_f32(float32x2_t __p0) {
44130   float64x2_t __ret;
44131   __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42);
44132   return __ret;
44133 }
44134 #endif
44135 
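/* The vcvt_high_* conversions operate on vector halves: vcvt_high_f16_f32
 * narrows __p1 to float16 and places it in the high half above __p0, while
 * vcvt_high_f32_f16 and vcvt_high_f64_f32 widen the upper lanes of their
 * single 128-bit argument.
 */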
44136 #ifdef __LITTLE_ENDIAN__
44137 __ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) {
44138   float16x8_t __ret;
44139   __ret = vcombine_f16(__p0, vcvt_f16_f32(__p1));
44140   return __ret;
44141 }
44142 #else
44143 __ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) {
44144   float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
44145   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
44146   float16x8_t __ret;
44147   __ret = __noswap_vcombine_f16(__rev0, __noswap_vcvt_f16_f32(__rev1));
44148   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
44149   return __ret;
44150 }
44151 #endif
44152 
44153 #ifdef __LITTLE_ENDIAN__
44154 __ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) {
44155   float32x4_t __ret;
44156   __ret = vcvt_f32_f16(vget_high_f16(__p0));
44157   return __ret;
44158 }
44159 #else
44160 __ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) {
44161   float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
44162   float32x4_t __ret;
44163   __ret = __noswap_vcvt_f32_f16(__noswap_vget_high_f16(__rev0));
44164   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
44165   return __ret;
44166 }
44167 #endif
44168 
44169 #ifdef __LITTLE_ENDIAN__
44170 __ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
44171   float32x4_t __ret;
44172   __ret = vcombine_f32(__p0, vcvt_f32_f64(__p1));
44173   return __ret;
44174 }
44175 #else
44176 __ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
44177   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
44178   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
44179   float32x4_t __ret;
44180   __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvt_f32_f64(__rev1));
44181   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
44182   return __ret;
44183 }
44184 #endif
44185 
44186 #ifdef __LITTLE_ENDIAN__
44187 __ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) {
44188   float64x2_t __ret;
44189   __ret = vcvt_f64_f32(vget_high_f32(__p0));
44190   return __ret;
44191 }
44192 #else
44193 __ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) {
44194   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
44195   float64x2_t __ret;
44196   __ret = __noswap_vcvt_f64_f32(__noswap_vget_high_f32(__rev0));
44197   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
44198   return __ret;
44199 }
44200 #endif
44201 
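/* In the _n_ conversions below, __p1 is the number of fractional bits of a
 * fixed-point value and must be a constant expression (1..32 for the 32-bit
 * forms, 1..64 for the 64-bit forms).  For example, vcvts_n_f32_u32(0x180, 8)
 * interprets 0x180 as 384/256 and returns 1.5f.
 */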
44202 #ifdef __LITTLE_ENDIAN__
44203 #define vcvts_n_f32_u32(__p0, __p1) __extension__ ({ \
44204   uint32_t __s0 = __p0; \
44205   float32_t __ret; \
44206   __ret = (float32_t) __builtin_neon_vcvts_n_f32_u32(__s0, __p1); \
44207   __ret; \
44208 })
44209 #else
44210 #define vcvts_n_f32_u32(__p0, __p1) __extension__ ({ \
44211   uint32_t __s0 = __p0; \
44212   float32_t __ret; \
44213   __ret = (float32_t) __builtin_neon_vcvts_n_f32_u32(__s0, __p1); \
44214   __ret; \
44215 })
44216 #endif
44217 
44218 #ifdef __LITTLE_ENDIAN__
44219 #define vcvts_n_f32_s32(__p0, __p1) __extension__ ({ \
44220   int32_t __s0 = __p0; \
44221   float32_t __ret; \
44222   __ret = (float32_t) __builtin_neon_vcvts_n_f32_s32(__s0, __p1); \
44223   __ret; \
44224 })
44225 #else
44226 #define vcvts_n_f32_s32(__p0, __p1) __extension__ ({ \
44227   int32_t __s0 = __p0; \
44228   float32_t __ret; \
44229   __ret = (float32_t) __builtin_neon_vcvts_n_f32_s32(__s0, __p1); \
44230   __ret; \
44231 })
44232 #endif
44233 
44234 #ifdef __LITTLE_ENDIAN__
44235 #define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \
44236   uint64x2_t __s0 = __p0; \
44237   float64x2_t __ret; \
44238   __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 51); \
44239   __ret; \
44240 })
44241 #else
44242 #define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \
44243   uint64x2_t __s0 = __p0; \
44244   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
44245   float64x2_t __ret; \
44246   __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 51); \
44247   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
44248   __ret; \
44249 })
44250 #endif
44251 
44252 #ifdef __LITTLE_ENDIAN__
44253 #define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \
44254   int64x2_t __s0 = __p0; \
44255   float64x2_t __ret; \
44256   __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 35); \
44257   __ret; \
44258 })
44259 #else
44260 #define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \
44261   int64x2_t __s0 = __p0; \
44262   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
44263   float64x2_t __ret; \
44264   __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 35); \
44265   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
44266   __ret; \
44267 })
44268 #endif
44269 
44270 #ifdef __LITTLE_ENDIAN__
44271 #define vcvt_n_f64_u64(__p0, __p1) __extension__ ({ \
44272   uint64x1_t __s0 = __p0; \
44273   float64x1_t __ret; \
44274   __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 19); \
44275   __ret; \
44276 })
44277 #else
44278 #define vcvt_n_f64_u64(__p0, __p1) __extension__ ({ \
44279   uint64x1_t __s0 = __p0; \
44280   float64x1_t __ret; \
44281   __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 19); \
44282   __ret; \
44283 })
44284 #endif
44285 
44286 #ifdef __LITTLE_ENDIAN__
44287 #define vcvt_n_f64_s64(__p0, __p1) __extension__ ({ \
44288   int64x1_t __s0 = __p0; \
44289   float64x1_t __ret; \
44290   __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 3); \
44291   __ret; \
44292 })
44293 #else
44294 #define vcvt_n_f64_s64(__p0, __p1) __extension__ ({ \
44295   int64x1_t __s0 = __p0; \
44296   float64x1_t __ret; \
44297   __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 3); \
44298   __ret; \
44299 })
44300 #endif
44301 
44302 #ifdef __LITTLE_ENDIAN__
44303 #define vcvtd_n_f64_u64(__p0, __p1) __extension__ ({ \
44304   uint64_t __s0 = __p0; \
44305   float64_t __ret; \
44306   __ret = (float64_t) __builtin_neon_vcvtd_n_f64_u64(__s0, __p1); \
44307   __ret; \
44308 })
44309 #else
44310 #define vcvtd_n_f64_u64(__p0, __p1) __extension__ ({ \
44311   uint64_t __s0 = __p0; \
44312   float64_t __ret; \
44313   __ret = (float64_t) __builtin_neon_vcvtd_n_f64_u64(__s0, __p1); \
44314   __ret; \
44315 })
44316 #endif
44317 
44318 #ifdef __LITTLE_ENDIAN__
44319 #define vcvtd_n_f64_s64(__p0, __p1) __extension__ ({ \
44320   int64_t __s0 = __p0; \
44321   float64_t __ret; \
44322   __ret = (float64_t) __builtin_neon_vcvtd_n_f64_s64(__s0, __p1); \
44323   __ret; \
44324 })
44325 #else
44326 #define vcvtd_n_f64_s64(__p0, __p1) __extension__ ({ \
44327   int64_t __s0 = __p0; \
44328   float64_t __ret; \
44329   __ret = (float64_t) __builtin_neon_vcvtd_n_f64_s64(__s0, __p1); \
44330   __ret; \
44331 })
44332 #endif
44333 
44334 #ifdef __LITTLE_ENDIAN__
44335 #define vcvts_n_s32_f32(__p0, __p1) __extension__ ({ \
44336   float32_t __s0 = __p0; \
44337   int32_t __ret; \
44338   __ret = (int32_t) __builtin_neon_vcvts_n_s32_f32(__s0, __p1); \
44339   __ret; \
44340 })
44341 #else
44342 #define vcvts_n_s32_f32(__p0, __p1) __extension__ ({ \
44343   float32_t __s0 = __p0; \
44344   int32_t __ret; \
44345   __ret = (int32_t) __builtin_neon_vcvts_n_s32_f32(__s0, __p1); \
44346   __ret; \
44347 })
44348 #endif
44349 
44350 #ifdef __LITTLE_ENDIAN__
44351 #define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \
44352   float64x2_t __s0 = __p0; \
44353   int64x2_t __ret; \
44354   __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__s0, __p1, 35); \
44355   __ret; \
44356 })
44357 #else
44358 #define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \
44359   float64x2_t __s0 = __p0; \
44360   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
44361   int64x2_t __ret; \
44362   __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__rev0, __p1, 35); \
44363   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
44364   __ret; \
44365 })
44366 #endif
44367 
44368 #ifdef __LITTLE_ENDIAN__
44369 #define vcvt_n_s64_f64(__p0, __p1) __extension__ ({ \
44370   float64x1_t __s0 = __p0; \
44371   int64x1_t __ret; \
44372   __ret = (int64x1_t) __builtin_neon_vcvt_n_s64_v((int8x8_t)__s0, __p1, 3); \
44373   __ret; \
44374 })
44375 #else
44376 #define vcvt_n_s64_f64(__p0, __p1) __extension__ ({ \
44377   float64x1_t __s0 = __p0; \
44378   int64x1_t __ret; \
44379   __ret = (int64x1_t) __builtin_neon_vcvt_n_s64_v((int8x8_t)__s0, __p1, 3); \
44380   __ret; \
44381 })
44382 #endif
44383 
44384 #ifdef __LITTLE_ENDIAN__
44385 #define vcvtd_n_s64_f64(__p0, __p1) __extension__ ({ \
44386   float64_t __s0 = __p0; \
44387   int64_t __ret; \
44388   __ret = (int64_t) __builtin_neon_vcvtd_n_s64_f64(__s0, __p1); \
44389   __ret; \
44390 })
44391 #else
44392 #define vcvtd_n_s64_f64(__p0, __p1) __extension__ ({ \
44393   float64_t __s0 = __p0; \
44394   int64_t __ret; \
44395   __ret = (int64_t) __builtin_neon_vcvtd_n_s64_f64(__s0, __p1); \
44396   __ret; \
44397 })
44398 #endif
44399 
44400 #ifdef __LITTLE_ENDIAN__
44401 #define vcvts_n_u32_f32(__p0, __p1) __extension__ ({ \
44402   float32_t __s0 = __p0; \
44403   uint32_t __ret; \
44404   __ret = (uint32_t) __builtin_neon_vcvts_n_u32_f32(__s0, __p1); \
44405   __ret; \
44406 })
44407 #else
44408 #define vcvts_n_u32_f32(__p0, __p1) __extension__ ({ \
44409   float32_t __s0 = __p0; \
44410   uint32_t __ret; \
44411   __ret = (uint32_t) __builtin_neon_vcvts_n_u32_f32(__s0, __p1); \
44412   __ret; \
44413 })
44414 #endif
44415 
44416 #ifdef __LITTLE_ENDIAN__
44417 #define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \
44418   float64x2_t __s0 = __p0; \
44419   uint64x2_t __ret; \
44420   __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__s0, __p1, 51); \
44421   __ret; \
44422 })
44423 #else
44424 #define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \
44425   float64x2_t __s0 = __p0; \
44426   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
44427   uint64x2_t __ret; \
44428   __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__rev0, __p1, 51); \
44429   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
44430   __ret; \
44431 })
44432 #endif
44433 
44434 #ifdef __LITTLE_ENDIAN__
44435 #define vcvt_n_u64_f64(__p0, __p1) __extension__ ({ \
44436   float64x1_t __s0 = __p0; \
44437   uint64x1_t __ret; \
44438   __ret = (uint64x1_t) __builtin_neon_vcvt_n_u64_v((int8x8_t)__s0, __p1, 19); \
44439   __ret; \
44440 })
44441 #else
44442 #define vcvt_n_u64_f64(__p0, __p1) __extension__ ({ \
44443   float64x1_t __s0 = __p0; \
44444   uint64x1_t __ret; \
44445   __ret = (uint64x1_t) __builtin_neon_vcvt_n_u64_v((int8x8_t)__s0, __p1, 19); \
44446   __ret; \
44447 })
44448 #endif
44449 
44450 #ifdef __LITTLE_ENDIAN__
44451 #define vcvtd_n_u64_f64(__p0, __p1) __extension__ ({ \
44452   float64_t __s0 = __p0; \
44453   uint64_t __ret; \
44454   __ret = (uint64_t) __builtin_neon_vcvtd_n_u64_f64(__s0, __p1); \
44455   __ret; \
44456 })
44457 #else
44458 #define vcvtd_n_u64_f64(__p0, __p1) __extension__ ({ \
44459   float64_t __s0 = __p0; \
44460   uint64_t __ret; \
44461   __ret = (uint64_t) __builtin_neon_vcvtd_n_u64_f64(__s0, __p1); \
44462   __ret; \
44463 })
44464 #endif
44465 
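/* Float-to-integer conversions: the plain vcvt* forms round toward zero,
 * while the vcvta* (to nearest, ties away from zero), vcvtm* (toward -inf),
 * vcvtn* (to nearest, ties to even) and vcvtp* (toward +inf) forms further
 * below select an explicit rounding mode.  For example, vcvts_s32_f32(-1.5f)
 * is -1 whereas vcvtms_s32_f32(-1.5f) is -2.
 */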
44466 #ifdef __LITTLE_ENDIAN__
44467 __ai int32_t vcvts_s32_f32(float32_t __p0) {
44468   int32_t __ret;
44469   __ret = (int32_t) __builtin_neon_vcvts_s32_f32(__p0);
44470   return __ret;
44471 }
44472 #else
44473 __ai int32_t vcvts_s32_f32(float32_t __p0) {
44474   int32_t __ret;
44475   __ret = (int32_t) __builtin_neon_vcvts_s32_f32(__p0);
44476   return __ret;
44477 }
44478 #endif
44479 
44480 #ifdef __LITTLE_ENDIAN__
44481 __ai int64_t vcvtd_s64_f64(float64_t __p0) {
44482   int64_t __ret;
44483   __ret = (int64_t) __builtin_neon_vcvtd_s64_f64(__p0);
44484   return __ret;
44485 }
44486 #else
44487 __ai int64_t vcvtd_s64_f64(float64_t __p0) {
44488   int64_t __ret;
44489   __ret = (int64_t) __builtin_neon_vcvtd_s64_f64(__p0);
44490   return __ret;
44491 }
44492 #endif
44493 
44494 #ifdef __LITTLE_ENDIAN__
44495 __ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) {
44496   int64x2_t __ret;
44497   __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__p0, 35);
44498   return __ret;
44499 }
44500 #else
44501 __ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) {
44502   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
44503   int64x2_t __ret;
44504   __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__rev0, 35);
44505   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
44506   return __ret;
44507 }
44508 #endif
44509 
44510 #ifdef __LITTLE_ENDIAN__
44511 __ai int64x1_t vcvt_s64_f64(float64x1_t __p0) {
44512   int64x1_t __ret;
44513   __ret = (int64x1_t) __builtin_neon_vcvt_s64_v((int8x8_t)__p0, 3);
44514   return __ret;
44515 }
44516 #else
44517 __ai int64x1_t vcvt_s64_f64(float64x1_t __p0) {
44518   int64x1_t __ret;
44519   __ret = (int64x1_t) __builtin_neon_vcvt_s64_v((int8x8_t)__p0, 3);
44520   return __ret;
44521 }
44522 #endif
44523 
44524 #ifdef __LITTLE_ENDIAN__
44525 __ai uint32_t vcvts_u32_f32(float32_t __p0) {
44526   uint32_t __ret;
44527   __ret = (uint32_t) __builtin_neon_vcvts_u32_f32(__p0);
44528   return __ret;
44529 }
44530 #else
44531 __ai uint32_t vcvts_u32_f32(float32_t __p0) {
44532   uint32_t __ret;
44533   __ret = (uint32_t) __builtin_neon_vcvts_u32_f32(__p0);
44534   return __ret;
44535 }
44536 #endif
44537 
44538 #ifdef __LITTLE_ENDIAN__
44539 __ai uint64_t vcvtd_u64_f64(float64_t __p0) {
44540   uint64_t __ret;
44541   __ret = (uint64_t) __builtin_neon_vcvtd_u64_f64(__p0);
44542   return __ret;
44543 }
44544 #else
44545 __ai uint64_t vcvtd_u64_f64(float64_t __p0) {
44546   uint64_t __ret;
44547   __ret = (uint64_t) __builtin_neon_vcvtd_u64_f64(__p0);
44548   return __ret;
44549 }
44550 #endif
44551 
44552 #ifdef __LITTLE_ENDIAN__
44553 __ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) {
44554   uint64x2_t __ret;
44555   __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__p0, 51);
44556   return __ret;
44557 }
44558 #else
44559 __ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) {
44560   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
44561   uint64x2_t __ret;
44562   __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__rev0, 51);
44563   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
44564   return __ret;
44565 }
44566 #endif
44567 
44568 #ifdef __LITTLE_ENDIAN__
44569 __ai uint64x1_t vcvt_u64_f64(float64x1_t __p0) {
44570   uint64x1_t __ret;
44571   __ret = (uint64x1_t) __builtin_neon_vcvt_u64_v((int8x8_t)__p0, 19);
44572   return __ret;
44573 }
44574 #else
44575 __ai uint64x1_t vcvt_u64_f64(float64x1_t __p0) {
44576   uint64x1_t __ret;
44577   __ret = (uint64x1_t) __builtin_neon_vcvt_u64_v((int8x8_t)__p0, 19);
44578   return __ret;
44579 }
44580 #endif
44581 
44582 #ifdef __LITTLE_ENDIAN__
44583 __ai int32_t vcvtas_s32_f32(float32_t __p0) {
44584   int32_t __ret;
44585   __ret = (int32_t) __builtin_neon_vcvtas_s32_f32(__p0);
44586   return __ret;
44587 }
44588 #else
44589 __ai int32_t vcvtas_s32_f32(float32_t __p0) {
44590   int32_t __ret;
44591   __ret = (int32_t) __builtin_neon_vcvtas_s32_f32(__p0);
44592   return __ret;
44593 }
44594 #endif
44595 
44596 #ifdef __LITTLE_ENDIAN__
44597 __ai int64_t vcvtad_s64_f64(float64_t __p0) {
44598   int64_t __ret;
44599   __ret = (int64_t) __builtin_neon_vcvtad_s64_f64(__p0);
44600   return __ret;
44601 }
44602 #else
44603 __ai int64_t vcvtad_s64_f64(float64_t __p0) {
44604   int64_t __ret;
44605   __ret = (int64_t) __builtin_neon_vcvtad_s64_f64(__p0);
44606   return __ret;
44607 }
44608 #endif
44609 
44610 #ifdef __LITTLE_ENDIAN__
44611 __ai uint32_t vcvtas_u32_f32(float32_t __p0) {
44612   uint32_t __ret;
44613   __ret = (uint32_t) __builtin_neon_vcvtas_u32_f32(__p0);
44614   return __ret;
44615 }
44616 #else
44617 __ai uint32_t vcvtas_u32_f32(float32_t __p0) {
44618   uint32_t __ret;
44619   __ret = (uint32_t) __builtin_neon_vcvtas_u32_f32(__p0);
44620   return __ret;
44621 }
44622 #endif
44623 
44624 #ifdef __LITTLE_ENDIAN__
44625 __ai uint64_t vcvtad_u64_f64(float64_t __p0) {
44626   uint64_t __ret;
44627   __ret = (uint64_t) __builtin_neon_vcvtad_u64_f64(__p0);
44628   return __ret;
44629 }
44630 #else
44631 __ai uint64_t vcvtad_u64_f64(float64_t __p0) {
44632   uint64_t __ret;
44633   __ret = (uint64_t) __builtin_neon_vcvtad_u64_f64(__p0);
44634   return __ret;
44635 }
44636 #endif
44637 
44638 #ifdef __LITTLE_ENDIAN__
44639 __ai int32_t vcvtms_s32_f32(float32_t __p0) {
44640   int32_t __ret;
44641   __ret = (int32_t) __builtin_neon_vcvtms_s32_f32(__p0);
44642   return __ret;
44643 }
44644 #else
44645 __ai int32_t vcvtms_s32_f32(float32_t __p0) {
44646   int32_t __ret;
44647   __ret = (int32_t) __builtin_neon_vcvtms_s32_f32(__p0);
44648   return __ret;
44649 }
44650 #endif
44651 
44652 #ifdef __LITTLE_ENDIAN__
44653 __ai int64_t vcvtmd_s64_f64(float64_t __p0) {
44654   int64_t __ret;
44655   __ret = (int64_t) __builtin_neon_vcvtmd_s64_f64(__p0);
44656   return __ret;
44657 }
44658 #else
44659 __ai int64_t vcvtmd_s64_f64(float64_t __p0) {
44660   int64_t __ret;
44661   __ret = (int64_t) __builtin_neon_vcvtmd_s64_f64(__p0);
44662   return __ret;
44663 }
44664 #endif
44665 
44666 #ifdef __LITTLE_ENDIAN__
44667 __ai uint32_t vcvtms_u32_f32(float32_t __p0) {
44668   uint32_t __ret;
44669   __ret = (uint32_t) __builtin_neon_vcvtms_u32_f32(__p0);
44670   return __ret;
44671 }
44672 #else
44673 __ai uint32_t vcvtms_u32_f32(float32_t __p0) {
44674   uint32_t __ret;
44675   __ret = (uint32_t) __builtin_neon_vcvtms_u32_f32(__p0);
44676   return __ret;
44677 }
44678 #endif
44679 
44680 #ifdef __LITTLE_ENDIAN__
44681 __ai uint64_t vcvtmd_u64_f64(float64_t __p0) {
44682   uint64_t __ret;
44683   __ret = (uint64_t) __builtin_neon_vcvtmd_u64_f64(__p0);
44684   return __ret;
44685 }
44686 #else
44687 __ai uint64_t vcvtmd_u64_f64(float64_t __p0) {
44688   uint64_t __ret;
44689   __ret = (uint64_t) __builtin_neon_vcvtmd_u64_f64(__p0);
44690   return __ret;
44691 }
44692 #endif
44693 
44694 #ifdef __LITTLE_ENDIAN__
44695 __ai int32_t vcvtns_s32_f32(float32_t __p0) {
44696   int32_t __ret;
44697   __ret = (int32_t) __builtin_neon_vcvtns_s32_f32(__p0);
44698   return __ret;
44699 }
44700 #else
44701 __ai int32_t vcvtns_s32_f32(float32_t __p0) {
44702   int32_t __ret;
44703   __ret = (int32_t) __builtin_neon_vcvtns_s32_f32(__p0);
44704   return __ret;
44705 }
44706 #endif
44707 
44708 #ifdef __LITTLE_ENDIAN__
44709 __ai int64_t vcvtnd_s64_f64(float64_t __p0) {
44710   int64_t __ret;
44711   __ret = (int64_t) __builtin_neon_vcvtnd_s64_f64(__p0);
44712   return __ret;
44713 }
44714 #else
44715 __ai int64_t vcvtnd_s64_f64(float64_t __p0) {
44716   int64_t __ret;
44717   __ret = (int64_t) __builtin_neon_vcvtnd_s64_f64(__p0);
44718   return __ret;
44719 }
44720 #endif
44721 
44722 #ifdef __LITTLE_ENDIAN__
44723 __ai uint32_t vcvtns_u32_f32(float32_t __p0) {
44724   uint32_t __ret;
44725   __ret = (uint32_t) __builtin_neon_vcvtns_u32_f32(__p0);
44726   return __ret;
44727 }
44728 #else
44729 __ai uint32_t vcvtns_u32_f32(float32_t __p0) {
44730   uint32_t __ret;
44731   __ret = (uint32_t) __builtin_neon_vcvtns_u32_f32(__p0);
44732   return __ret;
44733 }
44734 #endif
44735 
44736 #ifdef __LITTLE_ENDIAN__
44737 __ai uint64_t vcvtnd_u64_f64(float64_t __p0) {
44738   uint64_t __ret;
44739   __ret = (uint64_t) __builtin_neon_vcvtnd_u64_f64(__p0);
44740   return __ret;
44741 }
44742 #else
44743 __ai uint64_t vcvtnd_u64_f64(float64_t __p0) {
44744   uint64_t __ret;
44745   __ret = (uint64_t) __builtin_neon_vcvtnd_u64_f64(__p0);
44746   return __ret;
44747 }
44748 #endif
44749 
44750 #ifdef __LITTLE_ENDIAN__
44751 __ai int32_t vcvtps_s32_f32(float32_t __p0) {
44752   int32_t __ret;
44753   __ret = (int32_t) __builtin_neon_vcvtps_s32_f32(__p0);
44754   return __ret;
44755 }
44756 #else
44757 __ai int32_t vcvtps_s32_f32(float32_t __p0) {
44758   int32_t __ret;
44759   __ret = (int32_t) __builtin_neon_vcvtps_s32_f32(__p0);
44760   return __ret;
44761 }
44762 #endif
44763 
44764 #ifdef __LITTLE_ENDIAN__
44765 __ai int64_t vcvtpd_s64_f64(float64_t __p0) {
44766   int64_t __ret;
44767   __ret = (int64_t) __builtin_neon_vcvtpd_s64_f64(__p0);
44768   return __ret;
44769 }
44770 #else
44771 __ai int64_t vcvtpd_s64_f64(float64_t __p0) {
44772   int64_t __ret;
44773   __ret = (int64_t) __builtin_neon_vcvtpd_s64_f64(__p0);
44774   return __ret;
44775 }
44776 #endif
44777 
44778 #ifdef __LITTLE_ENDIAN__
44779 __ai uint32_t vcvtps_u32_f32(float32_t __p0) {
44780   uint32_t __ret;
44781   __ret = (uint32_t) __builtin_neon_vcvtps_u32_f32(__p0);
44782   return __ret;
44783 }
44784 #else
44785 __ai uint32_t vcvtps_u32_f32(float32_t __p0) {
44786   uint32_t __ret;
44787   __ret = (uint32_t) __builtin_neon_vcvtps_u32_f32(__p0);
44788   return __ret;
44789 }
44790 #endif
44791 
44792 #ifdef __LITTLE_ENDIAN__
44793 __ai uint64_t vcvtpd_u64_f64(float64_t __p0) {
44794   uint64_t __ret;
44795   __ret = (uint64_t) __builtin_neon_vcvtpd_u64_f64(__p0);
44796   return __ret;
44797 }
44798 #else
44799 __ai uint64_t vcvtpd_u64_f64(float64_t __p0) {
44800   uint64_t __ret;
44801   __ret = (uint64_t) __builtin_neon_vcvtpd_u64_f64(__p0);
44802   return __ret;
44803 }
44804 #endif
44805 
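/* vcvtxd_f32_f64 and vcvtx_f32_f64 narrow float64 to float32 using the
 * "round to odd" mode, which avoids double-rounding errors when the result
 * is rounded again by a subsequent narrowing step.
 */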
44806 #ifdef __LITTLE_ENDIAN__
44807 __ai float32_t vcvtxd_f32_f64(float64_t __p0) {
44808   float32_t __ret;
44809   __ret = (float32_t) __builtin_neon_vcvtxd_f32_f64(__p0);
44810   return __ret;
44811 }
44812 #else
44813 __ai float32_t vcvtxd_f32_f64(float64_t __p0) {
44814   float32_t __ret;
44815   __ret = (float32_t) __builtin_neon_vcvtxd_f32_f64(__p0);
44816   return __ret;
44817 }
44818 #endif
44819 
44820 #ifdef __LITTLE_ENDIAN__
44821 __ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) {
44822   float32x2_t __ret;
44823   __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42);
44824   return __ret;
44825 }
44826 #else
44827 __ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) {
44828   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
44829   float32x2_t __ret;
44830   __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__rev0, 42);
44831   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
44832   return __ret;
44833 }
44834 __ai float32x2_t __noswap_vcvtx_f32_f64(float64x2_t __p0) {
44835   float32x2_t __ret;
44836   __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42);
44837   return __ret;
44838 }
44839 #endif
44840 
44841 #ifdef __LITTLE_ENDIAN__
44842 __ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
44843   float32x4_t __ret;
44844   __ret = vcombine_f32(__p0, vcvtx_f32_f64(__p1));
44845   return __ret;
44846 }
44847 #else
44848 __ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
44849   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
44850   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
44851   float32x4_t __ret;
44852   __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvtx_f32_f64(__rev1));
44853   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
44854   return __ret;
44855 }
44856 #endif
44857 
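/* Element-wise floating-point division (available on AArch64 only), e.g.
 *   float32x4_t q = vdivq_f32(vdupq_n_f32(1.0f), vdupq_n_f32(4.0f));
 * sets every lane of q to 0.25f.
 */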
44858 #ifdef __LITTLE_ENDIAN__
44859 __ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) {
44860   float64x2_t __ret;
44861   __ret = __p0 / __p1;
44862   return __ret;
44863 }
44864 #else
44865 __ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) {
44866   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
44867   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
44868   float64x2_t __ret;
44869   __ret = __rev0 / __rev1;
44870   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
44871   return __ret;
44872 }
44873 #endif
44874 
44875 #ifdef __LITTLE_ENDIAN__
44876 __ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) {
44877   float32x4_t __ret;
44878   __ret = __p0 / __p1;
44879   return __ret;
44880 }
44881 #else
44882 __ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) {
44883   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
44884   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
44885   float32x4_t __ret;
44886   __ret = __rev0 / __rev1;
44887   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
44888   return __ret;
44889 }
44890 #endif
44891 
44892 #ifdef __LITTLE_ENDIAN__
44893 __ai float64x1_t vdiv_f64(float64x1_t __p0, float64x1_t __p1) {
44894   float64x1_t __ret;
44895   __ret = __p0 / __p1;
44896   return __ret;
44897 }
44898 #else
44899 __ai float64x1_t vdiv_f64(float64x1_t __p0, float64x1_t __p1) {
44900   float64x1_t __ret;
44901   __ret = __p0 / __p1;
44902   return __ret;
44903 }
44904 #endif
44905 
44906 #ifdef __LITTLE_ENDIAN__
44907 __ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) {
44908   float32x2_t __ret;
44909   __ret = __p0 / __p1;
44910   return __ret;
44911 }
44912 #else
44913 __ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) {
44914   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
44915   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
44916   float32x2_t __ret;
44917   __ret = __rev0 / __rev1;
44918   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
44919   return __ret;
44920 }
44921 #endif
44922 
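/* The vdupb/vduph/vdups/vdupd_lane* macros extract a single lane of the
 * source vector as a scalar; __p1 selects the lane and must be a constant
 * expression in range for the vector length.
 */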
44923 #ifdef __LITTLE_ENDIAN__
44924 #define vdupb_lane_p8(__p0, __p1) __extension__ ({ \
44925   poly8x8_t __s0 = __p0; \
44926   poly8_t __ret; \
44927   __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \
44928   __ret; \
44929 })
44930 #else
44931 #define vdupb_lane_p8(__p0, __p1) __extension__ ({ \
44932   poly8x8_t __s0 = __p0; \
44933   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
44934   poly8_t __ret; \
44935   __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \
44936   __ret; \
44937 })
44938 #endif
44939 
44940 #ifdef __LITTLE_ENDIAN__
44941 #define vduph_lane_p16(__p0, __p1) __extension__ ({ \
44942   poly16x4_t __s0 = __p0; \
44943   poly16_t __ret; \
44944   __ret = (poly16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__s0, __p1); \
44945   __ret; \
44946 })
44947 #else
44948 #define vduph_lane_p16(__p0, __p1) __extension__ ({ \
44949   poly16x4_t __s0 = __p0; \
44950   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
44951   poly16_t __ret; \
44952   __ret = (poly16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__rev0, __p1); \
44953   __ret; \
44954 })
44955 #endif
44956 
44957 #ifdef __LITTLE_ENDIAN__
44958 #define vdupb_lane_u8(__p0, __p1) __extension__ ({ \
44959   uint8x8_t __s0 = __p0; \
44960   uint8_t __ret; \
44961   __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \
44962   __ret; \
44963 })
44964 #else
44965 #define vdupb_lane_u8(__p0, __p1) __extension__ ({ \
44966   uint8x8_t __s0 = __p0; \
44967   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
44968   uint8_t __ret; \
44969   __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \
44970   __ret; \
44971 })
44972 #endif
44973 
44974 #ifdef __LITTLE_ENDIAN__
44975 #define vdups_lane_u32(__p0, __p1) __extension__ ({ \
44976   uint32x2_t __s0 = __p0; \
44977   uint32_t __ret; \
44978   __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__s0, __p1); \
44979   __ret; \
44980 })
44981 #else
44982 #define vdups_lane_u32(__p0, __p1) __extension__ ({ \
44983   uint32x2_t __s0 = __p0; \
44984   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
44985   uint32_t __ret; \
44986   __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__rev0, __p1); \
44987   __ret; \
44988 })
44989 #endif
44990 
44991 #ifdef __LITTLE_ENDIAN__
44992 #define vdupd_lane_u64(__p0, __p1) __extension__ ({ \
44993   uint64x1_t __s0 = __p0; \
44994   uint64_t __ret; \
44995   __ret = (uint64_t) __builtin_neon_vdupd_lane_i64((int8x8_t)__s0, __p1); \
44996   __ret; \
44997 })
44998 #else
44999 #define vdupd_lane_u64(__p0, __p1) __extension__ ({ \
45000   uint64x1_t __s0 = __p0; \
45001   uint64_t __ret; \
45002   __ret = (uint64_t) __builtin_neon_vdupd_lane_i64((int8x8_t)__s0, __p1); \
45003   __ret; \
45004 })
45005 #endif
45006 
45007 #ifdef __LITTLE_ENDIAN__
45008 #define vduph_lane_u16(__p0, __p1) __extension__ ({ \
45009   uint16x4_t __s0 = __p0; \
45010   uint16_t __ret; \
45011   __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__s0, __p1); \
45012   __ret; \
45013 })
45014 #else
45015 #define vduph_lane_u16(__p0, __p1) __extension__ ({ \
45016   uint16x4_t __s0 = __p0; \
45017   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
45018   uint16_t __ret; \
45019   __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__rev0, __p1); \
45020   __ret; \
45021 })
45022 #endif
45023 
45024 #ifdef __LITTLE_ENDIAN__
45025 #define vdupb_lane_s8(__p0, __p1) __extension__ ({ \
45026   int8x8_t __s0 = __p0; \
45027   int8_t __ret; \
45028   __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \
45029   __ret; \
45030 })
45031 #else
45032 #define vdupb_lane_s8(__p0, __p1) __extension__ ({ \
45033   int8x8_t __s0 = __p0; \
45034   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
45035   int8_t __ret; \
45036   __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \
45037   __ret; \
45038 })
45039 #endif
45040 
45041 #ifdef __LITTLE_ENDIAN__
45042 #define vdupd_lane_f64(__p0, __p1) __extension__ ({ \
45043   float64x1_t __s0 = __p0; \
45044   float64_t __ret; \
45045   __ret = (float64_t) __builtin_neon_vdupd_lane_f64((int8x8_t)__s0, __p1); \
45046   __ret; \
45047 })
45048 #else
45049 #define vdupd_lane_f64(__p0, __p1) __extension__ ({ \
45050   float64x1_t __s0 = __p0; \
45051   float64_t __ret; \
45052   __ret = (float64_t) __builtin_neon_vdupd_lane_f64((int8x8_t)__s0, __p1); \
45053   __ret; \
45054 })
45055 #endif
45056 
45057 #ifdef __LITTLE_ENDIAN__
45058 #define vdups_lane_f32(__p0, __p1) __extension__ ({ \
45059   float32x2_t __s0 = __p0; \
45060   float32_t __ret; \
45061   __ret = (float32_t) __builtin_neon_vdups_lane_f32((int8x8_t)__s0, __p1); \
45062   __ret; \
45063 })
45064 #else
45065 #define vdups_lane_f32(__p0, __p1) __extension__ ({ \
45066   float32x2_t __s0 = __p0; \
45067   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
45068   float32_t __ret; \
45069   __ret = (float32_t) __builtin_neon_vdups_lane_f32((int8x8_t)__rev0, __p1); \
45070   __ret; \
45071 })
45072 #endif
45073 
45074 #ifdef __LITTLE_ENDIAN__
45075 #define vdups_lane_s32(__p0, __p1) __extension__ ({ \
45076   int32x2_t __s0 = __p0; \
45077   int32_t __ret; \
45078   __ret = (int32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__s0, __p1); \
45079   __ret; \
45080 })
45081 #else
45082 #define vdups_lane_s32(__p0, __p1) __extension__ ({ \
45083   int32x2_t __s0 = __p0; \
45084   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
45085   int32_t __ret; \
45086   __ret = (int32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__rev0, __p1); \
45087   __ret; \
45088 })
45089 #endif
45090 
45091 #ifdef __LITTLE_ENDIAN__
45092 #define vdupd_lane_s64(__p0, __p1) __extension__ ({ \
45093   int64x1_t __s0 = __p0; \
45094   int64_t __ret; \
45095   __ret = (int64_t) __builtin_neon_vdupd_lane_i64((int8x8_t)__s0, __p1); \
45096   __ret; \
45097 })
45098 #else
45099 #define vdupd_lane_s64(__p0, __p1) __extension__ ({ \
45100   int64x1_t __s0 = __p0; \
45101   int64_t __ret; \
45102   __ret = (int64_t) __builtin_neon_vdupd_lane_i64((int8x8_t)__s0, __p1); \
45103   __ret; \
45104 })
45105 #endif
45106 
45107 #ifdef __LITTLE_ENDIAN__
45108 #define vduph_lane_s16(__p0, __p1) __extension__ ({ \
45109   int16x4_t __s0 = __p0; \
45110   int16_t __ret; \
45111   __ret = (int16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__s0, __p1); \
45112   __ret; \
45113 })
45114 #else
45115 #define vduph_lane_s16(__p0, __p1) __extension__ ({ \
45116   int16x4_t __s0 = __p0; \
45117   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
45118   int16_t __ret; \
45119   __ret = (int16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__rev0, __p1); \
45120   __ret; \
45121 })
45122 #endif
45123 
45124 #ifdef __LITTLE_ENDIAN__
45125 #define vdup_lane_p64(__p0, __p1) __extension__ ({ \
45126   poly64x1_t __s0 = __p0; \
45127   poly64x1_t __ret; \
45128   __ret = __builtin_shufflevector(__s0, __s0, __p1); \
45129   __ret; \
45130 })
45131 #else
45132 #define vdup_lane_p64(__p0, __p1) __extension__ ({ \
45133   poly64x1_t __s0 = __p0; \
45134   poly64x1_t __ret; \
45135   __ret = __builtin_shufflevector(__s0, __s0, __p1); \
45136   __ret; \
45137 })
45138 #endif
45139 
45140 #ifdef __LITTLE_ENDIAN__
45141 #define vdupq_lane_p64(__p0, __p1) __extension__ ({ \
45142   poly64x1_t __s0 = __p0; \
45143   poly64x2_t __ret; \
45144   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
45145   __ret; \
45146 })
45147 #else
45148 #define vdupq_lane_p64(__p0, __p1) __extension__ ({ \
45149   poly64x1_t __s0 = __p0; \
45150   poly64x2_t __ret; \
45151   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
45152   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
45153   __ret; \
45154 })
45155 #endif
45156 
45157 #ifdef __LITTLE_ENDIAN__
45158 #define vdupq_lane_f64(__p0, __p1) __extension__ ({ \
45159   float64x1_t __s0 = __p0; \
45160   float64x2_t __ret; \
45161   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
45162   __ret; \
45163 })
45164 #else
45165 #define vdupq_lane_f64(__p0, __p1) __extension__ ({ \
45166   float64x1_t __s0 = __p0; \
45167   float64x2_t __ret; \
45168   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
45169   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
45170   __ret; \
45171 })
45172 #endif
45173 
45174 #ifdef __LITTLE_ENDIAN__
45175 #define vdupq_lane_f16(__p0, __p1) __extension__ ({ \
45176   float16x4_t __s0 = __p0; \
45177   float16x8_t __ret; \
45178   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
45179   __ret; \
45180 })
45181 #else
45182 #define vdupq_lane_f16(__p0, __p1) __extension__ ({ \
45183   float16x4_t __s0 = __p0; \
45184   float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
45185   float16x8_t __ret; \
45186   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
45187   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
45188   __ret; \
45189 })
45190 #endif
45191 
45192 #ifdef __LITTLE_ENDIAN__
45193 #define vdup_lane_f64(__p0, __p1) __extension__ ({ \
45194   float64x1_t __s0 = __p0; \
45195   float64x1_t __ret; \
45196   __ret = __builtin_shufflevector(__s0, __s0, __p1); \
45197   __ret; \
45198 })
45199 #else
45200 #define vdup_lane_f64(__p0, __p1) __extension__ ({ \
45201   float64x1_t __s0 = __p0; \
45202   float64x1_t __ret; \
45203   __ret = __builtin_shufflevector(__s0, __s0, __p1); \
45204   __ret; \
45205 })
45206 #endif
45207 
45208 #ifdef __LITTLE_ENDIAN__
45209 #define vdup_lane_f16(__p0, __p1) __extension__ ({ \
45210   float16x4_t __s0 = __p0; \
45211   float16x4_t __ret; \
45212   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
45213   __ret; \
45214 })
45215 #else
45216 #define vdup_lane_f16(__p0, __p1) __extension__ ({ \
45217   float16x4_t __s0 = __p0; \
45218   float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
45219   float16x4_t __ret; \
45220   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
45221   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
45222   __ret; \
45223 })
45224 #endif
45225 
45226 #ifdef __LITTLE_ENDIAN__
45227 #define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \
45228   poly8x16_t __s0 = __p0; \
45229   poly8_t __ret; \
45230   __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \
45231   __ret; \
45232 })
45233 #else
45234 #define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \
45235   poly8x16_t __s0 = __p0; \
45236   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
45237   poly8_t __ret; \
45238   __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \
45239   __ret; \
45240 })
45241 #endif
45242 
45243 #ifdef __LITTLE_ENDIAN__
45244 #define vduph_laneq_p16(__p0, __p1) __extension__ ({ \
45245   poly16x8_t __s0 = __p0; \
45246   poly16_t __ret; \
45247   __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__s0, __p1); \
45248   __ret; \
45249 })
45250 #else
45251 #define vduph_laneq_p16(__p0, __p1) __extension__ ({ \
45252   poly16x8_t __s0 = __p0; \
45253   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
45254   poly16_t __ret; \
45255   __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__rev0, __p1); \
45256   __ret; \
45257 })
45258 #endif
45259 
45260 #ifdef __LITTLE_ENDIAN__
45261 #define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \
45262   uint8x16_t __s0 = __p0; \
45263   uint8_t __ret; \
45264   __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \
45265   __ret; \
45266 })
45267 #else
45268 #define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \
45269   uint8x16_t __s0 = __p0; \
45270   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
45271   uint8_t __ret; \
45272   __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \
45273   __ret; \
45274 })
45275 #endif
45276 
45277 #ifdef __LITTLE_ENDIAN__
45278 #define vdups_laneq_u32(__p0, __p1) __extension__ ({ \
45279   uint32x4_t __s0 = __p0; \
45280   uint32_t __ret; \
45281   __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__s0, __p1); \
45282   __ret; \
45283 })
45284 #else
45285 #define vdups_laneq_u32(__p0, __p1) __extension__ ({ \
45286   uint32x4_t __s0 = __p0; \
45287   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
45288   uint32_t __ret; \
45289   __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__rev0, __p1); \
45290   __ret; \
45291 })
45292 #endif
45293 
45294 #ifdef __LITTLE_ENDIAN__
45295 #define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \
45296   uint64x2_t __s0 = __p0; \
45297   uint64_t __ret; \
45298   __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__s0, __p1); \
45299   __ret; \
45300 })
45301 #else
45302 #define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \
45303   uint64x2_t __s0 = __p0; \
45304   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
45305   uint64_t __ret; \
45306   __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__rev0, __p1); \
45307   __ret; \
45308 })
45309 #endif
45310 
45311 #ifdef __LITTLE_ENDIAN__
45312 #define vduph_laneq_u16(__p0, __p1) __extension__ ({ \
45313   uint16x8_t __s0 = __p0; \
45314   uint16_t __ret; \
45315   __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__s0, __p1); \
45316   __ret; \
45317 })
45318 #else
45319 #define vduph_laneq_u16(__p0, __p1) __extension__ ({ \
45320   uint16x8_t __s0 = __p0; \
45321   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
45322   uint16_t __ret; \
45323   __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__rev0, __p1); \
45324   __ret; \
45325 })
45326 #endif
45327 
45328 #ifdef __LITTLE_ENDIAN__
45329 #define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \
45330   int8x16_t __s0 = __p0; \
45331   int8_t __ret; \
45332   __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \
45333   __ret; \
45334 })
45335 #else
45336 #define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \
45337   int8x16_t __s0 = __p0; \
45338   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
45339   int8_t __ret; \
45340   __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \
45341   __ret; \
45342 })
45343 #endif
45344 
45345 #ifdef __LITTLE_ENDIAN__
45346 #define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \
45347   float64x2_t __s0 = __p0; \
45348   float64_t __ret; \
45349   __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((int8x16_t)__s0, __p1); \
45350   __ret; \
45351 })
45352 #else
45353 #define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \
45354   float64x2_t __s0 = __p0; \
45355   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
45356   float64_t __ret; \
45357   __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((int8x16_t)__rev0, __p1); \
45358   __ret; \
45359 })
45360 #endif
45361 
45362 #ifdef __LITTLE_ENDIAN__
45363 #define vdups_laneq_f32(__p0, __p1) __extension__ ({ \
45364   float32x4_t __s0 = __p0; \
45365   float32_t __ret; \
45366   __ret = (float32_t) __builtin_neon_vdups_laneq_f32((int8x16_t)__s0, __p1); \
45367   __ret; \
45368 })
45369 #else
45370 #define vdups_laneq_f32(__p0, __p1) __extension__ ({ \
45371   float32x4_t __s0 = __p0; \
45372   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
45373   float32_t __ret; \
45374   __ret = (float32_t) __builtin_neon_vdups_laneq_f32((int8x16_t)__rev0, __p1); \
45375   __ret; \
45376 })
45377 #endif
45378 
45379 #ifdef __LITTLE_ENDIAN__
45380 #define vdups_laneq_s32(__p0, __p1) __extension__ ({ \
45381   int32x4_t __s0 = __p0; \
45382   int32_t __ret; \
45383   __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__s0, __p1); \
45384   __ret; \
45385 })
45386 #else
45387 #define vdups_laneq_s32(__p0, __p1) __extension__ ({ \
45388   int32x4_t __s0 = __p0; \
45389   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
45390   int32_t __ret; \
45391   __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__rev0, __p1); \
45392   __ret; \
45393 })
45394 #endif
45395 
45396 #ifdef __LITTLE_ENDIAN__
45397 #define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \
45398   int64x2_t __s0 = __p0; \
45399   int64_t __ret; \
45400   __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__s0, __p1); \
45401   __ret; \
45402 })
45403 #else
45404 #define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \
45405   int64x2_t __s0 = __p0; \
45406   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
45407   int64_t __ret; \
45408   __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__rev0, __p1); \
45409   __ret; \
45410 })
45411 #endif
45412 
45413 #ifdef __LITTLE_ENDIAN__
45414 #define vduph_laneq_s16(__p0, __p1) __extension__ ({ \
45415   int16x8_t __s0 = __p0; \
45416   int16_t __ret; \
45417   __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__s0, __p1); \
45418   __ret; \
45419 })
45420 #else
45421 #define vduph_laneq_s16(__p0, __p1) __extension__ ({ \
45422   int16x8_t __s0 = __p0; \
45423   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
45424   int16_t __ret; \
45425   __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__rev0, __p1); \
45426   __ret; \
45427 })
45428 #endif
45429 
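/* vdup_laneq_* / vdupq_laneq_* intrinsics: broadcast one lane of a 128-bit
 * vector into every lane of a 64-bit or 128-bit result.  The big-endian
 * variants reverse the input before, and the result after, the splat so that
 * lane numbering stays consistent across endiannesses. */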
45430 #ifdef __LITTLE_ENDIAN__
45431 #define vdup_laneq_p8(__p0, __p1) __extension__ ({ \
45432   poly8x16_t __s0 = __p0; \
45433   poly8x8_t __ret; \
45434   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
45435   __ret; \
45436 })
45437 #else
45438 #define vdup_laneq_p8(__p0, __p1) __extension__ ({ \
45439   poly8x16_t __s0 = __p0; \
45440   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
45441   poly8x8_t __ret; \
45442   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
45443   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
45444   __ret; \
45445 })
45446 #endif
45447 
45448 #ifdef __LITTLE_ENDIAN__
45449 #define vdup_laneq_p64(__p0, __p1) __extension__ ({ \
45450   poly64x2_t __s0 = __p0; \
45451   poly64x1_t __ret; \
45452   __ret = __builtin_shufflevector(__s0, __s0, __p1); \
45453   __ret; \
45454 })
45455 #else
45456 #define vdup_laneq_p64(__p0, __p1) __extension__ ({ \
45457   poly64x2_t __s0 = __p0; \
45458   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
45459   poly64x1_t __ret; \
45460   __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \
45461   __ret; \
45462 })
45463 #endif
45464 
45465 #ifdef __LITTLE_ENDIAN__
45466 #define vdup_laneq_p16(__p0, __p1) __extension__ ({ \
45467   poly16x8_t __s0 = __p0; \
45468   poly16x4_t __ret; \
45469   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
45470   __ret; \
45471 })
45472 #else
45473 #define vdup_laneq_p16(__p0, __p1) __extension__ ({ \
45474   poly16x8_t __s0 = __p0; \
45475   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
45476   poly16x4_t __ret; \
45477   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
45478   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
45479   __ret; \
45480 })
45481 #endif
45482 
45483 #ifdef __LITTLE_ENDIAN__
45484 #define vdupq_laneq_p8(__p0, __p1) __extension__ ({ \
45485   poly8x16_t __s0 = __p0; \
45486   poly8x16_t __ret; \
45487   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
45488   __ret; \
45489 })
45490 #else
45491 #define vdupq_laneq_p8(__p0, __p1) __extension__ ({ \
45492   poly8x16_t __s0 = __p0; \
45493   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
45494   poly8x16_t __ret; \
45495   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
45496   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
45497   __ret; \
45498 })
45499 #endif
45500 
45501 #ifdef __LITTLE_ENDIAN__
45502 #define vdupq_laneq_p64(__p0, __p1) __extension__ ({ \
45503   poly64x2_t __s0 = __p0; \
45504   poly64x2_t __ret; \
45505   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
45506   __ret; \
45507 })
45508 #else
45509 #define vdupq_laneq_p64(__p0, __p1) __extension__ ({ \
45510   poly64x2_t __s0 = __p0; \
45511   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
45512   poly64x2_t __ret; \
45513   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
45514   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
45515   __ret; \
45516 })
45517 #endif
45518 
45519 #ifdef __LITTLE_ENDIAN__
45520 #define vdupq_laneq_p16(__p0, __p1) __extension__ ({ \
45521   poly16x8_t __s0 = __p0; \
45522   poly16x8_t __ret; \
45523   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
45524   __ret; \
45525 })
45526 #else
45527 #define vdupq_laneq_p16(__p0, __p1) __extension__ ({ \
45528   poly16x8_t __s0 = __p0; \
45529   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
45530   poly16x8_t __ret; \
45531   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
45532   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
45533   __ret; \
45534 })
45535 #endif
45536 
45537 #ifdef __LITTLE_ENDIAN__
45538 #define vdupq_laneq_u8(__p0, __p1) __extension__ ({ \
45539   uint8x16_t __s0 = __p0; \
45540   uint8x16_t __ret; \
45541   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
45542   __ret; \
45543 })
45544 #else
45545 #define vdupq_laneq_u8(__p0, __p1) __extension__ ({ \
45546   uint8x16_t __s0 = __p0; \
45547   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
45548   uint8x16_t __ret; \
45549   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
45550   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
45551   __ret; \
45552 })
45553 #endif
45554 
45555 #ifdef __LITTLE_ENDIAN__
45556 #define vdupq_laneq_u32(__p0, __p1) __extension__ ({ \
45557   uint32x4_t __s0 = __p0; \
45558   uint32x4_t __ret; \
45559   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
45560   __ret; \
45561 })
45562 #else
45563 #define vdupq_laneq_u32(__p0, __p1) __extension__ ({ \
45564   uint32x4_t __s0 = __p0; \
45565   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
45566   uint32x4_t __ret; \
45567   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
45568   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
45569   __ret; \
45570 })
45571 #endif
45572 
45573 #ifdef __LITTLE_ENDIAN__
45574 #define vdupq_laneq_u64(__p0, __p1) __extension__ ({ \
45575   uint64x2_t __s0 = __p0; \
45576   uint64x2_t __ret; \
45577   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
45578   __ret; \
45579 })
45580 #else
45581 #define vdupq_laneq_u64(__p0, __p1) __extension__ ({ \
45582   uint64x2_t __s0 = __p0; \
45583   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
45584   uint64x2_t __ret; \
45585   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
45586   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
45587   __ret; \
45588 })
45589 #endif
45590 
45591 #ifdef __LITTLE_ENDIAN__
45592 #define vdupq_laneq_u16(__p0, __p1) __extension__ ({ \
45593   uint16x8_t __s0 = __p0; \
45594   uint16x8_t __ret; \
45595   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
45596   __ret; \
45597 })
45598 #else
45599 #define vdupq_laneq_u16(__p0, __p1) __extension__ ({ \
45600   uint16x8_t __s0 = __p0; \
45601   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
45602   uint16x8_t __ret; \
45603   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
45604   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
45605   __ret; \
45606 })
45607 #endif
45608 
45609 #ifdef __LITTLE_ENDIAN__
45610 #define vdupq_laneq_s8(__p0, __p1) __extension__ ({ \
45611   int8x16_t __s0 = __p0; \
45612   int8x16_t __ret; \
45613   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
45614   __ret; \
45615 })
45616 #else
45617 #define vdupq_laneq_s8(__p0, __p1) __extension__ ({ \
45618   int8x16_t __s0 = __p0; \
45619   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
45620   int8x16_t __ret; \
45621   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
45622   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
45623   __ret; \
45624 })
45625 #endif
45626 
45627 #ifdef __LITTLE_ENDIAN__
45628 #define vdupq_laneq_f64(__p0, __p1) __extension__ ({ \
45629   float64x2_t __s0 = __p0; \
45630   float64x2_t __ret; \
45631   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
45632   __ret; \
45633 })
45634 #else
45635 #define vdupq_laneq_f64(__p0, __p1) __extension__ ({ \
45636   float64x2_t __s0 = __p0; \
45637   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
45638   float64x2_t __ret; \
45639   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
45640   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
45641   __ret; \
45642 })
45643 #endif
45644 
45645 #ifdef __LITTLE_ENDIAN__
45646 #define vdupq_laneq_f32(__p0, __p1) __extension__ ({ \
45647   float32x4_t __s0 = __p0; \
45648   float32x4_t __ret; \
45649   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
45650   __ret; \
45651 })
45652 #else
45653 #define vdupq_laneq_f32(__p0, __p1) __extension__ ({ \
45654   float32x4_t __s0 = __p0; \
45655   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
45656   float32x4_t __ret; \
45657   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
45658   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
45659   __ret; \
45660 })
45661 #endif
45662 
45663 #ifdef __LITTLE_ENDIAN__
45664 #define vdupq_laneq_f16(__p0, __p1) __extension__ ({ \
45665   float16x8_t __s0 = __p0; \
45666   float16x8_t __ret; \
45667   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
45668   __ret; \
45669 })
45670 #else
45671 #define vdupq_laneq_f16(__p0, __p1) __extension__ ({ \
45672   float16x8_t __s0 = __p0; \
45673   float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
45674   float16x8_t __ret; \
45675   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
45676   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
45677   __ret; \
45678 })
45679 #endif
45680 
45681 #ifdef __LITTLE_ENDIAN__
45682 #define vdupq_laneq_s32(__p0, __p1) __extension__ ({ \
45683   int32x4_t __s0 = __p0; \
45684   int32x4_t __ret; \
45685   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
45686   __ret; \
45687 })
45688 #else
45689 #define vdupq_laneq_s32(__p0, __p1) __extension__ ({ \
45690   int32x4_t __s0 = __p0; \
45691   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
45692   int32x4_t __ret; \
45693   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
45694   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
45695   __ret; \
45696 })
45697 #endif
45698 
45699 #ifdef __LITTLE_ENDIAN__
45700 #define vdupq_laneq_s64(__p0, __p1) __extension__ ({ \
45701   int64x2_t __s0 = __p0; \
45702   int64x2_t __ret; \
45703   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
45704   __ret; \
45705 })
45706 #else
45707 #define vdupq_laneq_s64(__p0, __p1) __extension__ ({ \
45708   int64x2_t __s0 = __p0; \
45709   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
45710   int64x2_t __ret; \
45711   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
45712   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
45713   __ret; \
45714 })
45715 #endif
45716 
45717 #ifdef __LITTLE_ENDIAN__
45718 #define vdupq_laneq_s16(__p0, __p1) __extension__ ({ \
45719   int16x8_t __s0 = __p0; \
45720   int16x8_t __ret; \
45721   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
45722   __ret; \
45723 })
45724 #else
45725 #define vdupq_laneq_s16(__p0, __p1) __extension__ ({ \
45726   int16x8_t __s0 = __p0; \
45727   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
45728   int16x8_t __ret; \
45729   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
45730   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
45731   __ret; \
45732 })
45733 #endif
45734 
45735 #ifdef __LITTLE_ENDIAN__
45736 #define vdup_laneq_u8(__p0, __p1) __extension__ ({ \
45737   uint8x16_t __s0 = __p0; \
45738   uint8x8_t __ret; \
45739   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
45740   __ret; \
45741 })
45742 #else
45743 #define vdup_laneq_u8(__p0, __p1) __extension__ ({ \
45744   uint8x16_t __s0 = __p0; \
45745   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
45746   uint8x8_t __ret; \
45747   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
45748   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
45749   __ret; \
45750 })
45751 #endif
45752 
45753 #ifdef __LITTLE_ENDIAN__
45754 #define vdup_laneq_u32(__p0, __p1) __extension__ ({ \
45755   uint32x4_t __s0 = __p0; \
45756   uint32x2_t __ret; \
45757   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
45758   __ret; \
45759 })
45760 #else
45761 #define vdup_laneq_u32(__p0, __p1) __extension__ ({ \
45762   uint32x4_t __s0 = __p0; \
45763   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
45764   uint32x2_t __ret; \
45765   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
45766   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
45767   __ret; \
45768 })
45769 #endif
45770 
45771 #ifdef __LITTLE_ENDIAN__
45772 #define vdup_laneq_u64(__p0, __p1) __extension__ ({ \
45773   uint64x2_t __s0 = __p0; \
45774   uint64x1_t __ret; \
45775   __ret = __builtin_shufflevector(__s0, __s0, __p1); \
45776   __ret; \
45777 })
45778 #else
45779 #define vdup_laneq_u64(__p0, __p1) __extension__ ({ \
45780   uint64x2_t __s0 = __p0; \
45781   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
45782   uint64x1_t __ret; \
45783   __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \
45784   __ret; \
45785 })
45786 #endif
45787 
45788 #ifdef __LITTLE_ENDIAN__
45789 #define vdup_laneq_u16(__p0, __p1) __extension__ ({ \
45790   uint16x8_t __s0 = __p0; \
45791   uint16x4_t __ret; \
45792   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
45793   __ret; \
45794 })
45795 #else
45796 #define vdup_laneq_u16(__p0, __p1) __extension__ ({ \
45797   uint16x8_t __s0 = __p0; \
45798   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
45799   uint16x4_t __ret; \
45800   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
45801   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
45802   __ret; \
45803 })
45804 #endif
45805 
45806 #ifdef __LITTLE_ENDIAN__
45807 #define vdup_laneq_s8(__p0, __p1) __extension__ ({ \
45808   int8x16_t __s0 = __p0; \
45809   int8x8_t __ret; \
45810   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
45811   __ret; \
45812 })
45813 #else
45814 #define vdup_laneq_s8(__p0, __p1) __extension__ ({ \
45815   int8x16_t __s0 = __p0; \
45816   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
45817   int8x8_t __ret; \
45818   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
45819   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
45820   __ret; \
45821 })
45822 #endif
45823 
45824 #ifdef __LITTLE_ENDIAN__
45825 #define vdup_laneq_f64(__p0, __p1) __extension__ ({ \
45826   float64x2_t __s0 = __p0; \
45827   float64x1_t __ret; \
45828   __ret = __builtin_shufflevector(__s0, __s0, __p1); \
45829   __ret; \
45830 })
45831 #else
45832 #define vdup_laneq_f64(__p0, __p1) __extension__ ({ \
45833   float64x2_t __s0 = __p0; \
45834   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
45835   float64x1_t __ret; \
45836   __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \
45837   __ret; \
45838 })
45839 #endif
45840 
45841 #ifdef __LITTLE_ENDIAN__
45842 #define vdup_laneq_f32(__p0, __p1) __extension__ ({ \
45843   float32x4_t __s0 = __p0; \
45844   float32x2_t __ret; \
45845   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
45846   __ret; \
45847 })
45848 #else
45849 #define vdup_laneq_f32(__p0, __p1) __extension__ ({ \
45850   float32x4_t __s0 = __p0; \
45851   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
45852   float32x2_t __ret; \
45853   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
45854   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
45855   __ret; \
45856 })
45857 #endif
45858 
45859 #ifdef __LITTLE_ENDIAN__
45860 #define vdup_laneq_f16(__p0, __p1) __extension__ ({ \
45861   float16x8_t __s0 = __p0; \
45862   float16x4_t __ret; \
45863   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
45864   __ret; \
45865 })
45866 #else
45867 #define vdup_laneq_f16(__p0, __p1) __extension__ ({ \
45868   float16x8_t __s0 = __p0; \
45869   float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
45870   float16x4_t __ret; \
45871   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
45872   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
45873   __ret; \
45874 })
45875 #endif
45876 
45877 #ifdef __LITTLE_ENDIAN__
45878 #define vdup_laneq_s32(__p0, __p1) __extension__ ({ \
45879   int32x4_t __s0 = __p0; \
45880   int32x2_t __ret; \
45881   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
45882   __ret; \
45883 })
45884 #else
45885 #define vdup_laneq_s32(__p0, __p1) __extension__ ({ \
45886   int32x4_t __s0 = __p0; \
45887   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
45888   int32x2_t __ret; \
45889   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
45890   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
45891   __ret; \
45892 })
45893 #endif
45894 
45895 #ifdef __LITTLE_ENDIAN__
45896 #define vdup_laneq_s64(__p0, __p1) __extension__ ({ \
45897   int64x2_t __s0 = __p0; \
45898   int64x1_t __ret; \
45899   __ret = __builtin_shufflevector(__s0, __s0, __p1); \
45900   __ret; \
45901 })
45902 #else
45903 #define vdup_laneq_s64(__p0, __p1) __extension__ ({ \
45904   int64x2_t __s0 = __p0; \
45905   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
45906   int64x1_t __ret; \
45907   __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \
45908   __ret; \
45909 })
45910 #endif
45911 
45912 #ifdef __LITTLE_ENDIAN__
45913 #define vdup_laneq_s16(__p0, __p1) __extension__ ({ \
45914   int16x8_t __s0 = __p0; \
45915   int16x4_t __ret; \
45916   __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
45917   __ret; \
45918 })
45919 #else
45920 #define vdup_laneq_s16(__p0, __p1) __extension__ ({ \
45921   int16x8_t __s0 = __p0; \
45922   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
45923   int16x4_t __ret; \
45924   __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
45925   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
45926   __ret; \
45927 })
45928 #endif
45929 
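/* vdup_n_* / vdupq_n_* intrinsics: replicate a scalar into every lane of the
 * result.  For multi-lane results the big-endian variant reverses the
 * constructed vector to restore the expected in-register lane order; the
 * single-lane forms are identical on both endiannesses. */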
45930 #ifdef __LITTLE_ENDIAN__
45931 __ai poly64x1_t vdup_n_p64(poly64_t __p0) {
45932   poly64x1_t __ret;
45933   __ret = (poly64x1_t) {__p0};
45934   return __ret;
45935 }
45936 #else
45937 __ai poly64x1_t vdup_n_p64(poly64_t __p0) {
45938   poly64x1_t __ret;
45939   __ret = (poly64x1_t) {__p0};
45940   return __ret;
45941 }
45942 #endif
45943 
45944 #ifdef __LITTLE_ENDIAN__
45945 __ai poly64x2_t vdupq_n_p64(poly64_t __p0) {
45946   poly64x2_t __ret;
45947   __ret = (poly64x2_t) {__p0, __p0};
45948   return __ret;
45949 }
45950 #else
45951 __ai poly64x2_t vdupq_n_p64(poly64_t __p0) {
45952   poly64x2_t __ret;
45953   __ret = (poly64x2_t) {__p0, __p0};
45954   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
45955   return __ret;
45956 }
45957 #endif
45958 
45959 #ifdef __LITTLE_ENDIAN__
45960 __ai float64x2_t vdupq_n_f64(float64_t __p0) {
45961   float64x2_t __ret;
45962   __ret = (float64x2_t) {__p0, __p0};
45963   return __ret;
45964 }
45965 #else
45966 __ai float64x2_t vdupq_n_f64(float64_t __p0) {
45967   float64x2_t __ret;
45968   __ret = (float64x2_t) {__p0, __p0};
45969   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
45970   return __ret;
45971 }
45972 #endif
45973 
45974 #ifdef __LITTLE_ENDIAN__
45975 __ai float64x1_t vdup_n_f64(float64_t __p0) {
45976   float64x1_t __ret;
45977   __ret = (float64x1_t) {__p0};
45978   return __ret;
45979 }
45980 #else
45981 __ai float64x1_t vdup_n_f64(float64_t __p0) {
45982   float64x1_t __ret;
45983   __ret = (float64x1_t) {__p0};
45984   return __ret;
45985 }
45986 #endif
45987 
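/* vext_* / vextq_* intrinsics: concatenate the two operands and extract a
 * result vector starting at element __p2 of the first operand.  The trailing
 * integer argument of __builtin_neon_vext_v is a type code selecting the
 * element type for the generic builtin. */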
45988 #ifdef __LITTLE_ENDIAN__
45989 #define vext_p64(__p0, __p1, __p2) __extension__ ({ \
45990   poly64x1_t __s0 = __p0; \
45991   poly64x1_t __s1 = __p1; \
45992   poly64x1_t __ret; \
45993   __ret = (poly64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
45994   __ret; \
45995 })
45996 #else
45997 #define vext_p64(__p0, __p1, __p2) __extension__ ({ \
45998   poly64x1_t __s0 = __p0; \
45999   poly64x1_t __s1 = __p1; \
46000   poly64x1_t __ret; \
46001   __ret = (poly64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
46002   __ret; \
46003 })
46004 #endif
46005 
46006 #ifdef __LITTLE_ENDIAN__
46007 #define vextq_p64(__p0, __p1, __p2) __extension__ ({ \
46008   poly64x2_t __s0 = __p0; \
46009   poly64x2_t __s1 = __p1; \
46010   poly64x2_t __ret; \
46011   __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
46012   __ret; \
46013 })
46014 #else
46015 #define vextq_p64(__p0, __p1, __p2) __extension__ ({ \
46016   poly64x2_t __s0 = __p0; \
46017   poly64x2_t __s1 = __p1; \
46018   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
46019   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
46020   poly64x2_t __ret; \
46021   __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
46022   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
46023   __ret; \
46024 })
46025 #endif
46026 
46027 #ifdef __LITTLE_ENDIAN__
46028 #define vextq_f64(__p0, __p1, __p2) __extension__ ({ \
46029   float64x2_t __s0 = __p0; \
46030   float64x2_t __s1 = __p1; \
46031   float64x2_t __ret; \
46032   __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 42); \
46033   __ret; \
46034 })
46035 #else
46036 #define vextq_f64(__p0, __p1, __p2) __extension__ ({ \
46037   float64x2_t __s0 = __p0; \
46038   float64x2_t __s1 = __p1; \
46039   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
46040   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
46041   float64x2_t __ret; \
46042   __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 42); \
46043   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
46044   __ret; \
46045 })
46046 #endif
46047 
46048 #ifdef __LITTLE_ENDIAN__
46049 #define vext_f64(__p0, __p1, __p2) __extension__ ({ \
46050   float64x1_t __s0 = __p0; \
46051   float64x1_t __s1 = __p1; \
46052   float64x1_t __ret; \
46053   __ret = (float64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
46054   __ret; \
46055 })
46056 #else
46057 #define vext_f64(__p0, __p1, __p2) __extension__ ({ \
46058   float64x1_t __s0 = __p0; \
46059   float64x1_t __s1 = __p1; \
46060   float64x1_t __ret; \
46061   __ret = (float64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
46062   __ret; \
46063 })
46064 #endif
46065 
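/* vfma*_f64/f32: fused multiply-accumulate, computing __p0 + __p1 * __p2 with
 * a single rounding.  The __noswap_ helpers perform the same operation without
 * the big-endian lane reversal; they are called from other big-endian wrappers
 * whose operands have already been reversed. */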
46066 #ifdef __LITTLE_ENDIAN__
46067 __ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
46068   float64x2_t __ret;
46069   __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
46070   return __ret;
46071 }
46072 #else
46073 __ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
46074   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
46075   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
46076   float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
46077   float64x2_t __ret;
46078   __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
46079   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
46080   return __ret;
46081 }
46082 __ai float64x2_t __noswap_vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
46083   float64x2_t __ret;
46084   __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
46085   return __ret;
46086 }
46087 #endif
46088 
46089 #ifdef __LITTLE_ENDIAN__
46090 __ai float64x1_t vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
46091   float64x1_t __ret;
46092   __ret = (float64x1_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
46093   return __ret;
46094 }
46095 #else
46096 __ai float64x1_t vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
46097   float64x1_t __ret;
46098   __ret = (float64x1_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
46099   return __ret;
46100 }
46101 #endif
46102 
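/* vfma<d,s>_lane_* and vfma*_lane_*: fused multiply-accumulate where the
 * multiplier is a single lane, selected by __p3, of a 64-bit vector; the
 * *_laneq_* forms further below select the lane from a 128-bit vector. */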
46103 #ifdef __LITTLE_ENDIAN__
46104 #define vfmad_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
46105   float64_t __s0 = __p0; \
46106   float64_t __s1 = __p1; \
46107   float64x1_t __s2 = __p2; \
46108   float64_t __ret; \
46109   __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (int8x8_t)__s2, __p3); \
46110   __ret; \
46111 })
46112 #else
46113 #define vfmad_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
46114   float64_t __s0 = __p0; \
46115   float64_t __s1 = __p1; \
46116   float64x1_t __s2 = __p2; \
46117   float64_t __ret; \
46118   __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (int8x8_t)__s2, __p3); \
46119   __ret; \
46120 })
46121 #define __noswap_vfmad_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
46122   float64_t __s0 = __p0; \
46123   float64_t __s1 = __p1; \
46124   float64x1_t __s2 = __p2; \
46125   float64_t __ret; \
46126   __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (int8x8_t)__s2, __p3); \
46127   __ret; \
46128 })
46129 #endif
46130 
46131 #ifdef __LITTLE_ENDIAN__
46132 #define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
46133   float32_t __s0 = __p0; \
46134   float32_t __s1 = __p1; \
46135   float32x2_t __s2 = __p2; \
46136   float32_t __ret; \
46137   __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (int8x8_t)__s2, __p3); \
46138   __ret; \
46139 })
46140 #else
46141 #define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
46142   float32_t __s0 = __p0; \
46143   float32_t __s1 = __p1; \
46144   float32x2_t __s2 = __p2; \
46145   float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
46146   float32_t __ret; \
46147   __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (int8x8_t)__rev2, __p3); \
46148   __ret; \
46149 })
46150 #define __noswap_vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
46151   float32_t __s0 = __p0; \
46152   float32_t __s1 = __p1; \
46153   float32x2_t __s2 = __p2; \
46154   float32_t __ret; \
46155   __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (int8x8_t)__s2, __p3); \
46156   __ret; \
46157 })
46158 #endif
46159 
46160 #ifdef __LITTLE_ENDIAN__
46161 #define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
46162   float64x2_t __s0 = __p0; \
46163   float64x2_t __s1 = __p1; \
46164   float64x1_t __s2 = __p2; \
46165   float64x2_t __ret; \
46166   __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \
46167   __ret; \
46168 })
46169 #else
46170 #define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
46171   float64x2_t __s0 = __p0; \
46172   float64x2_t __s1 = __p1; \
46173   float64x1_t __s2 = __p2; \
46174   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
46175   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
46176   float64x2_t __ret; \
46177   __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__s2, __p3, 42); \
46178   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
46179   __ret; \
46180 })
46181 #define __noswap_vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
46182   float64x2_t __s0 = __p0; \
46183   float64x2_t __s1 = __p1; \
46184   float64x1_t __s2 = __p2; \
46185   float64x2_t __ret; \
46186   __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \
46187   __ret; \
46188 })
46189 #endif
46190 
46191 #ifdef __LITTLE_ENDIAN__
46192 #define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
46193   float32x4_t __s0 = __p0; \
46194   float32x4_t __s1 = __p1; \
46195   float32x2_t __s2 = __p2; \
46196   float32x4_t __ret; \
46197   __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \
46198   __ret; \
46199 })
46200 #else
46201 #define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
46202   float32x4_t __s0 = __p0; \
46203   float32x4_t __s1 = __p1; \
46204   float32x2_t __s2 = __p2; \
46205   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
46206   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
46207   float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
46208   float32x4_t __ret; \
46209   __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 41); \
46210   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
46211   __ret; \
46212 })
46213 #define __noswap_vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
46214   float32x4_t __s0 = __p0; \
46215   float32x4_t __s1 = __p1; \
46216   float32x2_t __s2 = __p2; \
46217   float32x4_t __ret; \
46218   __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \
46219   __ret; \
46220 })
46221 #endif
46222 
46223 #ifdef __LITTLE_ENDIAN__
46224 #define vfma_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
46225   float64x1_t __s0 = __p0; \
46226   float64x1_t __s1 = __p1; \
46227   float64x1_t __s2 = __p2; \
46228   float64x1_t __ret; \
46229   __ret = (float64x1_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 10); \
46230   __ret; \
46231 })
46232 #else
46233 #define vfma_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
46234   float64x1_t __s0 = __p0; \
46235   float64x1_t __s1 = __p1; \
46236   float64x1_t __s2 = __p2; \
46237   float64x1_t __ret; \
46238   __ret = (float64x1_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 10); \
46239   __ret; \
46240 })
46241 #define __noswap_vfma_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
46242   float64x1_t __s0 = __p0; \
46243   float64x1_t __s1 = __p1; \
46244   float64x1_t __s2 = __p2; \
46245   float64x1_t __ret; \
46246   __ret = (float64x1_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 10); \
46247   __ret; \
46248 })
46249 #endif
46250 
46251 #ifdef __LITTLE_ENDIAN__
46252 #define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
46253   float32x2_t __s0 = __p0; \
46254   float32x2_t __s1 = __p1; \
46255   float32x2_t __s2 = __p2; \
46256   float32x2_t __ret; \
46257   __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \
46258   __ret; \
46259 })
46260 #else
46261 #define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
46262   float32x2_t __s0 = __p0; \
46263   float32x2_t __s1 = __p1; \
46264   float32x2_t __s2 = __p2; \
46265   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
46266   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
46267   float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
46268   float32x2_t __ret; \
46269   __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 9); \
46270   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
46271   __ret; \
46272 })
46273 #define __noswap_vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
46274   float32x2_t __s0 = __p0; \
46275   float32x2_t __s1 = __p1; \
46276   float32x2_t __s2 = __p2; \
46277   float32x2_t __ret; \
46278   __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \
46279   __ret; \
46280 })
46281 #endif
46282 
46283 #ifdef __LITTLE_ENDIAN__
46284 #define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
46285   float64_t __s0 = __p0; \
46286   float64_t __s1 = __p1; \
46287   float64x2_t __s2 = __p2; \
46288   float64_t __ret; \
46289   __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (int8x16_t)__s2, __p3); \
46290   __ret; \
46291 })
46292 #else
46293 #define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
46294   float64_t __s0 = __p0; \
46295   float64_t __s1 = __p1; \
46296   float64x2_t __s2 = __p2; \
46297   float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
46298   float64_t __ret; \
46299   __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (int8x16_t)__rev2, __p3); \
46300   __ret; \
46301 })
46302 #define __noswap_vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
46303   float64_t __s0 = __p0; \
46304   float64_t __s1 = __p1; \
46305   float64x2_t __s2 = __p2; \
46306   float64_t __ret; \
46307   __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (int8x16_t)__s2, __p3); \
46308   __ret; \
46309 })
46310 #endif
46311 
46312 #ifdef __LITTLE_ENDIAN__
46313 #define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
46314   float32_t __s0 = __p0; \
46315   float32_t __s1 = __p1; \
46316   float32x4_t __s2 = __p2; \
46317   float32_t __ret; \
46318   __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (int8x16_t)__s2, __p3); \
46319   __ret; \
46320 })
46321 #else
46322 #define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
46323   float32_t __s0 = __p0; \
46324   float32_t __s1 = __p1; \
46325   float32x4_t __s2 = __p2; \
46326   float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
46327   float32_t __ret; \
46328   __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (int8x16_t)__rev2, __p3); \
46329   __ret; \
46330 })
46331 #define __noswap_vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
46332   float32_t __s0 = __p0; \
46333   float32_t __s1 = __p1; \
46334   float32x4_t __s2 = __p2; \
46335   float32_t __ret; \
46336   __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (int8x16_t)__s2, __p3); \
46337   __ret; \
46338 })
46339 #endif
46340 
46341 #ifdef __LITTLE_ENDIAN__
46342 #define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
46343   float64x2_t __s0 = __p0; \
46344   float64x2_t __s1 = __p1; \
46345   float64x2_t __s2 = __p2; \
46346   float64x2_t __ret; \
46347   __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \
46348   __ret; \
46349 })
46350 #else
46351 #define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
46352   float64x2_t __s0 = __p0; \
46353   float64x2_t __s1 = __p1; \
46354   float64x2_t __s2 = __p2; \
46355   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
46356   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
46357   float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
46358   float64x2_t __ret; \
46359   __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 42); \
46360   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
46361   __ret; \
46362 })
46363 #define __noswap_vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
46364   float64x2_t __s0 = __p0; \
46365   float64x2_t __s1 = __p1; \
46366   float64x2_t __s2 = __p2; \
46367   float64x2_t __ret; \
46368   __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \
46369   __ret; \
46370 })
46371 #endif
46372 
46373 #ifdef __LITTLE_ENDIAN__
46374 #define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
46375   float32x4_t __s0 = __p0; \
46376   float32x4_t __s1 = __p1; \
46377   float32x4_t __s2 = __p2; \
46378   float32x4_t __ret; \
46379   __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \
46380   __ret; \
46381 })
46382 #else
46383 #define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
46384   float32x4_t __s0 = __p0; \
46385   float32x4_t __s1 = __p1; \
46386   float32x4_t __s2 = __p2; \
46387   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
46388   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
46389   float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
46390   float32x4_t __ret; \
46391   __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 41); \
46392   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
46393   __ret; \
46394 })
46395 #define __noswap_vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
46396   float32x4_t __s0 = __p0; \
46397   float32x4_t __s1 = __p1; \
46398   float32x4_t __s2 = __p2; \
46399   float32x4_t __ret; \
46400   __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \
46401   __ret; \
46402 })
46403 #endif
46404 
46405 #ifdef __LITTLE_ENDIAN__
46406 #define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
46407   float64x1_t __s0 = __p0; \
46408   float64x1_t __s1 = __p1; \
46409   float64x2_t __s2 = __p2; \
46410   float64x1_t __ret; \
46411   __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \
46412   __ret; \
46413 })
46414 #else
46415 #define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
46416   float64x1_t __s0 = __p0; \
46417   float64x1_t __s1 = __p1; \
46418   float64x2_t __s2 = __p2; \
46419   float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
46420   float64x1_t __ret; \
46421   __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__rev2, __p3, 10); \
46422   __ret; \
46423 })
46424 #define __noswap_vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
46425   float64x1_t __s0 = __p0; \
46426   float64x1_t __s1 = __p1; \
46427   float64x2_t __s2 = __p2; \
46428   float64x1_t __ret; \
46429   __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \
46430   __ret; \
46431 })
46432 #endif
46433 
46434 #ifdef __LITTLE_ENDIAN__
46435 #define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
46436   float32x2_t __s0 = __p0; \
46437   float32x2_t __s1 = __p1; \
46438   float32x4_t __s2 = __p2; \
46439   float32x2_t __ret; \
46440   __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \
46441   __ret; \
46442 })
46443 #else
46444 #define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
46445   float32x2_t __s0 = __p0; \
46446   float32x2_t __s1 = __p1; \
46447   float32x4_t __s2 = __p2; \
46448   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
46449   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
46450   float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
46451   float32x2_t __ret; \
46452   __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 9); \
46453   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
46454   __ret; \
46455 })
46456 #define __noswap_vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
46457   float32x2_t __s0 = __p0; \
46458   float32x2_t __s1 = __p1; \
46459   float32x4_t __s2 = __p2; \
46460   float32x2_t __ret; \
46461   __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \
46462   __ret; \
46463 })
46464 #endif
46465 
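/* vfma*_n_*: fused multiply-accumulate with a scalar multiplier, implemented
 * by splatting the scalar into a vector and reusing the vfma*_f* intrinsics
 * (or their __noswap_ helpers on big-endian targets). */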
46466 #ifdef __LITTLE_ENDIAN__
46467 __ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
46468   float64x2_t __ret;
46469   __ret = vfmaq_f64(__p0, __p1, (float64x2_t) {__p2, __p2});
46470   return __ret;
46471 }
46472 #else
46473 __ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
46474   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
46475   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
46476   float64x2_t __ret;
46477   __ret = __noswap_vfmaq_f64(__rev0, __rev1, (float64x2_t) {__p2, __p2});
46478   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
46479   return __ret;
46480 }
46481 #endif
46482 
46483 #ifdef __LITTLE_ENDIAN__
46484 __ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
46485   float32x4_t __ret;
46486   __ret = vfmaq_f32(__p0, __p1, (float32x4_t) {__p2, __p2, __p2, __p2});
46487   return __ret;
46488 }
46489 #else
46490 __ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
46491   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
46492   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
46493   float32x4_t __ret;
46494   __ret = __noswap_vfmaq_f32(__rev0, __rev1, (float32x4_t) {__p2, __p2, __p2, __p2});
46495   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
46496   return __ret;
46497 }
46498 #endif
46499 
46500 #ifdef __LITTLE_ENDIAN__
46501 __ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
46502   float32x2_t __ret;
46503   __ret = vfma_f32(__p0, __p1, (float32x2_t) {__p2, __p2});
46504   return __ret;
46505 }
46506 #else
46507 __ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
46508   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
46509   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
46510   float32x2_t __ret;
46511   __ret = __noswap_vfma_f32(__rev0, __rev1, (float32x2_t) {__p2, __p2});
46512   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
46513   return __ret;
46514 }
46515 #endif
46516 
46517 #ifdef __LITTLE_ENDIAN__
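/* vfms*: fused multiply-subtract, computing __p0 - __p1 * __p2 with a single
 * rounding via __builtin_neon_vfmsq_v / __builtin_neon_vfms_v. */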
46518 __ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
46519   float64x2_t __ret;
46520   __ret = (float64x2_t) __builtin_neon_vfmsq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
46521   return __ret;
46522 }
46523 #else
46524 __ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
46525   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
46526   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
46527   float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
46528   float64x2_t __ret;
46529   __ret = (float64x2_t) __builtin_neon_vfmsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
46530   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
46531   return __ret;
46532 }
46533 __ai float64x2_t __noswap_vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
46534   float64x2_t __ret;
46535   __ret = (float64x2_t) __builtin_neon_vfmsq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
46536   return __ret;
46537 }
46538 #endif
46539 
46540 #ifdef __LITTLE_ENDIAN__
46541 __ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
46542   float32x4_t __ret;
46543   __ret = (float32x4_t) __builtin_neon_vfmsq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
46544   return __ret;
46545 }
46546 #else
46547 __ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
46548   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
46549   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
46550   float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
46551   float32x4_t __ret;
46552   __ret = (float32x4_t) __builtin_neon_vfmsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
46553   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
46554   return __ret;
46555 }
46556 __ai float32x4_t __noswap_vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
46557   float32x4_t __ret;
46558   __ret = (float32x4_t) __builtin_neon_vfmsq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
46559   return __ret;
46560 }
46561 #endif
46562 
46563 #ifdef __LITTLE_ENDIAN__
46564 __ai float64x1_t vfms_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
46565   float64x1_t __ret;
46566   __ret = (float64x1_t) __builtin_neon_vfms_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
46567   return __ret;
46568 }
46569 #else
46570 __ai float64x1_t vfms_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
46571   float64x1_t __ret;
46572   __ret = (float64x1_t) __builtin_neon_vfms_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
46573   return __ret;
46574 }
46575 #endif
46576 
46577 #ifdef __LITTLE_ENDIAN__
46578 __ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
46579   float32x2_t __ret;
46580   __ret = (float32x2_t) __builtin_neon_vfms_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
46581   return __ret;
46582 }
46583 #else
46584 __ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
46585   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
46586   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
46587   float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
46588   float32x2_t __ret;
46589   __ret = (float32x2_t) __builtin_neon_vfms_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
46590   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
46591   return __ret;
46592 }
46593 __ai float32x2_t __noswap_vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
46594   float32x2_t __ret;
46595   __ret = (float32x2_t) __builtin_neon_vfms_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
46596   return __ret;
46597 }
46598 #endif
46599 
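/* The vfms* lane forms are expressed in terms of the corresponding vfma* lane
 * intrinsics with the lane operand negated, since a - b*c == a + b*(-c). */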
46600 #ifdef __LITTLE_ENDIAN__
46601 #define vfmsd_lane_f64(__p0_88, __p1_88, __p2_88, __p3_88) __extension__ ({ \
46602   float64_t __s0_88 = __p0_88; \
46603   float64_t __s1_88 = __p1_88; \
46604   float64x1_t __s2_88 = __p2_88; \
46605   float64_t __ret_88; \
46606   __ret_88 = vfmad_lane_f64(__s0_88, __s1_88, -__s2_88, __p3_88); \
46607   __ret_88; \
46608 })
46609 #else
46610 #define vfmsd_lane_f64(__p0_89, __p1_89, __p2_89, __p3_89) __extension__ ({ \
46611   float64_t __s0_89 = __p0_89; \
46612   float64_t __s1_89 = __p1_89; \
46613   float64x1_t __s2_89 = __p2_89; \
46614   float64_t __ret_89; \
46615   __ret_89 = __noswap_vfmad_lane_f64(__s0_89, __s1_89, -__s2_89, __p3_89); \
46616   __ret_89; \
46617 })
46618 #endif
46619 
46620 #ifdef __LITTLE_ENDIAN__
46621 #define vfmss_lane_f32(__p0_90, __p1_90, __p2_90, __p3_90) __extension__ ({ \
46622   float32_t __s0_90 = __p0_90; \
46623   float32_t __s1_90 = __p1_90; \
46624   float32x2_t __s2_90 = __p2_90; \
46625   float32_t __ret_90; \
46626   __ret_90 = vfmas_lane_f32(__s0_90, __s1_90, -__s2_90, __p3_90); \
46627   __ret_90; \
46628 })
46629 #else
46630 #define vfmss_lane_f32(__p0_91, __p1_91, __p2_91, __p3_91) __extension__ ({ \
46631   float32_t __s0_91 = __p0_91; \
46632   float32_t __s1_91 = __p1_91; \
46633   float32x2_t __s2_91 = __p2_91; \
46634   float32x2_t __rev2_91;  __rev2_91 = __builtin_shufflevector(__s2_91, __s2_91, 1, 0); \
46635   float32_t __ret_91; \
46636   __ret_91 = __noswap_vfmas_lane_f32(__s0_91, __s1_91, -__rev2_91, __p3_91); \
46637   __ret_91; \
46638 })
46639 #endif
46640 
46641 #ifdef __LITTLE_ENDIAN__
46642 #define vfmsq_lane_f64(__p0_92, __p1_92, __p2_92, __p3_92) __extension__ ({ \
46643   float64x2_t __s0_92 = __p0_92; \
46644   float64x2_t __s1_92 = __p1_92; \
46645   float64x1_t __s2_92 = __p2_92; \
46646   float64x2_t __ret_92; \
46647   __ret_92 = vfmaq_lane_f64(__s0_92, __s1_92, -__s2_92, __p3_92); \
46648   __ret_92; \
46649 })
46650 #else
46651 #define vfmsq_lane_f64(__p0_93, __p1_93, __p2_93, __p3_93) __extension__ ({ \
46652   float64x2_t __s0_93 = __p0_93; \
46653   float64x2_t __s1_93 = __p1_93; \
46654   float64x1_t __s2_93 = __p2_93; \
46655   float64x2_t __rev0_93;  __rev0_93 = __builtin_shufflevector(__s0_93, __s0_93, 1, 0); \
46656   float64x2_t __rev1_93;  __rev1_93 = __builtin_shufflevector(__s1_93, __s1_93, 1, 0); \
46657   float64x2_t __ret_93; \
46658   __ret_93 = __noswap_vfmaq_lane_f64(__rev0_93, __rev1_93, -__s2_93, __p3_93); \
46659   __ret_93 = __builtin_shufflevector(__ret_93, __ret_93, 1, 0); \
46660   __ret_93; \
46661 })
46662 #endif
46663 
46664 #ifdef __LITTLE_ENDIAN__
46665 #define vfmsq_lane_f32(__p0_94, __p1_94, __p2_94, __p3_94) __extension__ ({ \
46666   float32x4_t __s0_94 = __p0_94; \
46667   float32x4_t __s1_94 = __p1_94; \
46668   float32x2_t __s2_94 = __p2_94; \
46669   float32x4_t __ret_94; \
46670   __ret_94 = vfmaq_lane_f32(__s0_94, __s1_94, -__s2_94, __p3_94); \
46671   __ret_94; \
46672 })
46673 #else
46674 #define vfmsq_lane_f32(__p0_95, __p1_95, __p2_95, __p3_95) __extension__ ({ \
46675   float32x4_t __s0_95 = __p0_95; \
46676   float32x4_t __s1_95 = __p1_95; \
46677   float32x2_t __s2_95 = __p2_95; \
46678   float32x4_t __rev0_95;  __rev0_95 = __builtin_shufflevector(__s0_95, __s0_95, 3, 2, 1, 0); \
46679   float32x4_t __rev1_95;  __rev1_95 = __builtin_shufflevector(__s1_95, __s1_95, 3, 2, 1, 0); \
46680   float32x2_t __rev2_95;  __rev2_95 = __builtin_shufflevector(__s2_95, __s2_95, 1, 0); \
46681   float32x4_t __ret_95; \
46682   __ret_95 = __noswap_vfmaq_lane_f32(__rev0_95, __rev1_95, -__rev2_95, __p3_95); \
46683   __ret_95 = __builtin_shufflevector(__ret_95, __ret_95, 3, 2, 1, 0); \
46684   __ret_95; \
46685 })
46686 #endif
46687 
46688 #ifdef __LITTLE_ENDIAN__
46689 #define vfms_lane_f64(__p0_96, __p1_96, __p2_96, __p3_96) __extension__ ({ \
46690   float64x1_t __s0_96 = __p0_96; \
46691   float64x1_t __s1_96 = __p1_96; \
46692   float64x1_t __s2_96 = __p2_96; \
46693   float64x1_t __ret_96; \
46694   __ret_96 = vfma_lane_f64(__s0_96, __s1_96, -__s2_96, __p3_96); \
46695   __ret_96; \
46696 })
46697 #else
46698 #define vfms_lane_f64(__p0_97, __p1_97, __p2_97, __p3_97) __extension__ ({ \
46699   float64x1_t __s0_97 = __p0_97; \
46700   float64x1_t __s1_97 = __p1_97; \
46701   float64x1_t __s2_97 = __p2_97; \
46702   float64x1_t __ret_97; \
46703   __ret_97 = __noswap_vfma_lane_f64(__s0_97, __s1_97, -__s2_97, __p3_97); \
46704   __ret_97; \
46705 })
46706 #endif
46707 
46708 #ifdef __LITTLE_ENDIAN__
46709 #define vfms_lane_f32(__p0_98, __p1_98, __p2_98, __p3_98) __extension__ ({ \
46710   float32x2_t __s0_98 = __p0_98; \
46711   float32x2_t __s1_98 = __p1_98; \
46712   float32x2_t __s2_98 = __p2_98; \
46713   float32x2_t __ret_98; \
46714   __ret_98 = vfma_lane_f32(__s0_98, __s1_98, -__s2_98, __p3_98); \
46715   __ret_98; \
46716 })
46717 #else
46718 #define vfms_lane_f32(__p0_99, __p1_99, __p2_99, __p3_99) __extension__ ({ \
46719   float32x2_t __s0_99 = __p0_99; \
46720   float32x2_t __s1_99 = __p1_99; \
46721   float32x2_t __s2_99 = __p2_99; \
46722   float32x2_t __rev0_99;  __rev0_99 = __builtin_shufflevector(__s0_99, __s0_99, 1, 0); \
46723   float32x2_t __rev1_99;  __rev1_99 = __builtin_shufflevector(__s1_99, __s1_99, 1, 0); \
46724   float32x2_t __rev2_99;  __rev2_99 = __builtin_shufflevector(__s2_99, __s2_99, 1, 0); \
46725   float32x2_t __ret_99; \
46726   __ret_99 = __noswap_vfma_lane_f32(__rev0_99, __rev1_99, -__rev2_99, __p3_99); \
46727   __ret_99 = __builtin_shufflevector(__ret_99, __ret_99, 1, 0); \
46728   __ret_99; \
46729 })
46730 #endif
46731 
46732 #ifdef __LITTLE_ENDIAN__
46733 #define vfmsd_laneq_f64(__p0_100, __p1_100, __p2_100, __p3_100) __extension__ ({ \
46734   float64_t __s0_100 = __p0_100; \
46735   float64_t __s1_100 = __p1_100; \
46736   float64x2_t __s2_100 = __p2_100; \
46737   float64_t __ret_100; \
46738   __ret_100 = vfmad_laneq_f64(__s0_100, __s1_100, -__s2_100, __p3_100); \
46739   __ret_100; \
46740 })
46741 #else
46742 #define vfmsd_laneq_f64(__p0_101, __p1_101, __p2_101, __p3_101) __extension__ ({ \
46743   float64_t __s0_101 = __p0_101; \
46744   float64_t __s1_101 = __p1_101; \
46745   float64x2_t __s2_101 = __p2_101; \
46746   float64x2_t __rev2_101;  __rev2_101 = __builtin_shufflevector(__s2_101, __s2_101, 1, 0); \
46747   float64_t __ret_101; \
46748   __ret_101 = __noswap_vfmad_laneq_f64(__s0_101, __s1_101, -__rev2_101, __p3_101); \
46749   __ret_101; \
46750 })
46751 #endif
46752 
46753 #ifdef __LITTLE_ENDIAN__
46754 #define vfmss_laneq_f32(__p0_102, __p1_102, __p2_102, __p3_102) __extension__ ({ \
46755   float32_t __s0_102 = __p0_102; \
46756   float32_t __s1_102 = __p1_102; \
46757   float32x4_t __s2_102 = __p2_102; \
46758   float32_t __ret_102; \
46759   __ret_102 = vfmas_laneq_f32(__s0_102, __s1_102, -__s2_102, __p3_102); \
46760   __ret_102; \
46761 })
46762 #else
46763 #define vfmss_laneq_f32(__p0_103, __p1_103, __p2_103, __p3_103) __extension__ ({ \
46764   float32_t __s0_103 = __p0_103; \
46765   float32_t __s1_103 = __p1_103; \
46766   float32x4_t __s2_103 = __p2_103; \
46767   float32x4_t __rev2_103;  __rev2_103 = __builtin_shufflevector(__s2_103, __s2_103, 3, 2, 1, 0); \
46768   float32_t __ret_103; \
46769   __ret_103 = __noswap_vfmas_laneq_f32(__s0_103, __s1_103, -__rev2_103, __p3_103); \
46770   __ret_103; \
46771 })
46772 #endif
46773 
46774 #ifdef __LITTLE_ENDIAN__
46775 #define vfmsq_laneq_f64(__p0_104, __p1_104, __p2_104, __p3_104) __extension__ ({ \
46776   float64x2_t __s0_104 = __p0_104; \
46777   float64x2_t __s1_104 = __p1_104; \
46778   float64x2_t __s2_104 = __p2_104; \
46779   float64x2_t __ret_104; \
46780   __ret_104 = vfmaq_laneq_f64(__s0_104, __s1_104, -__s2_104, __p3_104); \
46781   __ret_104; \
46782 })
46783 #else
46784 #define vfmsq_laneq_f64(__p0_105, __p1_105, __p2_105, __p3_105) __extension__ ({ \
46785   float64x2_t __s0_105 = __p0_105; \
46786   float64x2_t __s1_105 = __p1_105; \
46787   float64x2_t __s2_105 = __p2_105; \
46788   float64x2_t __rev0_105;  __rev0_105 = __builtin_shufflevector(__s0_105, __s0_105, 1, 0); \
46789   float64x2_t __rev1_105;  __rev1_105 = __builtin_shufflevector(__s1_105, __s1_105, 1, 0); \
46790   float64x2_t __rev2_105;  __rev2_105 = __builtin_shufflevector(__s2_105, __s2_105, 1, 0); \
46791   float64x2_t __ret_105; \
46792   __ret_105 = __noswap_vfmaq_laneq_f64(__rev0_105, __rev1_105, -__rev2_105, __p3_105); \
46793   __ret_105 = __builtin_shufflevector(__ret_105, __ret_105, 1, 0); \
46794   __ret_105; \
46795 })
46796 #endif
46797 
46798 #ifdef __LITTLE_ENDIAN__
46799 #define vfmsq_laneq_f32(__p0_106, __p1_106, __p2_106, __p3_106) __extension__ ({ \
46800   float32x4_t __s0_106 = __p0_106; \
46801   float32x4_t __s1_106 = __p1_106; \
46802   float32x4_t __s2_106 = __p2_106; \
46803   float32x4_t __ret_106; \
46804   __ret_106 = vfmaq_laneq_f32(__s0_106, __s1_106, -__s2_106, __p3_106); \
46805   __ret_106; \
46806 })
46807 #else
46808 #define vfmsq_laneq_f32(__p0_107, __p1_107, __p2_107, __p3_107) __extension__ ({ \
46809   float32x4_t __s0_107 = __p0_107; \
46810   float32x4_t __s1_107 = __p1_107; \
46811   float32x4_t __s2_107 = __p2_107; \
46812   float32x4_t __rev0_107;  __rev0_107 = __builtin_shufflevector(__s0_107, __s0_107, 3, 2, 1, 0); \
46813   float32x4_t __rev1_107;  __rev1_107 = __builtin_shufflevector(__s1_107, __s1_107, 3, 2, 1, 0); \
46814   float32x4_t __rev2_107;  __rev2_107 = __builtin_shufflevector(__s2_107, __s2_107, 3, 2, 1, 0); \
46815   float32x4_t __ret_107; \
46816   __ret_107 = __noswap_vfmaq_laneq_f32(__rev0_107, __rev1_107, -__rev2_107, __p3_107); \
46817   __ret_107 = __builtin_shufflevector(__ret_107, __ret_107, 3, 2, 1, 0); \
46818   __ret_107; \
46819 })
46820 #endif
46821 
46822 #ifdef __LITTLE_ENDIAN__
46823 #define vfms_laneq_f64(__p0_108, __p1_108, __p2_108, __p3_108) __extension__ ({ \
46824   float64x1_t __s0_108 = __p0_108; \
46825   float64x1_t __s1_108 = __p1_108; \
46826   float64x2_t __s2_108 = __p2_108; \
46827   float64x1_t __ret_108; \
46828   __ret_108 = vfma_laneq_f64(__s0_108, __s1_108, -__s2_108, __p3_108); \
46829   __ret_108; \
46830 })
46831 #else
46832 #define vfms_laneq_f64(__p0_109, __p1_109, __p2_109, __p3_109) __extension__ ({ \
46833   float64x1_t __s0_109 = __p0_109; \
46834   float64x1_t __s1_109 = __p1_109; \
46835   float64x2_t __s2_109 = __p2_109; \
46836   float64x2_t __rev2_109;  __rev2_109 = __builtin_shufflevector(__s2_109, __s2_109, 1, 0); \
46837   float64x1_t __ret_109; \
46838   __ret_109 = __noswap_vfma_laneq_f64(__s0_109, __s1_109, -__rev2_109, __p3_109); \
46839   __ret_109; \
46840 })
46841 #endif
46842 
46843 #ifdef __LITTLE_ENDIAN__
46844 #define vfms_laneq_f32(__p0_110, __p1_110, __p2_110, __p3_110) __extension__ ({ \
46845   float32x2_t __s0_110 = __p0_110; \
46846   float32x2_t __s1_110 = __p1_110; \
46847   float32x4_t __s2_110 = __p2_110; \
46848   float32x2_t __ret_110; \
46849   __ret_110 = vfma_laneq_f32(__s0_110, __s1_110, -__s2_110, __p3_110); \
46850   __ret_110; \
46851 })
46852 #else
46853 #define vfms_laneq_f32(__p0_111, __p1_111, __p2_111, __p3_111) __extension__ ({ \
46854   float32x2_t __s0_111 = __p0_111; \
46855   float32x2_t __s1_111 = __p1_111; \
46856   float32x4_t __s2_111 = __p2_111; \
46857   float32x2_t __rev0_111;  __rev0_111 = __builtin_shufflevector(__s0_111, __s0_111, 1, 0); \
46858   float32x2_t __rev1_111;  __rev1_111 = __builtin_shufflevector(__s1_111, __s1_111, 1, 0); \
46859   float32x4_t __rev2_111;  __rev2_111 = __builtin_shufflevector(__s2_111, __s2_111, 3, 2, 1, 0); \
46860   float32x2_t __ret_111; \
46861   __ret_111 = __noswap_vfma_laneq_f32(__rev0_111, __rev1_111, -__rev2_111, __p3_111); \
46862   __ret_111 = __builtin_shufflevector(__ret_111, __ret_111, 1, 0); \
46863   __ret_111; \
46864 })
46865 #endif
46866 
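/* Editor's note: every vfms*_lane / vfms*_laneq form above is realized by
 * negating the vector that supplies the lane and forwarding to the matching
 * vfma form, e.g. vfmsq_lane_f32(a, b, v, i) == vfmaq_lane_f32(a, b, -v, i);
 * on big-endian targets the vector operands are lane-reversed first so the
 * lane index keeps its architectural meaning. Illustrative sketch only
 * (example_fms_lane is not part of this header): */
#if 0
__ai float32x4_t example_fms_lane(float32x4_t a, float32x4_t b, float32x2_t v) {
  /* a - b * v[1] in each lane */
  return vfmsq_lane_f32(a, b, v, 1);
}
#endif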
46867 #ifdef __LITTLE_ENDIAN__
46868 __ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
46869   float64x2_t __ret;
46870   __ret = vfmsq_f64(__p0, __p1, (float64x2_t) {__p2, __p2});
46871   return __ret;
46872 }
46873 #else
46874 __ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
46875   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
46876   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
46877   float64x2_t __ret;
46878   __ret = __noswap_vfmsq_f64(__rev0, __rev1, (float64x2_t) {__p2, __p2});
46879   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
46880   return __ret;
46881 }
46882 #endif
46883 
46884 #ifdef __LITTLE_ENDIAN__
46885 __ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
46886   float32x4_t __ret;
46887   __ret = vfmsq_f32(__p0, __p1, (float32x4_t) {__p2, __p2, __p2, __p2});
46888   return __ret;
46889 }
46890 #else
46891 __ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
46892   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
46893   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
46894   float32x4_t __ret;
46895   __ret = __noswap_vfmsq_f32(__rev0, __rev1, (float32x4_t) {__p2, __p2, __p2, __p2});
46896   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
46897   return __ret;
46898 }
46899 #endif
46900 
46901 #ifdef __LITTLE_ENDIAN__
46902 __ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
46903   float32x2_t __ret;
46904   __ret = vfms_f32(__p0, __p1, (float32x2_t) {__p2, __p2});
46905   return __ret;
46906 }
46907 #else
46908 __ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
46909   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
46910   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
46911   float32x2_t __ret;
46912   __ret = __noswap_vfms_f32(__rev0, __rev1, (float32x2_t) {__p2, __p2});
46913   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
46914   return __ret;
46915 }
46916 #endif
46917 
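/* Editor's note: the _n forms above broadcast the scalar third operand into
 * every lane (e.g. (float32x2_t){s, s}) and then call the plain vector vfms.
 * Illustrative sketch only (example_fms_scalar is not part of this header): */
#if 0
__ai float32x2_t example_fms_scalar(float32x2_t acc, float32x2_t x, float32_t s) {
  /* acc - x*s in each lane */
  return vfms_n_f32(acc, x, s);
}
#endif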
46918 #ifdef __LITTLE_ENDIAN__
46919 __ai poly64x1_t vget_high_p64(poly64x2_t __p0) {
46920   poly64x1_t __ret;
46921   __ret = __builtin_shufflevector(__p0, __p0, 1);
46922   return __ret;
46923 }
46924 #else
46925 __ai poly64x1_t vget_high_p64(poly64x2_t __p0) {
46926   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
46927   poly64x1_t __ret;
46928   __ret = __builtin_shufflevector(__rev0, __rev0, 1);
46929   return __ret;
46930 }
46931 __ai poly64x1_t __noswap_vget_high_p64(poly64x2_t __p0) {
46932   poly64x1_t __ret;
46933   __ret = __builtin_shufflevector(__p0, __p0, 1);
46934   return __ret;
46935 }
46936 #endif
46937 
46938 #ifdef __LITTLE_ENDIAN__
46939 __ai float64x1_t vget_high_f64(float64x2_t __p0) {
46940   float64x1_t __ret;
46941   __ret = __builtin_shufflevector(__p0, __p0, 1);
46942   return __ret;
46943 }
46944 #else
46945 __ai float64x1_t vget_high_f64(float64x2_t __p0) {
46946   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
46947   float64x1_t __ret;
46948   __ret = __builtin_shufflevector(__rev0, __rev0, 1);
46949   return __ret;
46950 }
46951 #endif
46952 
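/* Editor's note: vget_high_p64/vget_high_f64 select the upper 64-bit half of a
 * 128-bit vector (architectural lane 1 for these two-lane types) with
 * __builtin_shufflevector; the big-endian bodies lane-reverse the source first
 * so the result is identical on either endianness. Sketch (example_high is
 * illustrative only, not part of this header): */
#if 0
__ai float64x1_t example_high(float64x2_t v) {
  return vget_high_f64(v);   /* lane 1 of v, as a one-lane vector */
}
#endif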
46953 #ifdef __LITTLE_ENDIAN__
46954 #define vget_lane_p64(__p0, __p1) __extension__ ({ \
46955   poly64x1_t __s0 = __p0; \
46956   poly64_t __ret; \
46957   __ret = (poly64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
46958   __ret; \
46959 })
46960 #else
46961 #define vget_lane_p64(__p0, __p1) __extension__ ({ \
46962   poly64x1_t __s0 = __p0; \
46963   poly64_t __ret; \
46964   __ret = (poly64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
46965   __ret; \
46966 })
46967 #define __noswap_vget_lane_p64(__p0, __p1) __extension__ ({ \
46968   poly64x1_t __s0 = __p0; \
46969   poly64_t __ret; \
46970   __ret = (poly64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
46971   __ret; \
46972 })
46973 #endif
46974 
46975 #ifdef __LITTLE_ENDIAN__
46976 #define vgetq_lane_p64(__p0, __p1) __extension__ ({ \
46977   poly64x2_t __s0 = __p0; \
46978   poly64_t __ret; \
46979   __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
46980   __ret; \
46981 })
46982 #else
46983 #define vgetq_lane_p64(__p0, __p1) __extension__ ({ \
46984   poly64x2_t __s0 = __p0; \
46985   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
46986   poly64_t __ret; \
46987   __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__rev0, __p1); \
46988   __ret; \
46989 })
46990 #define __noswap_vgetq_lane_p64(__p0, __p1) __extension__ ({ \
46991   poly64x2_t __s0 = __p0; \
46992   poly64_t __ret; \
46993   __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
46994   __ret; \
46995 })
46996 #endif
46997 
46998 #ifdef __LITTLE_ENDIAN__
46999 #define vgetq_lane_f64(__p0, __p1) __extension__ ({ \
47000   float64x2_t __s0 = __p0; \
47001   float64_t __ret; \
47002   __ret = (float64_t) __builtin_neon_vgetq_lane_f64((int8x16_t)__s0, __p1); \
47003   __ret; \
47004 })
47005 #else
47006 #define vgetq_lane_f64(__p0, __p1) __extension__ ({ \
47007   float64x2_t __s0 = __p0; \
47008   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
47009   float64_t __ret; \
47010   __ret = (float64_t) __builtin_neon_vgetq_lane_f64((int8x16_t)__rev0, __p1); \
47011   __ret; \
47012 })
47013 #define __noswap_vgetq_lane_f64(__p0, __p1) __extension__ ({ \
47014   float64x2_t __s0 = __p0; \
47015   float64_t __ret; \
47016   __ret = (float64_t) __builtin_neon_vgetq_lane_f64((int8x16_t)__s0, __p1); \
47017   __ret; \
47018 })
47019 #endif
47020 
47021 #ifdef __LITTLE_ENDIAN__
47022 #define vget_lane_f64(__p0, __p1) __extension__ ({ \
47023   float64x1_t __s0 = __p0; \
47024   float64_t __ret; \
47025   __ret = (float64_t) __builtin_neon_vget_lane_f64((int8x8_t)__s0, __p1); \
47026   __ret; \
47027 })
47028 #else
47029 #define vget_lane_f64(__p0, __p1) __extension__ ({ \
47030   float64x1_t __s0 = __p0; \
47031   float64_t __ret; \
47032   __ret = (float64_t) __builtin_neon_vget_lane_f64((int8x8_t)__s0, __p1); \
47033   __ret; \
47034 })
47035 #define __noswap_vget_lane_f64(__p0, __p1) __extension__ ({ \
47036   float64x1_t __s0 = __p0; \
47037   float64_t __ret; \
47038   __ret = (float64_t) __builtin_neon_vget_lane_f64((int8x8_t)__s0, __p1); \
47039   __ret; \
47040 })
47041 #endif
47042 
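/* Editor's note: the 64-bit lane getters above route through the generic
 * i64/f64 lane builtins, casting the source vector to int8x8_t/int8x16_t as
 * those builtins expect; the one-lane d-register forms need no lane reversal,
 * so their big-endian bodies are identical to the little-endian ones. Sketch
 * (example_lane_f64 is illustrative only, not part of this header): */
#if 0
__ai float64_t example_lane_f64(float64x2_t v) {
  return vgetq_lane_f64(v, 0);   /* extract lane 0 as a scalar */
}
#endif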
47043 #ifdef __LITTLE_ENDIAN__
47044 #define vget_lane_f16(__p0_112, __p1_112) __extension__ ({ \
47045   float16x4_t __s0_112 = __p0_112; \
47046   float16_t __ret_112; \
47047 float16x4_t __reint_112 = __s0_112; \
47048 int16_t __reint1_112 = vget_lane_s16(*(int16x4_t *) &__reint_112, __p1_112); \
47049   __ret_112 = *(float16_t *) &__reint1_112; \
47050   __ret_112; \
47051 })
47052 #else
47053 #define vget_lane_f16(__p0_113, __p1_113) __extension__ ({ \
47054   float16x4_t __s0_113 = __p0_113; \
47055   float16x4_t __rev0_113;  __rev0_113 = __builtin_shufflevector(__s0_113, __s0_113, 3, 2, 1, 0); \
47056   float16_t __ret_113; \
47057 float16x4_t __reint_113 = __rev0_113; \
47058 int16_t __reint1_113 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_113, __p1_113); \
47059   __ret_113 = *(float16_t *) &__reint1_113; \
47060   __ret_113; \
47061 })
47062 #endif
47063 
47064 #ifdef __LITTLE_ENDIAN__
47065 #define vgetq_lane_f16(__p0_114, __p1_114) __extension__ ({ \
47066   float16x8_t __s0_114 = __p0_114; \
47067   float16_t __ret_114; \
47068 float16x8_t __reint_114 = __s0_114; \
47069 int16_t __reint1_114 = vgetq_lane_s16(*(int16x8_t *) &__reint_114, __p1_114); \
47070   __ret_114 = *(float16_t *) &__reint1_114; \
47071   __ret_114; \
47072 })
47073 #else
47074 #define vgetq_lane_f16(__p0_115, __p1_115) __extension__ ({ \
47075   float16x8_t __s0_115 = __p0_115; \
47076   float16x8_t __rev0_115;  __rev0_115 = __builtin_shufflevector(__s0_115, __s0_115, 7, 6, 5, 4, 3, 2, 1, 0); \
47077   float16_t __ret_115; \
47078 float16x8_t __reint_115 = __rev0_115; \
47079 int16_t __reint1_115 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_115, __p1_115); \
47080   __ret_115 = *(float16_t *) &__reint1_115; \
47081   __ret_115; \
47082 })
47083 #endif
47084 
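/* Editor's note: vget_lane_f16/vgetq_lane_f16 have no dedicated float16 lane
 * builtin here; the macros reinterpret the vector as int16, extract the lane
 * with vget(q)_lane_s16, and type-pun the 16-bit pattern back to float16_t.
 * Usage sketch (example_lane_f16 is illustrative only; the lane index must be
 * a compile-time constant): */
#if 0
__ai float16_t example_lane_f16(float16x8_t v) {
  return vgetq_lane_f16(v, 3);   /* bit-exact copy of lane 3 */
}
#endif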
47085 #ifdef __LITTLE_ENDIAN__
47086 __ai poly64x1_t vget_low_p64(poly64x2_t __p0) {
47087   poly64x1_t __ret;
47088   __ret = __builtin_shufflevector(__p0, __p0, 0);
47089   return __ret;
47090 }
47091 #else
47092 __ai poly64x1_t vget_low_p64(poly64x2_t __p0) {
47093   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
47094   poly64x1_t __ret;
47095   __ret = __builtin_shufflevector(__rev0, __rev0, 0);
47096   return __ret;
47097 }
47098 #endif
47099 
47100 #ifdef __LITTLE_ENDIAN__
47101 __ai float64x1_t vget_low_f64(float64x2_t __p0) {
47102   float64x1_t __ret;
47103   __ret = __builtin_shufflevector(__p0, __p0, 0);
47104   return __ret;
47105 }
47106 #else
47107 __ai float64x1_t vget_low_f64(float64x2_t __p0) {
47108   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
47109   float64x1_t __ret;
47110   __ret = __builtin_shufflevector(__rev0, __rev0, 0);
47111   return __ret;
47112 }
47113 #endif
47114 
47115 #ifdef __LITTLE_ENDIAN__
47116 #define vld1_p64(__p0) __extension__ ({ \
47117   poly64x1_t __ret; \
47118   __ret = (poly64x1_t) __builtin_neon_vld1_v(__p0, 6); \
47119   __ret; \
47120 })
47121 #else
47122 #define vld1_p64(__p0) __extension__ ({ \
47123   poly64x1_t __ret; \
47124   __ret = (poly64x1_t) __builtin_neon_vld1_v(__p0, 6); \
47125   __ret; \
47126 })
47127 #endif
47128 
47129 #ifdef __LITTLE_ENDIAN__
47130 #define vld1q_p64(__p0) __extension__ ({ \
47131   poly64x2_t __ret; \
47132   __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \
47133   __ret; \
47134 })
47135 #else
47136 #define vld1q_p64(__p0) __extension__ ({ \
47137   poly64x2_t __ret; \
47138   __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \
47139   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
47140   __ret; \
47141 })
47142 #endif
47143 
47144 #ifdef __LITTLE_ENDIAN__
47145 #define vld1q_f64(__p0) __extension__ ({ \
47146   float64x2_t __ret; \
47147   __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \
47148   __ret; \
47149 })
47150 #else
47151 #define vld1q_f64(__p0) __extension__ ({ \
47152   float64x2_t __ret; \
47153   __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \
47154   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
47155   __ret; \
47156 })
47157 #endif
47158 
47159 #ifdef __LITTLE_ENDIAN__
47160 #define vld1_f64(__p0) __extension__ ({ \
47161   float64x1_t __ret; \
47162   __ret = (float64x1_t) __builtin_neon_vld1_v(__p0, 10); \
47163   __ret; \
47164 })
47165 #else
47166 #define vld1_f64(__p0) __extension__ ({ \
47167   float64x1_t __ret; \
47168   __ret = (float64x1_t) __builtin_neon_vld1_v(__p0, 10); \
47169   __ret; \
47170 })
47171 #endif
47172 
47173 #ifdef __LITTLE_ENDIAN__
47174 #define vld1_dup_p64(__p0) __extension__ ({ \
47175   poly64x1_t __ret; \
47176   __ret = (poly64x1_t) __builtin_neon_vld1_dup_v(__p0, 6); \
47177   __ret; \
47178 })
47179 #else
47180 #define vld1_dup_p64(__p0) __extension__ ({ \
47181   poly64x1_t __ret; \
47182   __ret = (poly64x1_t) __builtin_neon_vld1_dup_v(__p0, 6); \
47183   __ret; \
47184 })
47185 #endif
47186 
47187 #ifdef __LITTLE_ENDIAN__
47188 #define vld1q_dup_p64(__p0) __extension__ ({ \
47189   poly64x2_t __ret; \
47190   __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \
47191   __ret; \
47192 })
47193 #else
47194 #define vld1q_dup_p64(__p0) __extension__ ({ \
47195   poly64x2_t __ret; \
47196   __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \
47197   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
47198   __ret; \
47199 })
47200 #endif
47201 
47202 #ifdef __LITTLE_ENDIAN__
47203 #define vld1q_dup_f64(__p0) __extension__ ({ \
47204   float64x2_t __ret; \
47205   __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \
47206   __ret; \
47207 })
47208 #else
47209 #define vld1q_dup_f64(__p0) __extension__ ({ \
47210   float64x2_t __ret; \
47211   __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \
47212   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
47213   __ret; \
47214 })
47215 #endif
47216 
47217 #ifdef __LITTLE_ENDIAN__
47218 #define vld1_dup_f64(__p0) __extension__ ({ \
47219   float64x1_t __ret; \
47220   __ret = (float64x1_t) __builtin_neon_vld1_dup_v(__p0, 10); \
47221   __ret; \
47222 })
47223 #else
47224 #define vld1_dup_f64(__p0) __extension__ ({ \
47225   float64x1_t __ret; \
47226   __ret = (float64x1_t) __builtin_neon_vld1_dup_v(__p0, 10); \
47227   __ret; \
47228 })
47229 #endif
47230 
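/* Editor's note: the vld1*_dup forms above load a single element from memory
 * and replicate it into every lane of the result; the big-endian q-forms
 * lane-reverse the loaded vector afterwards. Sketch (example_dup_load is
 * illustrative only, not part of this header): */
#if 0
__ai float64x2_t example_dup_load(const float64_t *p) {
  return vld1q_dup_f64(p);   /* both lanes = *p */
}
#endif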
47231 #ifdef __LITTLE_ENDIAN__
47232 #define vld1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
47233   poly64x1_t __s1 = __p1; \
47234   poly64x1_t __ret; \
47235   __ret = (poly64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \
47236   __ret; \
47237 })
47238 #else
47239 #define vld1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
47240   poly64x1_t __s1 = __p1; \
47241   poly64x1_t __ret; \
47242   __ret = (poly64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \
47243   __ret; \
47244 })
47245 #endif
47246 
47247 #ifdef __LITTLE_ENDIAN__
47248 #define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
47249   poly64x2_t __s1 = __p1; \
47250   poly64x2_t __ret; \
47251   __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \
47252   __ret; \
47253 })
47254 #else
47255 #define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
47256   poly64x2_t __s1 = __p1; \
47257   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
47258   poly64x2_t __ret; \
47259   __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \
47260   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
47261   __ret; \
47262 })
47263 #endif
47264 
47265 #ifdef __LITTLE_ENDIAN__
47266 #define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
47267   float64x2_t __s1 = __p1; \
47268   float64x2_t __ret; \
47269   __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \
47270   __ret; \
47271 })
47272 #else
47273 #define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
47274   float64x2_t __s1 = __p1; \
47275   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
47276   float64x2_t __ret; \
47277   __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \
47278   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
47279   __ret; \
47280 })
47281 #endif
47282 
47283 #ifdef __LITTLE_ENDIAN__
47284 #define vld1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
47285   float64x1_t __s1 = __p1; \
47286   float64x1_t __ret; \
47287   __ret = (float64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \
47288   __ret; \
47289 })
47290 #else
47291 #define vld1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
47292   float64x1_t __s1 = __p1; \
47293   float64x1_t __ret; \
47294   __ret = (float64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \
47295   __ret; \
47296 })
47297 #endif
47298 
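/* Editor's note: the vld1*_lane forms above load one element from memory into
 * lane __p2 of an existing vector, leaving the other lanes untouched; the
 * big-endian q-forms reverse the vector around the builtin so the lane index
 * keeps its architectural meaning. Sketch (example_load_lane is illustrative
 * only, not part of this header): */
#if 0
__ai float64x2_t example_load_lane(const float64_t *p, float64x2_t v) {
  return vld1q_lane_f64(p, v, 1);   /* replace lane 1 with *p */
}
#endif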
47299 #ifdef __LITTLE_ENDIAN__
47300 #define vld1_p8_x2(__p0) __extension__ ({ \
47301   poly8x8x2_t __ret; \
47302   __builtin_neon_vld1_x2_v(&__ret, __p0, 4); \
47303   __ret; \
47304 })
47305 #else
47306 #define vld1_p8_x2(__p0) __extension__ ({ \
47307   poly8x8x2_t __ret; \
47308   __builtin_neon_vld1_x2_v(&__ret, __p0, 4); \
47309  \
47310   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
47311   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
47312   __ret; \
47313 })
47314 #endif
47315 
47316 #ifdef __LITTLE_ENDIAN__
47317 #define vld1_p64_x2(__p0) __extension__ ({ \
47318   poly64x1x2_t __ret; \
47319   __builtin_neon_vld1_x2_v(&__ret, __p0, 6); \
47320   __ret; \
47321 })
47322 #else
47323 #define vld1_p64_x2(__p0) __extension__ ({ \
47324   poly64x1x2_t __ret; \
47325   __builtin_neon_vld1_x2_v(&__ret, __p0, 6); \
47326   __ret; \
47327 })
47328 #endif
47329 
47330 #ifdef __LITTLE_ENDIAN__
47331 #define vld1_p16_x2(__p0) __extension__ ({ \
47332   poly16x4x2_t __ret; \
47333   __builtin_neon_vld1_x2_v(&__ret, __p0, 5); \
47334   __ret; \
47335 })
47336 #else
47337 #define vld1_p16_x2(__p0) __extension__ ({ \
47338   poly16x4x2_t __ret; \
47339   __builtin_neon_vld1_x2_v(&__ret, __p0, 5); \
47340  \
47341   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
47342   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
47343   __ret; \
47344 })
47345 #endif
47346 
47347 #ifdef __LITTLE_ENDIAN__
47348 #define vld1q_p8_x2(__p0) __extension__ ({ \
47349   poly8x16x2_t __ret; \
47350   __builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \
47351   __ret; \
47352 })
47353 #else
47354 #define vld1q_p8_x2(__p0) __extension__ ({ \
47355   poly8x16x2_t __ret; \
47356   __builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \
47357  \
47358   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
47359   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
47360   __ret; \
47361 })
47362 #endif
47363 
47364 #ifdef __LITTLE_ENDIAN__
47365 #define vld1q_p64_x2(__p0) __extension__ ({ \
47366   poly64x2x2_t __ret; \
47367   __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \
47368   __ret; \
47369 })
47370 #else
47371 #define vld1q_p64_x2(__p0) __extension__ ({ \
47372   poly64x2x2_t __ret; \
47373   __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \
47374  \
47375   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
47376   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
47377   __ret; \
47378 })
47379 #endif
47380 
47381 #ifdef __LITTLE_ENDIAN__
47382 #define vld1q_p16_x2(__p0) __extension__ ({ \
47383   poly16x8x2_t __ret; \
47384   __builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \
47385   __ret; \
47386 })
47387 #else
47388 #define vld1q_p16_x2(__p0) __extension__ ({ \
47389   poly16x8x2_t __ret; \
47390   __builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \
47391  \
47392   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
47393   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
47394   __ret; \
47395 })
47396 #endif
47397 
47398 #ifdef __LITTLE_ENDIAN__
47399 #define vld1q_u8_x2(__p0) __extension__ ({ \
47400   uint8x16x2_t __ret; \
47401   __builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \
47402   __ret; \
47403 })
47404 #else
47405 #define vld1q_u8_x2(__p0) __extension__ ({ \
47406   uint8x16x2_t __ret; \
47407   __builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \
47408  \
47409   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
47410   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
47411   __ret; \
47412 })
47413 #endif
47414 
47415 #ifdef __LITTLE_ENDIAN__
47416 #define vld1q_u32_x2(__p0) __extension__ ({ \
47417   uint32x4x2_t __ret; \
47418   __builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \
47419   __ret; \
47420 })
47421 #else
47422 #define vld1q_u32_x2(__p0) __extension__ ({ \
47423   uint32x4x2_t __ret; \
47424   __builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \
47425  \
47426   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
47427   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
47428   __ret; \
47429 })
47430 #endif
47431 
47432 #ifdef __LITTLE_ENDIAN__
47433 #define vld1q_u64_x2(__p0) __extension__ ({ \
47434   uint64x2x2_t __ret; \
47435   __builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \
47436   __ret; \
47437 })
47438 #else
47439 #define vld1q_u64_x2(__p0) __extension__ ({ \
47440   uint64x2x2_t __ret; \
47441   __builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \
47442  \
47443   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
47444   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
47445   __ret; \
47446 })
47447 #endif
47448 
47449 #ifdef __LITTLE_ENDIAN__
47450 #define vld1q_u16_x2(__p0) __extension__ ({ \
47451   uint16x8x2_t __ret; \
47452   __builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \
47453   __ret; \
47454 })
47455 #else
47456 #define vld1q_u16_x2(__p0) __extension__ ({ \
47457   uint16x8x2_t __ret; \
47458   __builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \
47459  \
47460   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
47461   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
47462   __ret; \
47463 })
47464 #endif
47465 
47466 #ifdef __LITTLE_ENDIAN__
47467 #define vld1q_s8_x2(__p0) __extension__ ({ \
47468   int8x16x2_t __ret; \
47469   __builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \
47470   __ret; \
47471 })
47472 #else
47473 #define vld1q_s8_x2(__p0) __extension__ ({ \
47474   int8x16x2_t __ret; \
47475   __builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \
47476  \
47477   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
47478   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
47479   __ret; \
47480 })
47481 #endif
47482 
47483 #ifdef __LITTLE_ENDIAN__
47484 #define vld1q_f64_x2(__p0) __extension__ ({ \
47485   float64x2x2_t __ret; \
47486   __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \
47487   __ret; \
47488 })
47489 #else
47490 #define vld1q_f64_x2(__p0) __extension__ ({ \
47491   float64x2x2_t __ret; \
47492   __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \
47493  \
47494   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
47495   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
47496   __ret; \
47497 })
47498 #endif
47499 
47500 #ifdef __LITTLE_ENDIAN__
47501 #define vld1q_f32_x2(__p0) __extension__ ({ \
47502   float32x4x2_t __ret; \
47503   __builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \
47504   __ret; \
47505 })
47506 #else
47507 #define vld1q_f32_x2(__p0) __extension__ ({ \
47508   float32x4x2_t __ret; \
47509   __builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \
47510  \
47511   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
47512   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
47513   __ret; \
47514 })
47515 #endif
47516 
47517 #ifdef __LITTLE_ENDIAN__
47518 #define vld1q_f16_x2(__p0) __extension__ ({ \
47519   float16x8x2_t __ret; \
47520   __builtin_neon_vld1q_x2_v(&__ret, __p0, 40); \
47521   __ret; \
47522 })
47523 #else
47524 #define vld1q_f16_x2(__p0) __extension__ ({ \
47525   float16x8x2_t __ret; \
47526   __builtin_neon_vld1q_x2_v(&__ret, __p0, 40); \
47527  \
47528   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
47529   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
47530   __ret; \
47531 })
47532 #endif
47533 
47534 #ifdef __LITTLE_ENDIAN__
47535 #define vld1q_s32_x2(__p0) __extension__ ({ \
47536   int32x4x2_t __ret; \
47537   __builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \
47538   __ret; \
47539 })
47540 #else
47541 #define vld1q_s32_x2(__p0) __extension__ ({ \
47542   int32x4x2_t __ret; \
47543   __builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \
47544  \
47545   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
47546   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
47547   __ret; \
47548 })
47549 #endif
47550 
47551 #ifdef __LITTLE_ENDIAN__
47552 #define vld1q_s64_x2(__p0) __extension__ ({ \
47553   int64x2x2_t __ret; \
47554   __builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \
47555   __ret; \
47556 })
47557 #else
47558 #define vld1q_s64_x2(__p0) __extension__ ({ \
47559   int64x2x2_t __ret; \
47560   __builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \
47561  \
47562   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
47563   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
47564   __ret; \
47565 })
47566 #endif
47567 
47568 #ifdef __LITTLE_ENDIAN__
47569 #define vld1q_s16_x2(__p0) __extension__ ({ \
47570   int16x8x2_t __ret; \
47571   __builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \
47572   __ret; \
47573 })
47574 #else
47575 #define vld1q_s16_x2(__p0) __extension__ ({ \
47576   int16x8x2_t __ret; \
47577   __builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \
47578  \
47579   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
47580   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
47581   __ret; \
47582 })
47583 #endif
47584 
47585 #ifdef __LITTLE_ENDIAN__
47586 #define vld1_u8_x2(__p0) __extension__ ({ \
47587   uint8x8x2_t __ret; \
47588   __builtin_neon_vld1_x2_v(&__ret, __p0, 16); \
47589   __ret; \
47590 })
47591 #else
47592 #define vld1_u8_x2(__p0) __extension__ ({ \
47593   uint8x8x2_t __ret; \
47594   __builtin_neon_vld1_x2_v(&__ret, __p0, 16); \
47595  \
47596   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
47597   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
47598   __ret; \
47599 })
47600 #endif
47601 
47602 #ifdef __LITTLE_ENDIAN__
47603 #define vld1_u32_x2(__p0) __extension__ ({ \
47604   uint32x2x2_t __ret; \
47605   __builtin_neon_vld1_x2_v(&__ret, __p0, 18); \
47606   __ret; \
47607 })
47608 #else
47609 #define vld1_u32_x2(__p0) __extension__ ({ \
47610   uint32x2x2_t __ret; \
47611   __builtin_neon_vld1_x2_v(&__ret, __p0, 18); \
47612  \
47613   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
47614   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
47615   __ret; \
47616 })
47617 #endif
47618 
47619 #ifdef __LITTLE_ENDIAN__
47620 #define vld1_u64_x2(__p0) __extension__ ({ \
47621   uint64x1x2_t __ret; \
47622   __builtin_neon_vld1_x2_v(&__ret, __p0, 19); \
47623   __ret; \
47624 })
47625 #else
47626 #define vld1_u64_x2(__p0) __extension__ ({ \
47627   uint64x1x2_t __ret; \
47628   __builtin_neon_vld1_x2_v(&__ret, __p0, 19); \
47629   __ret; \
47630 })
47631 #endif
47632 
47633 #ifdef __LITTLE_ENDIAN__
47634 #define vld1_u16_x2(__p0) __extension__ ({ \
47635   uint16x4x2_t __ret; \
47636   __builtin_neon_vld1_x2_v(&__ret, __p0, 17); \
47637   __ret; \
47638 })
47639 #else
47640 #define vld1_u16_x2(__p0) __extension__ ({ \
47641   uint16x4x2_t __ret; \
47642   __builtin_neon_vld1_x2_v(&__ret, __p0, 17); \
47643  \
47644   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
47645   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
47646   __ret; \
47647 })
47648 #endif
47649 
47650 #ifdef __LITTLE_ENDIAN__
47651 #define vld1_s8_x2(__p0) __extension__ ({ \
47652   int8x8x2_t __ret; \
47653   __builtin_neon_vld1_x2_v(&__ret, __p0, 0); \
47654   __ret; \
47655 })
47656 #else
47657 #define vld1_s8_x2(__p0) __extension__ ({ \
47658   int8x8x2_t __ret; \
47659   __builtin_neon_vld1_x2_v(&__ret, __p0, 0); \
47660  \
47661   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
47662   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
47663   __ret; \
47664 })
47665 #endif
47666 
47667 #ifdef __LITTLE_ENDIAN__
47668 #define vld1_f64_x2(__p0) __extension__ ({ \
47669   float64x1x2_t __ret; \
47670   __builtin_neon_vld1_x2_v(&__ret, __p0, 10); \
47671   __ret; \
47672 })
47673 #else
47674 #define vld1_f64_x2(__p0) __extension__ ({ \
47675   float64x1x2_t __ret; \
47676   __builtin_neon_vld1_x2_v(&__ret, __p0, 10); \
47677   __ret; \
47678 })
47679 #endif
47680 
47681 #ifdef __LITTLE_ENDIAN__
47682 #define vld1_f32_x2(__p0) __extension__ ({ \
47683   float32x2x2_t __ret; \
47684   __builtin_neon_vld1_x2_v(&__ret, __p0, 9); \
47685   __ret; \
47686 })
47687 #else
47688 #define vld1_f32_x2(__p0) __extension__ ({ \
47689   float32x2x2_t __ret; \
47690   __builtin_neon_vld1_x2_v(&__ret, __p0, 9); \
47691  \
47692   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
47693   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
47694   __ret; \
47695 })
47696 #endif
47697 
47698 #ifdef __LITTLE_ENDIAN__
47699 #define vld1_f16_x2(__p0) __extension__ ({ \
47700   float16x4x2_t __ret; \
47701   __builtin_neon_vld1_x2_v(&__ret, __p0, 8); \
47702   __ret; \
47703 })
47704 #else
47705 #define vld1_f16_x2(__p0) __extension__ ({ \
47706   float16x4x2_t __ret; \
47707   __builtin_neon_vld1_x2_v(&__ret, __p0, 8); \
47708  \
47709   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
47710   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
47711   __ret; \
47712 })
47713 #endif
47714 
47715 #ifdef __LITTLE_ENDIAN__
47716 #define vld1_s32_x2(__p0) __extension__ ({ \
47717   int32x2x2_t __ret; \
47718   __builtin_neon_vld1_x2_v(&__ret, __p0, 2); \
47719   __ret; \
47720 })
47721 #else
47722 #define vld1_s32_x2(__p0) __extension__ ({ \
47723   int32x2x2_t __ret; \
47724   __builtin_neon_vld1_x2_v(&__ret, __p0, 2); \
47725  \
47726   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
47727   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
47728   __ret; \
47729 })
47730 #endif
47731 
47732 #ifdef __LITTLE_ENDIAN__
47733 #define vld1_s64_x2(__p0) __extension__ ({ \
47734   int64x1x2_t __ret; \
47735   __builtin_neon_vld1_x2_v(&__ret, __p0, 3); \
47736   __ret; \
47737 })
47738 #else
47739 #define vld1_s64_x2(__p0) __extension__ ({ \
47740   int64x1x2_t __ret; \
47741   __builtin_neon_vld1_x2_v(&__ret, __p0, 3); \
47742   __ret; \
47743 })
47744 #endif
47745 
47746 #ifdef __LITTLE_ENDIAN__
47747 #define vld1_s16_x2(__p0) __extension__ ({ \
47748   int16x4x2_t __ret; \
47749   __builtin_neon_vld1_x2_v(&__ret, __p0, 1); \
47750   __ret; \
47751 })
47752 #else
47753 #define vld1_s16_x2(__p0) __extension__ ({ \
47754   int16x4x2_t __ret; \
47755   __builtin_neon_vld1_x2_v(&__ret, __p0, 1); \
47756  \
47757   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
47758   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
47759   __ret; \
47760 })
47761 #endif
47762 
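/* Editor's note: the vld1*_x2 forms above load two consecutive vectors from
 * contiguous memory into a ...x2_t struct with no de-interleaving (unlike
 * vld2); the big-endian variants lane-reverse each val[] member after the
 * builtin. The _x3/_x4 families below follow the same pattern with three and
 * four vectors. Sketch (example_load_pair is illustrative only, not part of
 * this header): */
#if 0
__ai float32x4x2_t example_load_pair(const float32_t *p) {
  return vld1q_f32_x2(p);   /* val[0] = p[0..3], val[1] = p[4..7] */
}
#endif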
47763 #ifdef __LITTLE_ENDIAN__
47764 #define vld1_p8_x3(__p0) __extension__ ({ \
47765   poly8x8x3_t __ret; \
47766   __builtin_neon_vld1_x3_v(&__ret, __p0, 4); \
47767   __ret; \
47768 })
47769 #else
47770 #define vld1_p8_x3(__p0) __extension__ ({ \
47771   poly8x8x3_t __ret; \
47772   __builtin_neon_vld1_x3_v(&__ret, __p0, 4); \
47773  \
47774   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
47775   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
47776   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
47777   __ret; \
47778 })
47779 #endif
47780 
47781 #ifdef __LITTLE_ENDIAN__
47782 #define vld1_p64_x3(__p0) __extension__ ({ \
47783   poly64x1x3_t __ret; \
47784   __builtin_neon_vld1_x3_v(&__ret, __p0, 6); \
47785   __ret; \
47786 })
47787 #else
47788 #define vld1_p64_x3(__p0) __extension__ ({ \
47789   poly64x1x3_t __ret; \
47790   __builtin_neon_vld1_x3_v(&__ret, __p0, 6); \
47791   __ret; \
47792 })
47793 #endif
47794 
47795 #ifdef __LITTLE_ENDIAN__
47796 #define vld1_p16_x3(__p0) __extension__ ({ \
47797   poly16x4x3_t __ret; \
47798   __builtin_neon_vld1_x3_v(&__ret, __p0, 5); \
47799   __ret; \
47800 })
47801 #else
47802 #define vld1_p16_x3(__p0) __extension__ ({ \
47803   poly16x4x3_t __ret; \
47804   __builtin_neon_vld1_x3_v(&__ret, __p0, 5); \
47805  \
47806   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
47807   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
47808   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
47809   __ret; \
47810 })
47811 #endif
47812 
47813 #ifdef __LITTLE_ENDIAN__
47814 #define vld1q_p8_x3(__p0) __extension__ ({ \
47815   poly8x16x3_t __ret; \
47816   __builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \
47817   __ret; \
47818 })
47819 #else
47820 #define vld1q_p8_x3(__p0) __extension__ ({ \
47821   poly8x16x3_t __ret; \
47822   __builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \
47823  \
47824   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
47825   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
47826   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
47827   __ret; \
47828 })
47829 #endif
47830 
47831 #ifdef __LITTLE_ENDIAN__
47832 #define vld1q_p64_x3(__p0) __extension__ ({ \
47833   poly64x2x3_t __ret; \
47834   __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \
47835   __ret; \
47836 })
47837 #else
47838 #define vld1q_p64_x3(__p0) __extension__ ({ \
47839   poly64x2x3_t __ret; \
47840   __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \
47841  \
47842   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
47843   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
47844   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
47845   __ret; \
47846 })
47847 #endif
47848 
47849 #ifdef __LITTLE_ENDIAN__
47850 #define vld1q_p16_x3(__p0) __extension__ ({ \
47851   poly16x8x3_t __ret; \
47852   __builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \
47853   __ret; \
47854 })
47855 #else
47856 #define vld1q_p16_x3(__p0) __extension__ ({ \
47857   poly16x8x3_t __ret; \
47858   __builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \
47859  \
47860   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
47861   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
47862   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
47863   __ret; \
47864 })
47865 #endif
47866 
47867 #ifdef __LITTLE_ENDIAN__
47868 #define vld1q_u8_x3(__p0) __extension__ ({ \
47869   uint8x16x3_t __ret; \
47870   __builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \
47871   __ret; \
47872 })
47873 #else
47874 #define vld1q_u8_x3(__p0) __extension__ ({ \
47875   uint8x16x3_t __ret; \
47876   __builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \
47877  \
47878   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
47879   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
47880   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
47881   __ret; \
47882 })
47883 #endif
47884 
47885 #ifdef __LITTLE_ENDIAN__
47886 #define vld1q_u32_x3(__p0) __extension__ ({ \
47887   uint32x4x3_t __ret; \
47888   __builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \
47889   __ret; \
47890 })
47891 #else
47892 #define vld1q_u32_x3(__p0) __extension__ ({ \
47893   uint32x4x3_t __ret; \
47894   __builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \
47895  \
47896   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
47897   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
47898   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
47899   __ret; \
47900 })
47901 #endif
47902 
47903 #ifdef __LITTLE_ENDIAN__
47904 #define vld1q_u64_x3(__p0) __extension__ ({ \
47905   uint64x2x3_t __ret; \
47906   __builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \
47907   __ret; \
47908 })
47909 #else
47910 #define vld1q_u64_x3(__p0) __extension__ ({ \
47911   uint64x2x3_t __ret; \
47912   __builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \
47913  \
47914   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
47915   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
47916   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
47917   __ret; \
47918 })
47919 #endif
47920 
47921 #ifdef __LITTLE_ENDIAN__
47922 #define vld1q_u16_x3(__p0) __extension__ ({ \
47923   uint16x8x3_t __ret; \
47924   __builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \
47925   __ret; \
47926 })
47927 #else
47928 #define vld1q_u16_x3(__p0) __extension__ ({ \
47929   uint16x8x3_t __ret; \
47930   __builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \
47931  \
47932   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
47933   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
47934   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
47935   __ret; \
47936 })
47937 #endif
47938 
47939 #ifdef __LITTLE_ENDIAN__
47940 #define vld1q_s8_x3(__p0) __extension__ ({ \
47941   int8x16x3_t __ret; \
47942   __builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \
47943   __ret; \
47944 })
47945 #else
47946 #define vld1q_s8_x3(__p0) __extension__ ({ \
47947   int8x16x3_t __ret; \
47948   __builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \
47949  \
47950   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
47951   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
47952   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
47953   __ret; \
47954 })
47955 #endif
47956 
47957 #ifdef __LITTLE_ENDIAN__
47958 #define vld1q_f64_x3(__p0) __extension__ ({ \
47959   float64x2x3_t __ret; \
47960   __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \
47961   __ret; \
47962 })
47963 #else
47964 #define vld1q_f64_x3(__p0) __extension__ ({ \
47965   float64x2x3_t __ret; \
47966   __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \
47967  \
47968   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
47969   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
47970   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
47971   __ret; \
47972 })
47973 #endif
47974 
47975 #ifdef __LITTLE_ENDIAN__
47976 #define vld1q_f32_x3(__p0) __extension__ ({ \
47977   float32x4x3_t __ret; \
47978   __builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \
47979   __ret; \
47980 })
47981 #else
47982 #define vld1q_f32_x3(__p0) __extension__ ({ \
47983   float32x4x3_t __ret; \
47984   __builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \
47985  \
47986   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
47987   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
47988   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
47989   __ret; \
47990 })
47991 #endif
47992 
47993 #ifdef __LITTLE_ENDIAN__
47994 #define vld1q_f16_x3(__p0) __extension__ ({ \
47995   float16x8x3_t __ret; \
47996   __builtin_neon_vld1q_x3_v(&__ret, __p0, 40); \
47997   __ret; \
47998 })
47999 #else
48000 #define vld1q_f16_x3(__p0) __extension__ ({ \
48001   float16x8x3_t __ret; \
48002   __builtin_neon_vld1q_x3_v(&__ret, __p0, 40); \
48003  \
48004   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
48005   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
48006   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
48007   __ret; \
48008 })
48009 #endif
48010 
48011 #ifdef __LITTLE_ENDIAN__
48012 #define vld1q_s32_x3(__p0) __extension__ ({ \
48013   int32x4x3_t __ret; \
48014   __builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \
48015   __ret; \
48016 })
48017 #else
48018 #define vld1q_s32_x3(__p0) __extension__ ({ \
48019   int32x4x3_t __ret; \
48020   __builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \
48021  \
48022   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
48023   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
48024   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
48025   __ret; \
48026 })
48027 #endif
48028 
48029 #ifdef __LITTLE_ENDIAN__
48030 #define vld1q_s64_x3(__p0) __extension__ ({ \
48031   int64x2x3_t __ret; \
48032   __builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \
48033   __ret; \
48034 })
48035 #else
48036 #define vld1q_s64_x3(__p0) __extension__ ({ \
48037   int64x2x3_t __ret; \
48038   __builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \
48039  \
48040   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
48041   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
48042   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
48043   __ret; \
48044 })
48045 #endif
48046 
48047 #ifdef __LITTLE_ENDIAN__
48048 #define vld1q_s16_x3(__p0) __extension__ ({ \
48049   int16x8x3_t __ret; \
48050   __builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \
48051   __ret; \
48052 })
48053 #else
48054 #define vld1q_s16_x3(__p0) __extension__ ({ \
48055   int16x8x3_t __ret; \
48056   __builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \
48057  \
48058   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
48059   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
48060   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
48061   __ret; \
48062 })
48063 #endif
48064 
48065 #ifdef __LITTLE_ENDIAN__
48066 #define vld1_u8_x3(__p0) __extension__ ({ \
48067   uint8x8x3_t __ret; \
48068   __builtin_neon_vld1_x3_v(&__ret, __p0, 16); \
48069   __ret; \
48070 })
48071 #else
48072 #define vld1_u8_x3(__p0) __extension__ ({ \
48073   uint8x8x3_t __ret; \
48074   __builtin_neon_vld1_x3_v(&__ret, __p0, 16); \
48075  \
48076   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
48077   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
48078   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
48079   __ret; \
48080 })
48081 #endif
48082 
48083 #ifdef __LITTLE_ENDIAN__
48084 #define vld1_u32_x3(__p0) __extension__ ({ \
48085   uint32x2x3_t __ret; \
48086   __builtin_neon_vld1_x3_v(&__ret, __p0, 18); \
48087   __ret; \
48088 })
48089 #else
48090 #define vld1_u32_x3(__p0) __extension__ ({ \
48091   uint32x2x3_t __ret; \
48092   __builtin_neon_vld1_x3_v(&__ret, __p0, 18); \
48093  \
48094   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
48095   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
48096   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
48097   __ret; \
48098 })
48099 #endif
48100 
48101 #ifdef __LITTLE_ENDIAN__
48102 #define vld1_u64_x3(__p0) __extension__ ({ \
48103   uint64x1x3_t __ret; \
48104   __builtin_neon_vld1_x3_v(&__ret, __p0, 19); \
48105   __ret; \
48106 })
48107 #else
48108 #define vld1_u64_x3(__p0) __extension__ ({ \
48109   uint64x1x3_t __ret; \
48110   __builtin_neon_vld1_x3_v(&__ret, __p0, 19); \
48111   __ret; \
48112 })
48113 #endif
48114 
48115 #ifdef __LITTLE_ENDIAN__
48116 #define vld1_u16_x3(__p0) __extension__ ({ \
48117   uint16x4x3_t __ret; \
48118   __builtin_neon_vld1_x3_v(&__ret, __p0, 17); \
48119   __ret; \
48120 })
48121 #else
48122 #define vld1_u16_x3(__p0) __extension__ ({ \
48123   uint16x4x3_t __ret; \
48124   __builtin_neon_vld1_x3_v(&__ret, __p0, 17); \
48125  \
48126   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
48127   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
48128   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
48129   __ret; \
48130 })
48131 #endif
48132 
48133 #ifdef __LITTLE_ENDIAN__
48134 #define vld1_s8_x3(__p0) __extension__ ({ \
48135   int8x8x3_t __ret; \
48136   __builtin_neon_vld1_x3_v(&__ret, __p0, 0); \
48137   __ret; \
48138 })
48139 #else
48140 #define vld1_s8_x3(__p0) __extension__ ({ \
48141   int8x8x3_t __ret; \
48142   __builtin_neon_vld1_x3_v(&__ret, __p0, 0); \
48143  \
48144   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
48145   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
48146   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
48147   __ret; \
48148 })
48149 #endif
48150 
48151 #ifdef __LITTLE_ENDIAN__
48152 #define vld1_f64_x3(__p0) __extension__ ({ \
48153   float64x1x3_t __ret; \
48154   __builtin_neon_vld1_x3_v(&__ret, __p0, 10); \
48155   __ret; \
48156 })
48157 #else
48158 #define vld1_f64_x3(__p0) __extension__ ({ \
48159   float64x1x3_t __ret; \
48160   __builtin_neon_vld1_x3_v(&__ret, __p0, 10); \
48161   __ret; \
48162 })
48163 #endif
48164 
48165 #ifdef __LITTLE_ENDIAN__
48166 #define vld1_f32_x3(__p0) __extension__ ({ \
48167   float32x2x3_t __ret; \
48168   __builtin_neon_vld1_x3_v(&__ret, __p0, 9); \
48169   __ret; \
48170 })
48171 #else
48172 #define vld1_f32_x3(__p0) __extension__ ({ \
48173   float32x2x3_t __ret; \
48174   __builtin_neon_vld1_x3_v(&__ret, __p0, 9); \
48175  \
48176   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
48177   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
48178   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
48179   __ret; \
48180 })
48181 #endif
48182 
48183 #ifdef __LITTLE_ENDIAN__
48184 #define vld1_f16_x3(__p0) __extension__ ({ \
48185   float16x4x3_t __ret; \
48186   __builtin_neon_vld1_x3_v(&__ret, __p0, 8); \
48187   __ret; \
48188 })
48189 #else
48190 #define vld1_f16_x3(__p0) __extension__ ({ \
48191   float16x4x3_t __ret; \
48192   __builtin_neon_vld1_x3_v(&__ret, __p0, 8); \
48193  \
48194   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
48195   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
48196   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
48197   __ret; \
48198 })
48199 #endif
48200 
48201 #ifdef __LITTLE_ENDIAN__
48202 #define vld1_s32_x3(__p0) __extension__ ({ \
48203   int32x2x3_t __ret; \
48204   __builtin_neon_vld1_x3_v(&__ret, __p0, 2); \
48205   __ret; \
48206 })
48207 #else
48208 #define vld1_s32_x3(__p0) __extension__ ({ \
48209   int32x2x3_t __ret; \
48210   __builtin_neon_vld1_x3_v(&__ret, __p0, 2); \
48211  \
48212   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
48213   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
48214   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
48215   __ret; \
48216 })
48217 #endif
48218 
48219 #ifdef __LITTLE_ENDIAN__
48220 #define vld1_s64_x3(__p0) __extension__ ({ \
48221   int64x1x3_t __ret; \
48222   __builtin_neon_vld1_x3_v(&__ret, __p0, 3); \
48223   __ret; \
48224 })
48225 #else
48226 #define vld1_s64_x3(__p0) __extension__ ({ \
48227   int64x1x3_t __ret; \
48228   __builtin_neon_vld1_x3_v(&__ret, __p0, 3); \
48229   __ret; \
48230 })
48231 #endif
48232 
48233 #ifdef __LITTLE_ENDIAN__
48234 #define vld1_s16_x3(__p0) __extension__ ({ \
48235   int16x4x3_t __ret; \
48236   __builtin_neon_vld1_x3_v(&__ret, __p0, 1); \
48237   __ret; \
48238 })
48239 #else
48240 #define vld1_s16_x3(__p0) __extension__ ({ \
48241   int16x4x3_t __ret; \
48242   __builtin_neon_vld1_x3_v(&__ret, __p0, 1); \
48243  \
48244   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
48245   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
48246   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
48247   __ret; \
48248 })
48249 #endif
48250 
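/* vld1_*_x4 / vld1q_*_x4: load four consecutive vectors of the named
 * element type from a single base pointer, with no de-interleaving.
 * Illustrative use, assuming a hypothetical buffer of at least 32 bytes:
 *
 *   uint8x8x4_t v = vld1_u8_x4(buf);   // v.val[0]..v.val[3] <- buf[0..31]
 */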
48251 #ifdef __LITTLE_ENDIAN__
48252 #define vld1_p8_x4(__p0) __extension__ ({ \
48253   poly8x8x4_t __ret; \
48254   __builtin_neon_vld1_x4_v(&__ret, __p0, 4); \
48255   __ret; \
48256 })
48257 #else
48258 #define vld1_p8_x4(__p0) __extension__ ({ \
48259   poly8x8x4_t __ret; \
48260   __builtin_neon_vld1_x4_v(&__ret, __p0, 4); \
48261  \
48262   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
48263   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
48264   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
48265   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
48266   __ret; \
48267 })
48268 #endif
48269 
48270 #ifdef __LITTLE_ENDIAN__
48271 #define vld1_p64_x4(__p0) __extension__ ({ \
48272   poly64x1x4_t __ret; \
48273   __builtin_neon_vld1_x4_v(&__ret, __p0, 6); \
48274   __ret; \
48275 })
48276 #else
48277 #define vld1_p64_x4(__p0) __extension__ ({ \
48278   poly64x1x4_t __ret; \
48279   __builtin_neon_vld1_x4_v(&__ret, __p0, 6); \
48280   __ret; \
48281 })
48282 #endif
48283 
48284 #ifdef __LITTLE_ENDIAN__
48285 #define vld1_p16_x4(__p0) __extension__ ({ \
48286   poly16x4x4_t __ret; \
48287   __builtin_neon_vld1_x4_v(&__ret, __p0, 5); \
48288   __ret; \
48289 })
48290 #else
48291 #define vld1_p16_x4(__p0) __extension__ ({ \
48292   poly16x4x4_t __ret; \
48293   __builtin_neon_vld1_x4_v(&__ret, __p0, 5); \
48294  \
48295   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
48296   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
48297   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
48298   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
48299   __ret; \
48300 })
48301 #endif
48302 
48303 #ifdef __LITTLE_ENDIAN__
48304 #define vld1q_p8_x4(__p0) __extension__ ({ \
48305   poly8x16x4_t __ret; \
48306   __builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \
48307   __ret; \
48308 })
48309 #else
48310 #define vld1q_p8_x4(__p0) __extension__ ({ \
48311   poly8x16x4_t __ret; \
48312   __builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \
48313  \
48314   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
48315   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
48316   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
48317   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
48318   __ret; \
48319 })
48320 #endif
48321 
48322 #ifdef __LITTLE_ENDIAN__
48323 #define vld1q_p64_x4(__p0) __extension__ ({ \
48324   poly64x2x4_t __ret; \
48325   __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \
48326   __ret; \
48327 })
48328 #else
48329 #define vld1q_p64_x4(__p0) __extension__ ({ \
48330   poly64x2x4_t __ret; \
48331   __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \
48332  \
48333   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
48334   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
48335   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
48336   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
48337   __ret; \
48338 })
48339 #endif
48340 
48341 #ifdef __LITTLE_ENDIAN__
48342 #define vld1q_p16_x4(__p0) __extension__ ({ \
48343   poly16x8x4_t __ret; \
48344   __builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \
48345   __ret; \
48346 })
48347 #else
48348 #define vld1q_p16_x4(__p0) __extension__ ({ \
48349   poly16x8x4_t __ret; \
48350   __builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \
48351  \
48352   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
48353   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
48354   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
48355   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
48356   __ret; \
48357 })
48358 #endif
48359 
48360 #ifdef __LITTLE_ENDIAN__
48361 #define vld1q_u8_x4(__p0) __extension__ ({ \
48362   uint8x16x4_t __ret; \
48363   __builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \
48364   __ret; \
48365 })
48366 #else
48367 #define vld1q_u8_x4(__p0) __extension__ ({ \
48368   uint8x16x4_t __ret; \
48369   __builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \
48370  \
48371   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
48372   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
48373   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
48374   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
48375   __ret; \
48376 })
48377 #endif
48378 
48379 #ifdef __LITTLE_ENDIAN__
48380 #define vld1q_u32_x4(__p0) __extension__ ({ \
48381   uint32x4x4_t __ret; \
48382   __builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \
48383   __ret; \
48384 })
48385 #else
48386 #define vld1q_u32_x4(__p0) __extension__ ({ \
48387   uint32x4x4_t __ret; \
48388   __builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \
48389  \
48390   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
48391   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
48392   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
48393   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
48394   __ret; \
48395 })
48396 #endif
48397 
48398 #ifdef __LITTLE_ENDIAN__
48399 #define vld1q_u64_x4(__p0) __extension__ ({ \
48400   uint64x2x4_t __ret; \
48401   __builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \
48402   __ret; \
48403 })
48404 #else
48405 #define vld1q_u64_x4(__p0) __extension__ ({ \
48406   uint64x2x4_t __ret; \
48407   __builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \
48408  \
48409   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
48410   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
48411   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
48412   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
48413   __ret; \
48414 })
48415 #endif
48416 
48417 #ifdef __LITTLE_ENDIAN__
48418 #define vld1q_u16_x4(__p0) __extension__ ({ \
48419   uint16x8x4_t __ret; \
48420   __builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \
48421   __ret; \
48422 })
48423 #else
48424 #define vld1q_u16_x4(__p0) __extension__ ({ \
48425   uint16x8x4_t __ret; \
48426   __builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \
48427  \
48428   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
48429   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
48430   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
48431   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
48432   __ret; \
48433 })
48434 #endif
48435 
48436 #ifdef __LITTLE_ENDIAN__
48437 #define vld1q_s8_x4(__p0) __extension__ ({ \
48438   int8x16x4_t __ret; \
48439   __builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \
48440   __ret; \
48441 })
48442 #else
48443 #define vld1q_s8_x4(__p0) __extension__ ({ \
48444   int8x16x4_t __ret; \
48445   __builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \
48446  \
48447   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
48448   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
48449   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
48450   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
48451   __ret; \
48452 })
48453 #endif
48454 
48455 #ifdef __LITTLE_ENDIAN__
48456 #define vld1q_f64_x4(__p0) __extension__ ({ \
48457   float64x2x4_t __ret; \
48458   __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \
48459   __ret; \
48460 })
48461 #else
48462 #define vld1q_f64_x4(__p0) __extension__ ({ \
48463   float64x2x4_t __ret; \
48464   __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \
48465  \
48466   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
48467   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
48468   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
48469   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
48470   __ret; \
48471 })
48472 #endif
48473 
48474 #ifdef __LITTLE_ENDIAN__
48475 #define vld1q_f32_x4(__p0) __extension__ ({ \
48476   float32x4x4_t __ret; \
48477   __builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \
48478   __ret; \
48479 })
48480 #else
48481 #define vld1q_f32_x4(__p0) __extension__ ({ \
48482   float32x4x4_t __ret; \
48483   __builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \
48484  \
48485   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
48486   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
48487   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
48488   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
48489   __ret; \
48490 })
48491 #endif
48492 
48493 #ifdef __LITTLE_ENDIAN__
48494 #define vld1q_f16_x4(__p0) __extension__ ({ \
48495   float16x8x4_t __ret; \
48496   __builtin_neon_vld1q_x4_v(&__ret, __p0, 40); \
48497   __ret; \
48498 })
48499 #else
48500 #define vld1q_f16_x4(__p0) __extension__ ({ \
48501   float16x8x4_t __ret; \
48502   __builtin_neon_vld1q_x4_v(&__ret, __p0, 40); \
48503  \
48504   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
48505   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
48506   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
48507   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
48508   __ret; \
48509 })
48510 #endif
48511 
48512 #ifdef __LITTLE_ENDIAN__
48513 #define vld1q_s32_x4(__p0) __extension__ ({ \
48514   int32x4x4_t __ret; \
48515   __builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \
48516   __ret; \
48517 })
48518 #else
48519 #define vld1q_s32_x4(__p0) __extension__ ({ \
48520   int32x4x4_t __ret; \
48521   __builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \
48522  \
48523   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
48524   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
48525   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
48526   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
48527   __ret; \
48528 })
48529 #endif
48530 
48531 #ifdef __LITTLE_ENDIAN__
48532 #define vld1q_s64_x4(__p0) __extension__ ({ \
48533   int64x2x4_t __ret; \
48534   __builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \
48535   __ret; \
48536 })
48537 #else
48538 #define vld1q_s64_x4(__p0) __extension__ ({ \
48539   int64x2x4_t __ret; \
48540   __builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \
48541  \
48542   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
48543   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
48544   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
48545   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
48546   __ret; \
48547 })
48548 #endif
48549 
48550 #ifdef __LITTLE_ENDIAN__
48551 #define vld1q_s16_x4(__p0) __extension__ ({ \
48552   int16x8x4_t __ret; \
48553   __builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \
48554   __ret; \
48555 })
48556 #else
48557 #define vld1q_s16_x4(__p0) __extension__ ({ \
48558   int16x8x4_t __ret; \
48559   __builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \
48560  \
48561   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
48562   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
48563   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
48564   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
48565   __ret; \
48566 })
48567 #endif
48568 
48569 #ifdef __LITTLE_ENDIAN__
48570 #define vld1_u8_x4(__p0) __extension__ ({ \
48571   uint8x8x4_t __ret; \
48572   __builtin_neon_vld1_x4_v(&__ret, __p0, 16); \
48573   __ret; \
48574 })
48575 #else
48576 #define vld1_u8_x4(__p0) __extension__ ({ \
48577   uint8x8x4_t __ret; \
48578   __builtin_neon_vld1_x4_v(&__ret, __p0, 16); \
48579  \
48580   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
48581   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
48582   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
48583   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
48584   __ret; \
48585 })
48586 #endif
48587 
48588 #ifdef __LITTLE_ENDIAN__
48589 #define vld1_u32_x4(__p0) __extension__ ({ \
48590   uint32x2x4_t __ret; \
48591   __builtin_neon_vld1_x4_v(&__ret, __p0, 18); \
48592   __ret; \
48593 })
48594 #else
48595 #define vld1_u32_x4(__p0) __extension__ ({ \
48596   uint32x2x4_t __ret; \
48597   __builtin_neon_vld1_x4_v(&__ret, __p0, 18); \
48598  \
48599   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
48600   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
48601   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
48602   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
48603   __ret; \
48604 })
48605 #endif
48606 
48607 #ifdef __LITTLE_ENDIAN__
48608 #define vld1_u64_x4(__p0) __extension__ ({ \
48609   uint64x1x4_t __ret; \
48610   __builtin_neon_vld1_x4_v(&__ret, __p0, 19); \
48611   __ret; \
48612 })
48613 #else
48614 #define vld1_u64_x4(__p0) __extension__ ({ \
48615   uint64x1x4_t __ret; \
48616   __builtin_neon_vld1_x4_v(&__ret, __p0, 19); \
48617   __ret; \
48618 })
48619 #endif
48620 
48621 #ifdef __LITTLE_ENDIAN__
48622 #define vld1_u16_x4(__p0) __extension__ ({ \
48623   uint16x4x4_t __ret; \
48624   __builtin_neon_vld1_x4_v(&__ret, __p0, 17); \
48625   __ret; \
48626 })
48627 #else
48628 #define vld1_u16_x4(__p0) __extension__ ({ \
48629   uint16x4x4_t __ret; \
48630   __builtin_neon_vld1_x4_v(&__ret, __p0, 17); \
48631  \
48632   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
48633   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
48634   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
48635   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
48636   __ret; \
48637 })
48638 #endif
48639 
48640 #ifdef __LITTLE_ENDIAN__
48641 #define vld1_s8_x4(__p0) __extension__ ({ \
48642   int8x8x4_t __ret; \
48643   __builtin_neon_vld1_x4_v(&__ret, __p0, 0); \
48644   __ret; \
48645 })
48646 #else
48647 #define vld1_s8_x4(__p0) __extension__ ({ \
48648   int8x8x4_t __ret; \
48649   __builtin_neon_vld1_x4_v(&__ret, __p0, 0); \
48650  \
48651   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
48652   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
48653   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
48654   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
48655   __ret; \
48656 })
48657 #endif
48658 
48659 #ifdef __LITTLE_ENDIAN__
48660 #define vld1_f64_x4(__p0) __extension__ ({ \
48661   float64x1x4_t __ret; \
48662   __builtin_neon_vld1_x4_v(&__ret, __p0, 10); \
48663   __ret; \
48664 })
48665 #else
48666 #define vld1_f64_x4(__p0) __extension__ ({ \
48667   float64x1x4_t __ret; \
48668   __builtin_neon_vld1_x4_v(&__ret, __p0, 10); \
48669   __ret; \
48670 })
48671 #endif
48672 
48673 #ifdef __LITTLE_ENDIAN__
48674 #define vld1_f32_x4(__p0) __extension__ ({ \
48675   float32x2x4_t __ret; \
48676   __builtin_neon_vld1_x4_v(&__ret, __p0, 9); \
48677   __ret; \
48678 })
48679 #else
48680 #define vld1_f32_x4(__p0) __extension__ ({ \
48681   float32x2x4_t __ret; \
48682   __builtin_neon_vld1_x4_v(&__ret, __p0, 9); \
48683  \
48684   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
48685   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
48686   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
48687   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
48688   __ret; \
48689 })
48690 #endif
48691 
48692 #ifdef __LITTLE_ENDIAN__
48693 #define vld1_f16_x4(__p0) __extension__ ({ \
48694   float16x4x4_t __ret; \
48695   __builtin_neon_vld1_x4_v(&__ret, __p0, 8); \
48696   __ret; \
48697 })
48698 #else
48699 #define vld1_f16_x4(__p0) __extension__ ({ \
48700   float16x4x4_t __ret; \
48701   __builtin_neon_vld1_x4_v(&__ret, __p0, 8); \
48702  \
48703   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
48704   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
48705   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
48706   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
48707   __ret; \
48708 })
48709 #endif
48710 
48711 #ifdef __LITTLE_ENDIAN__
48712 #define vld1_s32_x4(__p0) __extension__ ({ \
48713   int32x2x4_t __ret; \
48714   __builtin_neon_vld1_x4_v(&__ret, __p0, 2); \
48715   __ret; \
48716 })
48717 #else
48718 #define vld1_s32_x4(__p0) __extension__ ({ \
48719   int32x2x4_t __ret; \
48720   __builtin_neon_vld1_x4_v(&__ret, __p0, 2); \
48721  \
48722   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
48723   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
48724   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
48725   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
48726   __ret; \
48727 })
48728 #endif
48729 
48730 #ifdef __LITTLE_ENDIAN__
48731 #define vld1_s64_x4(__p0) __extension__ ({ \
48732   int64x1x4_t __ret; \
48733   __builtin_neon_vld1_x4_v(&__ret, __p0, 3); \
48734   __ret; \
48735 })
48736 #else
48737 #define vld1_s64_x4(__p0) __extension__ ({ \
48738   int64x1x4_t __ret; \
48739   __builtin_neon_vld1_x4_v(&__ret, __p0, 3); \
48740   __ret; \
48741 })
48742 #endif
48743 
48744 #ifdef __LITTLE_ENDIAN__
48745 #define vld1_s16_x4(__p0) __extension__ ({ \
48746   int16x4x4_t __ret; \
48747   __builtin_neon_vld1_x4_v(&__ret, __p0, 1); \
48748   __ret; \
48749 })
48750 #else
48751 #define vld1_s16_x4(__p0) __extension__ ({ \
48752   int16x4x4_t __ret; \
48753   __builtin_neon_vld1_x4_v(&__ret, __p0, 1); \
48754  \
48755   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
48756   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
48757   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
48758   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
48759   __ret; \
48760 })
48761 #endif
48762 
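/* vld2 / vld2q: de-interleaving load of 2-element structures.  Memory
 * elements 0, 2, 4, ... land in val[0] and elements 1, 3, 5, ... in val[1].
 * The variants in this block cover the poly64, float64 and 64-bit q-register
 * cases that AArch64 adds.  Illustrative use with a hypothetical array of
 * interleaved real/imaginary doubles:
 *
 *   float64x2x2_t c = vld2q_f64(samples);  // c.val[0] = re, c.val[1] = im
 */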
48763 #ifdef __LITTLE_ENDIAN__
48764 #define vld2_p64(__p0) __extension__ ({ \
48765   poly64x1x2_t __ret; \
48766   __builtin_neon_vld2_v(&__ret, __p0, 6); \
48767   __ret; \
48768 })
48769 #else
48770 #define vld2_p64(__p0) __extension__ ({ \
48771   poly64x1x2_t __ret; \
48772   __builtin_neon_vld2_v(&__ret, __p0, 6); \
48773   __ret; \
48774 })
48775 #endif
48776 
48777 #ifdef __LITTLE_ENDIAN__
48778 #define vld2q_p64(__p0) __extension__ ({ \
48779   poly64x2x2_t __ret; \
48780   __builtin_neon_vld2q_v(&__ret, __p0, 38); \
48781   __ret; \
48782 })
48783 #else
48784 #define vld2q_p64(__p0) __extension__ ({ \
48785   poly64x2x2_t __ret; \
48786   __builtin_neon_vld2q_v(&__ret, __p0, 38); \
48787  \
48788   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
48789   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
48790   __ret; \
48791 })
48792 #endif
48793 
48794 #ifdef __LITTLE_ENDIAN__
48795 #define vld2q_u64(__p0) __extension__ ({ \
48796   uint64x2x2_t __ret; \
48797   __builtin_neon_vld2q_v(&__ret, __p0, 51); \
48798   __ret; \
48799 })
48800 #else
48801 #define vld2q_u64(__p0) __extension__ ({ \
48802   uint64x2x2_t __ret; \
48803   __builtin_neon_vld2q_v(&__ret, __p0, 51); \
48804  \
48805   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
48806   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
48807   __ret; \
48808 })
48809 #endif
48810 
48811 #ifdef __LITTLE_ENDIAN__
48812 #define vld2q_f64(__p0) __extension__ ({ \
48813   float64x2x2_t __ret; \
48814   __builtin_neon_vld2q_v(&__ret, __p0, 42); \
48815   __ret; \
48816 })
48817 #else
48818 #define vld2q_f64(__p0) __extension__ ({ \
48819   float64x2x2_t __ret; \
48820   __builtin_neon_vld2q_v(&__ret, __p0, 42); \
48821  \
48822   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
48823   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
48824   __ret; \
48825 })
48826 #endif
48827 
48828 #ifdef __LITTLE_ENDIAN__
48829 #define vld2q_s64(__p0) __extension__ ({ \
48830   int64x2x2_t __ret; \
48831   __builtin_neon_vld2q_v(&__ret, __p0, 35); \
48832   __ret; \
48833 })
48834 #else
48835 #define vld2q_s64(__p0) __extension__ ({ \
48836   int64x2x2_t __ret; \
48837   __builtin_neon_vld2q_v(&__ret, __p0, 35); \
48838  \
48839   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
48840   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
48841   __ret; \
48842 })
48843 #endif
48844 
48845 #ifdef __LITTLE_ENDIAN__
48846 #define vld2_f64(__p0) __extension__ ({ \
48847   float64x1x2_t __ret; \
48848   __builtin_neon_vld2_v(&__ret, __p0, 10); \
48849   __ret; \
48850 })
48851 #else
48852 #define vld2_f64(__p0) __extension__ ({ \
48853   float64x1x2_t __ret; \
48854   __builtin_neon_vld2_v(&__ret, __p0, 10); \
48855   __ret; \
48856 })
48857 #endif
48858 
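/* vld2_dup / vld2q_dup: load one 2-element structure and replicate it to
 * every lane of the two result vectors.  Illustrative use, assuming a
 * hypothetical pointer p to two consecutive uint32_t values:
 *
 *   uint32x4x2_t d = vld2q_dup_u32(p);  // d.val[0] = {p[0],p[0],p[0],p[0]}
 *                                       // d.val[1] = {p[1],p[1],p[1],p[1]}
 */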
48859 #ifdef __LITTLE_ENDIAN__
48860 #define vld2_dup_p64(__p0) __extension__ ({ \
48861   poly64x1x2_t __ret; \
48862   __builtin_neon_vld2_dup_v(&__ret, __p0, 6); \
48863   __ret; \
48864 })
48865 #else
48866 #define vld2_dup_p64(__p0) __extension__ ({ \
48867   poly64x1x2_t __ret; \
48868   __builtin_neon_vld2_dup_v(&__ret, __p0, 6); \
48869   __ret; \
48870 })
48871 #endif
48872 
48873 #ifdef __LITTLE_ENDIAN__
48874 #define vld2q_dup_p8(__p0) __extension__ ({ \
48875   poly8x16x2_t __ret; \
48876   __builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \
48877   __ret; \
48878 })
48879 #else
48880 #define vld2q_dup_p8(__p0) __extension__ ({ \
48881   poly8x16x2_t __ret; \
48882   __builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \
48883  \
48884   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
48885   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
48886   __ret; \
48887 })
48888 #endif
48889 
48890 #ifdef __LITTLE_ENDIAN__
48891 #define vld2q_dup_p64(__p0) __extension__ ({ \
48892   poly64x2x2_t __ret; \
48893   __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \
48894   __ret; \
48895 })
48896 #else
48897 #define vld2q_dup_p64(__p0) __extension__ ({ \
48898   poly64x2x2_t __ret; \
48899   __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \
48900  \
48901   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
48902   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
48903   __ret; \
48904 })
48905 #endif
48906 
48907 #ifdef __LITTLE_ENDIAN__
48908 #define vld2q_dup_p16(__p0) __extension__ ({ \
48909   poly16x8x2_t __ret; \
48910   __builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \
48911   __ret; \
48912 })
48913 #else
48914 #define vld2q_dup_p16(__p0) __extension__ ({ \
48915   poly16x8x2_t __ret; \
48916   __builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \
48917  \
48918   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
48919   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
48920   __ret; \
48921 })
48922 #endif
48923 
48924 #ifdef __LITTLE_ENDIAN__
48925 #define vld2q_dup_u8(__p0) __extension__ ({ \
48926   uint8x16x2_t __ret; \
48927   __builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \
48928   __ret; \
48929 })
48930 #else
48931 #define vld2q_dup_u8(__p0) __extension__ ({ \
48932   uint8x16x2_t __ret; \
48933   __builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \
48934  \
48935   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
48936   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
48937   __ret; \
48938 })
48939 #endif
48940 
48941 #ifdef __LITTLE_ENDIAN__
48942 #define vld2q_dup_u32(__p0) __extension__ ({ \
48943   uint32x4x2_t __ret; \
48944   __builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \
48945   __ret; \
48946 })
48947 #else
48948 #define vld2q_dup_u32(__p0) __extension__ ({ \
48949   uint32x4x2_t __ret; \
48950   __builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \
48951  \
48952   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
48953   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
48954   __ret; \
48955 })
48956 #endif
48957 
48958 #ifdef __LITTLE_ENDIAN__
48959 #define vld2q_dup_u64(__p0) __extension__ ({ \
48960   uint64x2x2_t __ret; \
48961   __builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \
48962   __ret; \
48963 })
48964 #else
48965 #define vld2q_dup_u64(__p0) __extension__ ({ \
48966   uint64x2x2_t __ret; \
48967   __builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \
48968  \
48969   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
48970   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
48971   __ret; \
48972 })
48973 #endif
48974 
48975 #ifdef __LITTLE_ENDIAN__
48976 #define vld2q_dup_u16(__p0) __extension__ ({ \
48977   uint16x8x2_t __ret; \
48978   __builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \
48979   __ret; \
48980 })
48981 #else
48982 #define vld2q_dup_u16(__p0) __extension__ ({ \
48983   uint16x8x2_t __ret; \
48984   __builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \
48985  \
48986   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
48987   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
48988   __ret; \
48989 })
48990 #endif
48991 
48992 #ifdef __LITTLE_ENDIAN__
48993 #define vld2q_dup_s8(__p0) __extension__ ({ \
48994   int8x16x2_t __ret; \
48995   __builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \
48996   __ret; \
48997 })
48998 #else
48999 #define vld2q_dup_s8(__p0) __extension__ ({ \
49000   int8x16x2_t __ret; \
49001   __builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \
49002  \
49003   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49004   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49005   __ret; \
49006 })
49007 #endif
49008 
49009 #ifdef __LITTLE_ENDIAN__
49010 #define vld2q_dup_f64(__p0) __extension__ ({ \
49011   float64x2x2_t __ret; \
49012   __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \
49013   __ret; \
49014 })
49015 #else
49016 #define vld2q_dup_f64(__p0) __extension__ ({ \
49017   float64x2x2_t __ret; \
49018   __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \
49019  \
49020   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
49021   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
49022   __ret; \
49023 })
49024 #endif
49025 
49026 #ifdef __LITTLE_ENDIAN__
49027 #define vld2q_dup_f32(__p0) __extension__ ({ \
49028   float32x4x2_t __ret; \
49029   __builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \
49030   __ret; \
49031 })
49032 #else
49033 #define vld2q_dup_f32(__p0) __extension__ ({ \
49034   float32x4x2_t __ret; \
49035   __builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \
49036  \
49037   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
49038   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
49039   __ret; \
49040 })
49041 #endif
49042 
49043 #ifdef __LITTLE_ENDIAN__
49044 #define vld2q_dup_f16(__p0) __extension__ ({ \
49045   float16x8x2_t __ret; \
49046   __builtin_neon_vld2q_dup_v(&__ret, __p0, 40); \
49047   __ret; \
49048 })
49049 #else
49050 #define vld2q_dup_f16(__p0) __extension__ ({ \
49051   float16x8x2_t __ret; \
49052   __builtin_neon_vld2q_dup_v(&__ret, __p0, 40); \
49053  \
49054   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
49055   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
49056   __ret; \
49057 })
49058 #endif
49059 
49060 #ifdef __LITTLE_ENDIAN__
49061 #define vld2q_dup_s32(__p0) __extension__ ({ \
49062   int32x4x2_t __ret; \
49063   __builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \
49064   __ret; \
49065 })
49066 #else
49067 #define vld2q_dup_s32(__p0) __extension__ ({ \
49068   int32x4x2_t __ret; \
49069   __builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \
49070  \
49071   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
49072   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
49073   __ret; \
49074 })
49075 #endif
49076 
49077 #ifdef __LITTLE_ENDIAN__
49078 #define vld2q_dup_s64(__p0) __extension__ ({ \
49079   int64x2x2_t __ret; \
49080   __builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \
49081   __ret; \
49082 })
49083 #else
49084 #define vld2q_dup_s64(__p0) __extension__ ({ \
49085   int64x2x2_t __ret; \
49086   __builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \
49087  \
49088   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
49089   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
49090   __ret; \
49091 })
49092 #endif
49093 
49094 #ifdef __LITTLE_ENDIAN__
49095 #define vld2q_dup_s16(__p0) __extension__ ({ \
49096   int16x8x2_t __ret; \
49097   __builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \
49098   __ret; \
49099 })
49100 #else
49101 #define vld2q_dup_s16(__p0) __extension__ ({ \
49102   int16x8x2_t __ret; \
49103   __builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \
49104  \
49105   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
49106   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
49107   __ret; \
49108 })
49109 #endif
49110 
49111 #ifdef __LITTLE_ENDIAN__
49112 #define vld2_dup_f64(__p0) __extension__ ({ \
49113   float64x1x2_t __ret; \
49114   __builtin_neon_vld2_dup_v(&__ret, __p0, 10); \
49115   __ret; \
49116 })
49117 #else
49118 #define vld2_dup_f64(__p0) __extension__ ({ \
49119   float64x1x2_t __ret; \
49120   __builtin_neon_vld2_dup_v(&__ret, __p0, 10); \
49121   __ret; \
49122 })
49123 #endif
49124 
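/* vld2_lane / vld2q_lane: load one 2-element structure into lane __p2 of
 * the vectors passed in __p1; all other lanes are carried through unchanged.
 * On big-endian targets the operand vectors are lane-reversed before the
 * builtin call and the results reversed back afterwards.  Illustrative use
 * with hypothetical src and acc values:
 *
 *   uint8x16x2_t r = vld2q_lane_u8(src, acc, 3);  // overwrite lane 3 only
 */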
49125 #ifdef __LITTLE_ENDIAN__
49126 #define vld2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
49127   poly64x1x2_t __s1 = __p1; \
49128   poly64x1x2_t __ret; \
49129   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \
49130   __ret; \
49131 })
49132 #else
49133 #define vld2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
49134   poly64x1x2_t __s1 = __p1; \
49135   poly64x1x2_t __ret; \
49136   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \
49137   __ret; \
49138 })
49139 #endif
49140 
49141 #ifdef __LITTLE_ENDIAN__
49142 #define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
49143   poly8x16x2_t __s1 = __p1; \
49144   poly8x16x2_t __ret; \
49145   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \
49146   __ret; \
49147 })
49148 #else
49149 #define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
49150   poly8x16x2_t __s1 = __p1; \
49151   poly8x16x2_t __rev1; \
49152   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49153   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49154   poly8x16x2_t __ret; \
49155   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \
49156  \
49157   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49158   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49159   __ret; \
49160 })
49161 #endif
49162 
49163 #ifdef __LITTLE_ENDIAN__
49164 #define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
49165   poly64x2x2_t __s1 = __p1; \
49166   poly64x2x2_t __ret; \
49167   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \
49168   __ret; \
49169 })
49170 #else
49171 #define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
49172   poly64x2x2_t __s1 = __p1; \
49173   poly64x2x2_t __rev1; \
49174   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
49175   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
49176   poly64x2x2_t __ret; \
49177   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \
49178  \
49179   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
49180   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
49181   __ret; \
49182 })
49183 #endif
49184 
49185 #ifdef __LITTLE_ENDIAN__
49186 #define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
49187   uint8x16x2_t __s1 = __p1; \
49188   uint8x16x2_t __ret; \
49189   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \
49190   __ret; \
49191 })
49192 #else
49193 #define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
49194   uint8x16x2_t __s1 = __p1; \
49195   uint8x16x2_t __rev1; \
49196   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49197   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49198   uint8x16x2_t __ret; \
49199   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \
49200  \
49201   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49202   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49203   __ret; \
49204 })
49205 #endif
49206 
49207 #ifdef __LITTLE_ENDIAN__
49208 #define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
49209   uint64x2x2_t __s1 = __p1; \
49210   uint64x2x2_t __ret; \
49211   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \
49212   __ret; \
49213 })
49214 #else
49215 #define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
49216   uint64x2x2_t __s1 = __p1; \
49217   uint64x2x2_t __rev1; \
49218   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
49219   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
49220   uint64x2x2_t __ret; \
49221   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \
49222  \
49223   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
49224   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
49225   __ret; \
49226 })
49227 #endif
49228 
49229 #ifdef __LITTLE_ENDIAN__
49230 #define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
49231   int8x16x2_t __s1 = __p1; \
49232   int8x16x2_t __ret; \
49233   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \
49234   __ret; \
49235 })
49236 #else
49237 #define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
49238   int8x16x2_t __s1 = __p1; \
49239   int8x16x2_t __rev1; \
49240   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49241   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49242   int8x16x2_t __ret; \
49243   __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \
49244  \
49245   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49246   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49247   __ret; \
49248 })
49249 #endif
49250 
49251 #ifdef __LITTLE_ENDIAN__
49252 #define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
49253   float64x2x2_t __s1 = __p1; \
49254   float64x2x2_t __ret; \
49255   __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 42); \
49256   __ret; \
49257 })
49258 #else
49259 #define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
49260   float64x2x2_t __s1 = __p1; \
49261   float64x2x2_t __rev1; \
49262   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
49263   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
49264   float64x2x2_t __ret; \
49265   __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 42); \
49266  \
49267   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
49268   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
49269   __ret; \
49270 })
49271 #endif
49272 
49273 #ifdef __LITTLE_ENDIAN__
49274 #define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
49275   int64x2x2_t __s1 = __p1; \
49276   int64x2x2_t __ret; \
49277   __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 35); \
49278   __ret; \
49279 })
49280 #else
49281 #define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
49282   int64x2x2_t __s1 = __p1; \
49283   int64x2x2_t __rev1; \
49284   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
49285   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
49286   int64x2x2_t __ret; \
49287   __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 35); \
49288  \
49289   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
49290   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
49291   __ret; \
49292 })
49293 #endif
49294 
49295 #ifdef __LITTLE_ENDIAN__
49296 #define vld2_lane_u64(__p0, __p1, __p2) __extension__ ({ \
49297   uint64x1x2_t __s1 = __p1; \
49298   uint64x1x2_t __ret; \
49299   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \
49300   __ret; \
49301 })
49302 #else
49303 #define vld2_lane_u64(__p0, __p1, __p2) __extension__ ({ \
49304   uint64x1x2_t __s1 = __p1; \
49305   uint64x1x2_t __ret; \
49306   __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \
49307   __ret; \
49308 })
49309 #endif
49310 
49311 #ifdef __LITTLE_ENDIAN__
49312 #define vld2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
49313   float64x1x2_t __s1 = __p1; \
49314   float64x1x2_t __ret; \
49315   __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 10); \
49316   __ret; \
49317 })
49318 #else
49319 #define vld2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
49320   float64x1x2_t __s1 = __p1; \
49321   float64x1x2_t __ret; \
49322   __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 10); \
49323   __ret; \
49324 })
49325 #endif
49326 
49327 #ifdef __LITTLE_ENDIAN__
49328 #define vld2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
49329   int64x1x2_t __s1 = __p1; \
49330   int64x1x2_t __ret; \
49331   __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 3); \
49332   __ret; \
49333 })
49334 #else
49335 #define vld2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
49336   int64x1x2_t __s1 = __p1; \
49337   int64x1x2_t __ret; \
49338   __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 3); \
49339   __ret; \
49340 })
49341 #endif
49342 
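/* vld3 / vld3q: de-interleaving load of 3-element structures; memory
 * elements 0, 3, 6, ... go to val[0], 1, 4, 7, ... to val[1], and
 * 2, 5, 8, ... to val[2].  Illustrative use with a hypothetical buffer of
 * packed R/G/B doubles:
 *
 *   float64x2x3_t rgb = vld3q_f64(pix);  // val[0]=R, val[1]=G, val[2]=B
 */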
49343 #ifdef __LITTLE_ENDIAN__
49344 #define vld3_p64(__p0) __extension__ ({ \
49345   poly64x1x3_t __ret; \
49346   __builtin_neon_vld3_v(&__ret, __p0, 6); \
49347   __ret; \
49348 })
49349 #else
49350 #define vld3_p64(__p0) __extension__ ({ \
49351   poly64x1x3_t __ret; \
49352   __builtin_neon_vld3_v(&__ret, __p0, 6); \
49353   __ret; \
49354 })
49355 #endif
49356 
49357 #ifdef __LITTLE_ENDIAN__
49358 #define vld3q_p64(__p0) __extension__ ({ \
49359   poly64x2x3_t __ret; \
49360   __builtin_neon_vld3q_v(&__ret, __p0, 38); \
49361   __ret; \
49362 })
49363 #else
49364 #define vld3q_p64(__p0) __extension__ ({ \
49365   poly64x2x3_t __ret; \
49366   __builtin_neon_vld3q_v(&__ret, __p0, 38); \
49367  \
49368   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
49369   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
49370   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
49371   __ret; \
49372 })
49373 #endif
49374 
49375 #ifdef __LITTLE_ENDIAN__
49376 #define vld3q_u64(__p0) __extension__ ({ \
49377   uint64x2x3_t __ret; \
49378   __builtin_neon_vld3q_v(&__ret, __p0, 51); \
49379   __ret; \
49380 })
49381 #else
49382 #define vld3q_u64(__p0) __extension__ ({ \
49383   uint64x2x3_t __ret; \
49384   __builtin_neon_vld3q_v(&__ret, __p0, 51); \
49385  \
49386   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
49387   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
49388   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
49389   __ret; \
49390 })
49391 #endif
49392 
49393 #ifdef __LITTLE_ENDIAN__
49394 #define vld3q_f64(__p0) __extension__ ({ \
49395   float64x2x3_t __ret; \
49396   __builtin_neon_vld3q_v(&__ret, __p0, 42); \
49397   __ret; \
49398 })
49399 #else
49400 #define vld3q_f64(__p0) __extension__ ({ \
49401   float64x2x3_t __ret; \
49402   __builtin_neon_vld3q_v(&__ret, __p0, 42); \
49403  \
49404   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
49405   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
49406   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
49407   __ret; \
49408 })
49409 #endif
49410 
49411 #ifdef __LITTLE_ENDIAN__
49412 #define vld3q_s64(__p0) __extension__ ({ \
49413   int64x2x3_t __ret; \
49414   __builtin_neon_vld3q_v(&__ret, __p0, 35); \
49415   __ret; \
49416 })
49417 #else
49418 #define vld3q_s64(__p0) __extension__ ({ \
49419   int64x2x3_t __ret; \
49420   __builtin_neon_vld3q_v(&__ret, __p0, 35); \
49421  \
49422   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
49423   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
49424   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
49425   __ret; \
49426 })
49427 #endif
49428 
49429 #ifdef __LITTLE_ENDIAN__
49430 #define vld3_f64(__p0) __extension__ ({ \
49431   float64x1x3_t __ret; \
49432   __builtin_neon_vld3_v(&__ret, __p0, 10); \
49433   __ret; \
49434 })
49435 #else
49436 #define vld3_f64(__p0) __extension__ ({ \
49437   float64x1x3_t __ret; \
49438   __builtin_neon_vld3_v(&__ret, __p0, 10); \
49439   __ret; \
49440 })
49441 #endif
49442 
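/* vld3_dup / vld3q_dup: load one 3-element structure and broadcast it
 * across all lanes of the three result vectors, mirroring vld2_dup above. */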
49443 #ifdef __LITTLE_ENDIAN__
49444 #define vld3_dup_p64(__p0) __extension__ ({ \
49445   poly64x1x3_t __ret; \
49446   __builtin_neon_vld3_dup_v(&__ret, __p0, 6); \
49447   __ret; \
49448 })
49449 #else
49450 #define vld3_dup_p64(__p0) __extension__ ({ \
49451   poly64x1x3_t __ret; \
49452   __builtin_neon_vld3_dup_v(&__ret, __p0, 6); \
49453   __ret; \
49454 })
49455 #endif
49456 
49457 #ifdef __LITTLE_ENDIAN__
49458 #define vld3q_dup_p8(__p0) __extension__ ({ \
49459   poly8x16x3_t __ret; \
49460   __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \
49461   __ret; \
49462 })
49463 #else
49464 #define vld3q_dup_p8(__p0) __extension__ ({ \
49465   poly8x16x3_t __ret; \
49466   __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \
49467  \
49468   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49469   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49470   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49471   __ret; \
49472 })
49473 #endif
49474 
49475 #ifdef __LITTLE_ENDIAN__
49476 #define vld3q_dup_p64(__p0) __extension__ ({ \
49477   poly64x2x3_t __ret; \
49478   __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \
49479   __ret; \
49480 })
49481 #else
49482 #define vld3q_dup_p64(__p0) __extension__ ({ \
49483   poly64x2x3_t __ret; \
49484   __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \
49485  \
49486   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
49487   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
49488   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
49489   __ret; \
49490 })
49491 #endif
49492 
49493 #ifdef __LITTLE_ENDIAN__
49494 #define vld3q_dup_p16(__p0) __extension__ ({ \
49495   poly16x8x3_t __ret; \
49496   __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \
49497   __ret; \
49498 })
49499 #else
49500 #define vld3q_dup_p16(__p0) __extension__ ({ \
49501   poly16x8x3_t __ret; \
49502   __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \
49503  \
49504   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
49505   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
49506   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
49507   __ret; \
49508 })
49509 #endif
49510 
49511 #ifdef __LITTLE_ENDIAN__
49512 #define vld3q_dup_u8(__p0) __extension__ ({ \
49513   uint8x16x3_t __ret; \
49514   __builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \
49515   __ret; \
49516 })
49517 #else
49518 #define vld3q_dup_u8(__p0) __extension__ ({ \
49519   uint8x16x3_t __ret; \
49520   __builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \
49521  \
49522   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49523   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49524   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49525   __ret; \
49526 })
49527 #endif
49528 
49529 #ifdef __LITTLE_ENDIAN__
49530 #define vld3q_dup_u32(__p0) __extension__ ({ \
49531   uint32x4x3_t __ret; \
49532   __builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \
49533   __ret; \
49534 })
49535 #else
49536 #define vld3q_dup_u32(__p0) __extension__ ({ \
49537   uint32x4x3_t __ret; \
49538   __builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \
49539  \
49540   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
49541   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
49542   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
49543   __ret; \
49544 })
49545 #endif
49546 
49547 #ifdef __LITTLE_ENDIAN__
49548 #define vld3q_dup_u64(__p0) __extension__ ({ \
49549   uint64x2x3_t __ret; \
49550   __builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \
49551   __ret; \
49552 })
49553 #else
49554 #define vld3q_dup_u64(__p0) __extension__ ({ \
49555   uint64x2x3_t __ret; \
49556   __builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \
49557  \
49558   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
49559   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
49560   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
49561   __ret; \
49562 })
49563 #endif
49564 
49565 #ifdef __LITTLE_ENDIAN__
49566 #define vld3q_dup_u16(__p0) __extension__ ({ \
49567   uint16x8x3_t __ret; \
49568   __builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \
49569   __ret; \
49570 })
49571 #else
49572 #define vld3q_dup_u16(__p0) __extension__ ({ \
49573   uint16x8x3_t __ret; \
49574   __builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \
49575  \
49576   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
49577   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
49578   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
49579   __ret; \
49580 })
49581 #endif
49582 
49583 #ifdef __LITTLE_ENDIAN__
49584 #define vld3q_dup_s8(__p0) __extension__ ({ \
49585   int8x16x3_t __ret; \
49586   __builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \
49587   __ret; \
49588 })
49589 #else
49590 #define vld3q_dup_s8(__p0) __extension__ ({ \
49591   int8x16x3_t __ret; \
49592   __builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \
49593  \
49594   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49595   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49596   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49597   __ret; \
49598 })
49599 #endif
49600 
49601 #ifdef __LITTLE_ENDIAN__
49602 #define vld3q_dup_f64(__p0) __extension__ ({ \
49603   float64x2x3_t __ret; \
49604   __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \
49605   __ret; \
49606 })
49607 #else
49608 #define vld3q_dup_f64(__p0) __extension__ ({ \
49609   float64x2x3_t __ret; \
49610   __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \
49611  \
49612   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
49613   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
49614   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
49615   __ret; \
49616 })
49617 #endif
49618 
49619 #ifdef __LITTLE_ENDIAN__
49620 #define vld3q_dup_f32(__p0) __extension__ ({ \
49621   float32x4x3_t __ret; \
49622   __builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \
49623   __ret; \
49624 })
49625 #else
49626 #define vld3q_dup_f32(__p0) __extension__ ({ \
49627   float32x4x3_t __ret; \
49628   __builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \
49629  \
49630   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
49631   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
49632   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
49633   __ret; \
49634 })
49635 #endif
49636 
49637 #ifdef __LITTLE_ENDIAN__
49638 #define vld3q_dup_f16(__p0) __extension__ ({ \
49639   float16x8x3_t __ret; \
49640   __builtin_neon_vld3q_dup_v(&__ret, __p0, 40); \
49641   __ret; \
49642 })
49643 #else
49644 #define vld3q_dup_f16(__p0) __extension__ ({ \
49645   float16x8x3_t __ret; \
49646   __builtin_neon_vld3q_dup_v(&__ret, __p0, 40); \
49647  \
49648   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
49649   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
49650   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
49651   __ret; \
49652 })
49653 #endif
49654 
49655 #ifdef __LITTLE_ENDIAN__
49656 #define vld3q_dup_s32(__p0) __extension__ ({ \
49657   int32x4x3_t __ret; \
49658   __builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \
49659   __ret; \
49660 })
49661 #else
49662 #define vld3q_dup_s32(__p0) __extension__ ({ \
49663   int32x4x3_t __ret; \
49664   __builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \
49665  \
49666   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
49667   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
49668   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
49669   __ret; \
49670 })
49671 #endif
49672 
49673 #ifdef __LITTLE_ENDIAN__
49674 #define vld3q_dup_s64(__p0) __extension__ ({ \
49675   int64x2x3_t __ret; \
49676   __builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \
49677   __ret; \
49678 })
49679 #else
49680 #define vld3q_dup_s64(__p0) __extension__ ({ \
49681   int64x2x3_t __ret; \
49682   __builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \
49683  \
49684   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
49685   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
49686   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
49687   __ret; \
49688 })
49689 #endif
49690 
49691 #ifdef __LITTLE_ENDIAN__
49692 #define vld3q_dup_s16(__p0) __extension__ ({ \
49693   int16x8x3_t __ret; \
49694   __builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \
49695   __ret; \
49696 })
49697 #else
49698 #define vld3q_dup_s16(__p0) __extension__ ({ \
49699   int16x8x3_t __ret; \
49700   __builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \
49701  \
49702   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
49703   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
49704   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
49705   __ret; \
49706 })
49707 #endif
49708 
49709 #ifdef __LITTLE_ENDIAN__
49710 #define vld3_dup_f64(__p0) __extension__ ({ \
49711   float64x1x3_t __ret; \
49712   __builtin_neon_vld3_dup_v(&__ret, __p0, 10); \
49713   __ret; \
49714 })
49715 #else
49716 #define vld3_dup_f64(__p0) __extension__ ({ \
49717   float64x1x3_t __ret; \
49718   __builtin_neon_vld3_dup_v(&__ret, __p0, 10); \
49719   __ret; \
49720 })
49721 #endif
49722 
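/* The vld3_lane_* and vld3q_lane_* macros below load one 3-element structure
 * into lane __p2 of the three vectors passed in __p1, leaving every other lane
 * unchanged. In the big-endian branches the incoming vectors are lane-reversed
 * before the builtin call and the result is reversed back afterwards.
 *
 * Illustrative sketch (hypothetical pointers, assumes AArch64):
 *
 *   uint8x16x3_t planes = vld3q_u8(src);        // deinterleave 48 bytes
 *   planes = vld3q_lane_u8(patch, planes, 0);   // reload lane 0 of each plane
 */
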
49723 #ifdef __LITTLE_ENDIAN__
49724 #define vld3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
49725   poly64x1x3_t __s1 = __p1; \
49726   poly64x1x3_t __ret; \
49727   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \
49728   __ret; \
49729 })
49730 #else
49731 #define vld3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
49732   poly64x1x3_t __s1 = __p1; \
49733   poly64x1x3_t __ret; \
49734   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \
49735   __ret; \
49736 })
49737 #endif
49738 
49739 #ifdef __LITTLE_ENDIAN__
49740 #define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
49741   poly8x16x3_t __s1 = __p1; \
49742   poly8x16x3_t __ret; \
49743   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \
49744   __ret; \
49745 })
49746 #else
49747 #define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
49748   poly8x16x3_t __s1 = __p1; \
49749   poly8x16x3_t __rev1; \
49750   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49751   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49752   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49753   poly8x16x3_t __ret; \
49754   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \
49755  \
49756   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49757   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49758   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49759   __ret; \
49760 })
49761 #endif
49762 
49763 #ifdef __LITTLE_ENDIAN__
49764 #define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
49765   poly64x2x3_t __s1 = __p1; \
49766   poly64x2x3_t __ret; \
49767   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \
49768   __ret; \
49769 })
49770 #else
49771 #define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
49772   poly64x2x3_t __s1 = __p1; \
49773   poly64x2x3_t __rev1; \
49774   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
49775   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
49776   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
49777   poly64x2x3_t __ret; \
49778   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \
49779  \
49780   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
49781   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
49782   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
49783   __ret; \
49784 })
49785 #endif
49786 
49787 #ifdef __LITTLE_ENDIAN__
49788 #define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
49789   uint8x16x3_t __s1 = __p1; \
49790   uint8x16x3_t __ret; \
49791   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \
49792   __ret; \
49793 })
49794 #else
49795 #define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
49796   uint8x16x3_t __s1 = __p1; \
49797   uint8x16x3_t __rev1; \
49798   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49799   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49800   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49801   uint8x16x3_t __ret; \
49802   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \
49803  \
49804   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49805   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49806   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49807   __ret; \
49808 })
49809 #endif
49810 
49811 #ifdef __LITTLE_ENDIAN__
49812 #define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
49813   uint64x2x3_t __s1 = __p1; \
49814   uint64x2x3_t __ret; \
49815   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \
49816   __ret; \
49817 })
49818 #else
49819 #define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
49820   uint64x2x3_t __s1 = __p1; \
49821   uint64x2x3_t __rev1; \
49822   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
49823   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
49824   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
49825   uint64x2x3_t __ret; \
49826   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \
49827  \
49828   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
49829   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
49830   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
49831   __ret; \
49832 })
49833 #endif
49834 
49835 #ifdef __LITTLE_ENDIAN__
49836 #define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
49837   int8x16x3_t __s1 = __p1; \
49838   int8x16x3_t __ret; \
49839   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \
49840   __ret; \
49841 })
49842 #else
49843 #define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
49844   int8x16x3_t __s1 = __p1; \
49845   int8x16x3_t __rev1; \
49846   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49847   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49848   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49849   int8x16x3_t __ret; \
49850   __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \
49851  \
49852   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49853   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49854   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49855   __ret; \
49856 })
49857 #endif
49858 
49859 #ifdef __LITTLE_ENDIAN__
49860 #define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
49861   float64x2x3_t __s1 = __p1; \
49862   float64x2x3_t __ret; \
49863   __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 42); \
49864   __ret; \
49865 })
49866 #else
49867 #define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
49868   float64x2x3_t __s1 = __p1; \
49869   float64x2x3_t __rev1; \
49870   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
49871   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
49872   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
49873   float64x2x3_t __ret; \
49874   __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 42); \
49875  \
49876   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
49877   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
49878   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
49879   __ret; \
49880 })
49881 #endif
49882 
49883 #ifdef __LITTLE_ENDIAN__
49884 #define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
49885   int64x2x3_t __s1 = __p1; \
49886   int64x2x3_t __ret; \
49887   __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 35); \
49888   __ret; \
49889 })
49890 #else
49891 #define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
49892   int64x2x3_t __s1 = __p1; \
49893   int64x2x3_t __rev1; \
49894   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
49895   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
49896   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
49897   int64x2x3_t __ret; \
49898   __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 35); \
49899  \
49900   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
49901   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
49902   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
49903   __ret; \
49904 })
49905 #endif
49906 
49907 #ifdef __LITTLE_ENDIAN__
49908 #define vld3_lane_u64(__p0, __p1, __p2) __extension__ ({ \
49909   uint64x1x3_t __s1 = __p1; \
49910   uint64x1x3_t __ret; \
49911   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \
49912   __ret; \
49913 })
49914 #else
49915 #define vld3_lane_u64(__p0, __p1, __p2) __extension__ ({ \
49916   uint64x1x3_t __s1 = __p1; \
49917   uint64x1x3_t __ret; \
49918   __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \
49919   __ret; \
49920 })
49921 #endif
49922 
49923 #ifdef __LITTLE_ENDIAN__
49924 #define vld3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
49925   float64x1x3_t __s1 = __p1; \
49926   float64x1x3_t __ret; \
49927   __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 10); \
49928   __ret; \
49929 })
49930 #else
49931 #define vld3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
49932   float64x1x3_t __s1 = __p1; \
49933   float64x1x3_t __ret; \
49934   __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 10); \
49935   __ret; \
49936 })
49937 #endif
49938 
49939 #ifdef __LITTLE_ENDIAN__
49940 #define vld3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
49941   int64x1x3_t __s1 = __p1; \
49942   int64x1x3_t __ret; \
49943   __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 3); \
49944   __ret; \
49945 })
49946 #else
49947 #define vld3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
49948   int64x1x3_t __s1 = __p1; \
49949   int64x1x3_t __ret; \
49950   __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 3); \
49951   __ret; \
49952 })
49953 #endif
49954 
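/* The vld4_* and vld4q_* macros below load 4-way interleaved data and
 * deinterleave it into the four vectors of the returned x4_t aggregate:
 * val[0] receives elements 0, 4, 8, ..., val[1] receives 1, 5, 9, ..., etc.
 *
 * Illustrative sketch (hypothetical buffer, assumes AArch64):
 *
 *   uint64x2x4_t v = vld4q_u64(buf);   // reads buf[0..7]
 *   // v.val[0] = {buf[0], buf[4]}, v.val[1] = {buf[1], buf[5]}, ...
 */
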
49955 #ifdef __LITTLE_ENDIAN__
49956 #define vld4_p64(__p0) __extension__ ({ \
49957   poly64x1x4_t __ret; \
49958   __builtin_neon_vld4_v(&__ret, __p0, 6); \
49959   __ret; \
49960 })
49961 #else
49962 #define vld4_p64(__p0) __extension__ ({ \
49963   poly64x1x4_t __ret; \
49964   __builtin_neon_vld4_v(&__ret, __p0, 6); \
49965   __ret; \
49966 })
49967 #endif
49968 
49969 #ifdef __LITTLE_ENDIAN__
49970 #define vld4q_p64(__p0) __extension__ ({ \
49971   poly64x2x4_t __ret; \
49972   __builtin_neon_vld4q_v(&__ret, __p0, 38); \
49973   __ret; \
49974 })
49975 #else
49976 #define vld4q_p64(__p0) __extension__ ({ \
49977   poly64x2x4_t __ret; \
49978   __builtin_neon_vld4q_v(&__ret, __p0, 38); \
49979  \
49980   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
49981   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
49982   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
49983   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
49984   __ret; \
49985 })
49986 #endif
49987 
49988 #ifdef __LITTLE_ENDIAN__
49989 #define vld4q_u64(__p0) __extension__ ({ \
49990   uint64x2x4_t __ret; \
49991   __builtin_neon_vld4q_v(&__ret, __p0, 51); \
49992   __ret; \
49993 })
49994 #else
49995 #define vld4q_u64(__p0) __extension__ ({ \
49996   uint64x2x4_t __ret; \
49997   __builtin_neon_vld4q_v(&__ret, __p0, 51); \
49998  \
49999   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
50000   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
50001   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
50002   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
50003   __ret; \
50004 })
50005 #endif
50006 
50007 #ifdef __LITTLE_ENDIAN__
50008 #define vld4q_f64(__p0) __extension__ ({ \
50009   float64x2x4_t __ret; \
50010   __builtin_neon_vld4q_v(&__ret, __p0, 42); \
50011   __ret; \
50012 })
50013 #else
50014 #define vld4q_f64(__p0) __extension__ ({ \
50015   float64x2x4_t __ret; \
50016   __builtin_neon_vld4q_v(&__ret, __p0, 42); \
50017  \
50018   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
50019   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
50020   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
50021   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
50022   __ret; \
50023 })
50024 #endif
50025 
50026 #ifdef __LITTLE_ENDIAN__
50027 #define vld4q_s64(__p0) __extension__ ({ \
50028   int64x2x4_t __ret; \
50029   __builtin_neon_vld4q_v(&__ret, __p0, 35); \
50030   __ret; \
50031 })
50032 #else
50033 #define vld4q_s64(__p0) __extension__ ({ \
50034   int64x2x4_t __ret; \
50035   __builtin_neon_vld4q_v(&__ret, __p0, 35); \
50036  \
50037   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
50038   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
50039   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
50040   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
50041   __ret; \
50042 })
50043 #endif
50044 
50045 #ifdef __LITTLE_ENDIAN__
50046 #define vld4_f64(__p0) __extension__ ({ \
50047   float64x1x4_t __ret; \
50048   __builtin_neon_vld4_v(&__ret, __p0, 10); \
50049   __ret; \
50050 })
50051 #else
50052 #define vld4_f64(__p0) __extension__ ({ \
50053   float64x1x4_t __ret; \
50054   __builtin_neon_vld4_v(&__ret, __p0, 10); \
50055   __ret; \
50056 })
50057 #endif
50058 
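/* The vld4_dup_* and vld4q_dup_* macros below load a single 4-element
 * structure and replicate each element across every lane of the matching
 * vector in the returned x4_t aggregate.
 *
 * Illustrative sketch (hypothetical coefficients):
 *
 *   const float coeffs[4] = {0.25f, 0.5f, 0.75f, 1.0f};
 *   float32x4x4_t c = vld4q_dup_f32(coeffs);
 *   // c.val[k] holds coeffs[k] in all four lanes
 */
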
50059 #ifdef __LITTLE_ENDIAN__
50060 #define vld4_dup_p64(__p0) __extension__ ({ \
50061   poly64x1x4_t __ret; \
50062   __builtin_neon_vld4_dup_v(&__ret, __p0, 6); \
50063   __ret; \
50064 })
50065 #else
50066 #define vld4_dup_p64(__p0) __extension__ ({ \
50067   poly64x1x4_t __ret; \
50068   __builtin_neon_vld4_dup_v(&__ret, __p0, 6); \
50069   __ret; \
50070 })
50071 #endif
50072 
50073 #ifdef __LITTLE_ENDIAN__
50074 #define vld4q_dup_p8(__p0) __extension__ ({ \
50075   poly8x16x4_t __ret; \
50076   __builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \
50077   __ret; \
50078 })
50079 #else
50080 #define vld4q_dup_p8(__p0) __extension__ ({ \
50081   poly8x16x4_t __ret; \
50082   __builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \
50083  \
50084   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50085   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50086   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50087   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50088   __ret; \
50089 })
50090 #endif
50091 
50092 #ifdef __LITTLE_ENDIAN__
50093 #define vld4q_dup_p64(__p0) __extension__ ({ \
50094   poly64x2x4_t __ret; \
50095   __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \
50096   __ret; \
50097 })
50098 #else
50099 #define vld4q_dup_p64(__p0) __extension__ ({ \
50100   poly64x2x4_t __ret; \
50101   __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \
50102  \
50103   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
50104   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
50105   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
50106   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
50107   __ret; \
50108 })
50109 #endif
50110 
50111 #ifdef __LITTLE_ENDIAN__
50112 #define vld4q_dup_p16(__p0) __extension__ ({ \
50113   poly16x8x4_t __ret; \
50114   __builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \
50115   __ret; \
50116 })
50117 #else
50118 #define vld4q_dup_p16(__p0) __extension__ ({ \
50119   poly16x8x4_t __ret; \
50120   __builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \
50121  \
50122   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
50123   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
50124   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
50125   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
50126   __ret; \
50127 })
50128 #endif
50129 
50130 #ifdef __LITTLE_ENDIAN__
50131 #define vld4q_dup_u8(__p0) __extension__ ({ \
50132   uint8x16x4_t __ret; \
50133   __builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \
50134   __ret; \
50135 })
50136 #else
50137 #define vld4q_dup_u8(__p0) __extension__ ({ \
50138   uint8x16x4_t __ret; \
50139   __builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \
50140  \
50141   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50142   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50143   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50144   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50145   __ret; \
50146 })
50147 #endif
50148 
50149 #ifdef __LITTLE_ENDIAN__
50150 #define vld4q_dup_u32(__p0) __extension__ ({ \
50151   uint32x4x4_t __ret; \
50152   __builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \
50153   __ret; \
50154 })
50155 #else
50156 #define vld4q_dup_u32(__p0) __extension__ ({ \
50157   uint32x4x4_t __ret; \
50158   __builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \
50159  \
50160   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
50161   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
50162   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
50163   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
50164   __ret; \
50165 })
50166 #endif
50167 
50168 #ifdef __LITTLE_ENDIAN__
50169 #define vld4q_dup_u64(__p0) __extension__ ({ \
50170   uint64x2x4_t __ret; \
50171   __builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \
50172   __ret; \
50173 })
50174 #else
50175 #define vld4q_dup_u64(__p0) __extension__ ({ \
50176   uint64x2x4_t __ret; \
50177   __builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \
50178  \
50179   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
50180   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
50181   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
50182   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
50183   __ret; \
50184 })
50185 #endif
50186 
50187 #ifdef __LITTLE_ENDIAN__
50188 #define vld4q_dup_u16(__p0) __extension__ ({ \
50189   uint16x8x4_t __ret; \
50190   __builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \
50191   __ret; \
50192 })
50193 #else
50194 #define vld4q_dup_u16(__p0) __extension__ ({ \
50195   uint16x8x4_t __ret; \
50196   __builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \
50197  \
50198   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
50199   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
50200   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
50201   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
50202   __ret; \
50203 })
50204 #endif
50205 
50206 #ifdef __LITTLE_ENDIAN__
50207 #define vld4q_dup_s8(__p0) __extension__ ({ \
50208   int8x16x4_t __ret; \
50209   __builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \
50210   __ret; \
50211 })
50212 #else
50213 #define vld4q_dup_s8(__p0) __extension__ ({ \
50214   int8x16x4_t __ret; \
50215   __builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \
50216  \
50217   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50218   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50219   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50220   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50221   __ret; \
50222 })
50223 #endif
50224 
50225 #ifdef __LITTLE_ENDIAN__
50226 #define vld4q_dup_f64(__p0) __extension__ ({ \
50227   float64x2x4_t __ret; \
50228   __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \
50229   __ret; \
50230 })
50231 #else
50232 #define vld4q_dup_f64(__p0) __extension__ ({ \
50233   float64x2x4_t __ret; \
50234   __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \
50235  \
50236   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
50237   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
50238   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
50239   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
50240   __ret; \
50241 })
50242 #endif
50243 
50244 #ifdef __LITTLE_ENDIAN__
50245 #define vld4q_dup_f32(__p0) __extension__ ({ \
50246   float32x4x4_t __ret; \
50247   __builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \
50248   __ret; \
50249 })
50250 #else
50251 #define vld4q_dup_f32(__p0) __extension__ ({ \
50252   float32x4x4_t __ret; \
50253   __builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \
50254  \
50255   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
50256   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
50257   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
50258   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
50259   __ret; \
50260 })
50261 #endif
50262 
50263 #ifdef __LITTLE_ENDIAN__
50264 #define vld4q_dup_f16(__p0) __extension__ ({ \
50265   float16x8x4_t __ret; \
50266   __builtin_neon_vld4q_dup_v(&__ret, __p0, 40); \
50267   __ret; \
50268 })
50269 #else
50270 #define vld4q_dup_f16(__p0) __extension__ ({ \
50271   float16x8x4_t __ret; \
50272   __builtin_neon_vld4q_dup_v(&__ret, __p0, 40); \
50273  \
50274   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
50275   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
50276   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
50277   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
50278   __ret; \
50279 })
50280 #endif
50281 
50282 #ifdef __LITTLE_ENDIAN__
50283 #define vld4q_dup_s32(__p0) __extension__ ({ \
50284   int32x4x4_t __ret; \
50285   __builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \
50286   __ret; \
50287 })
50288 #else
50289 #define vld4q_dup_s32(__p0) __extension__ ({ \
50290   int32x4x4_t __ret; \
50291   __builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \
50292  \
50293   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
50294   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
50295   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
50296   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
50297   __ret; \
50298 })
50299 #endif
50300 
50301 #ifdef __LITTLE_ENDIAN__
50302 #define vld4q_dup_s64(__p0) __extension__ ({ \
50303   int64x2x4_t __ret; \
50304   __builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \
50305   __ret; \
50306 })
50307 #else
50308 #define vld4q_dup_s64(__p0) __extension__ ({ \
50309   int64x2x4_t __ret; \
50310   __builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \
50311  \
50312   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
50313   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
50314   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
50315   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
50316   __ret; \
50317 })
50318 #endif
50319 
50320 #ifdef __LITTLE_ENDIAN__
50321 #define vld4q_dup_s16(__p0) __extension__ ({ \
50322   int16x8x4_t __ret; \
50323   __builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \
50324   __ret; \
50325 })
50326 #else
50327 #define vld4q_dup_s16(__p0) __extension__ ({ \
50328   int16x8x4_t __ret; \
50329   __builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \
50330  \
50331   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
50332   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
50333   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
50334   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
50335   __ret; \
50336 })
50337 #endif
50338 
50339 #ifdef __LITTLE_ENDIAN__
50340 #define vld4_dup_f64(__p0) __extension__ ({ \
50341   float64x1x4_t __ret; \
50342   __builtin_neon_vld4_dup_v(&__ret, __p0, 10); \
50343   __ret; \
50344 })
50345 #else
50346 #define vld4_dup_f64(__p0) __extension__ ({ \
50347   float64x1x4_t __ret; \
50348   __builtin_neon_vld4_dup_v(&__ret, __p0, 10); \
50349   __ret; \
50350 })
50351 #endif
50352 
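/* The vld4_lane_* and vld4q_lane_* macros below load one 4-element structure
 * into lane __p2 of the four vectors passed in __p1; the remaining lanes pass
 * through untouched.
 *
 * Illustrative sketch (hypothetical pointers, assumes AArch64):
 *
 *   uint8x16x4_t px = vld4q_u8(src);        // deinterleave RGBA bytes
 *   px = vld4q_lane_u8(extra, px, 15);      // overwrite lane 15 of each plane
 */
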
50353 #ifdef __LITTLE_ENDIAN__
50354 #define vld4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
50355   poly64x1x4_t __s1 = __p1; \
50356   poly64x1x4_t __ret; \
50357   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \
50358   __ret; \
50359 })
50360 #else
50361 #define vld4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
50362   poly64x1x4_t __s1 = __p1; \
50363   poly64x1x4_t __ret; \
50364   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \
50365   __ret; \
50366 })
50367 #endif
50368 
50369 #ifdef __LITTLE_ENDIAN__
50370 #define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
50371   poly8x16x4_t __s1 = __p1; \
50372   poly8x16x4_t __ret; \
50373   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \
50374   __ret; \
50375 })
50376 #else
50377 #define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
50378   poly8x16x4_t __s1 = __p1; \
50379   poly8x16x4_t __rev1; \
50380   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50381   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50382   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50383   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50384   poly8x16x4_t __ret; \
50385   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \
50386  \
50387   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50388   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50389   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50390   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50391   __ret; \
50392 })
50393 #endif
50394 
50395 #ifdef __LITTLE_ENDIAN__
50396 #define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
50397   poly64x2x4_t __s1 = __p1; \
50398   poly64x2x4_t __ret; \
50399   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \
50400   __ret; \
50401 })
50402 #else
50403 #define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
50404   poly64x2x4_t __s1 = __p1; \
50405   poly64x2x4_t __rev1; \
50406   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
50407   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
50408   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
50409   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
50410   poly64x2x4_t __ret; \
50411   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \
50412  \
50413   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
50414   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
50415   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
50416   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
50417   __ret; \
50418 })
50419 #endif
50420 
50421 #ifdef __LITTLE_ENDIAN__
50422 #define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
50423   uint8x16x4_t __s1 = __p1; \
50424   uint8x16x4_t __ret; \
50425   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \
50426   __ret; \
50427 })
50428 #else
50429 #define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
50430   uint8x16x4_t __s1 = __p1; \
50431   uint8x16x4_t __rev1; \
50432   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50433   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50434   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50435   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50436   uint8x16x4_t __ret; \
50437   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \
50438  \
50439   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50440   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50441   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50442   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50443   __ret; \
50444 })
50445 #endif
50446 
50447 #ifdef __LITTLE_ENDIAN__
50448 #define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
50449   uint64x2x4_t __s1 = __p1; \
50450   uint64x2x4_t __ret; \
50451   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \
50452   __ret; \
50453 })
50454 #else
50455 #define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
50456   uint64x2x4_t __s1 = __p1; \
50457   uint64x2x4_t __rev1; \
50458   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
50459   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
50460   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
50461   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
50462   uint64x2x4_t __ret; \
50463   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \
50464  \
50465   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
50466   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
50467   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
50468   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
50469   __ret; \
50470 })
50471 #endif
50472 
50473 #ifdef __LITTLE_ENDIAN__
50474 #define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
50475   int8x16x4_t __s1 = __p1; \
50476   int8x16x4_t __ret; \
50477   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \
50478   __ret; \
50479 })
50480 #else
50481 #define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
50482   int8x16x4_t __s1 = __p1; \
50483   int8x16x4_t __rev1; \
50484   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50485   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50486   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50487   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50488   int8x16x4_t __ret; \
50489   __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \
50490  \
50491   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50492   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50493   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50494   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50495   __ret; \
50496 })
50497 #endif
50498 
50499 #ifdef __LITTLE_ENDIAN__
50500 #define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
50501   float64x2x4_t __s1 = __p1; \
50502   float64x2x4_t __ret; \
50503   __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 42); \
50504   __ret; \
50505 })
50506 #else
50507 #define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
50508   float64x2x4_t __s1 = __p1; \
50509   float64x2x4_t __rev1; \
50510   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
50511   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
50512   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
50513   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
50514   float64x2x4_t __ret; \
50515   __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 42); \
50516  \
50517   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
50518   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
50519   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
50520   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
50521   __ret; \
50522 })
50523 #endif
50524 
50525 #ifdef __LITTLE_ENDIAN__
50526 #define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
50527   int64x2x4_t __s1 = __p1; \
50528   int64x2x4_t __ret; \
50529   __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 35); \
50530   __ret; \
50531 })
50532 #else
50533 #define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
50534   int64x2x4_t __s1 = __p1; \
50535   int64x2x4_t __rev1; \
50536   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
50537   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
50538   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
50539   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
50540   int64x2x4_t __ret; \
50541   __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 35); \
50542  \
50543   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
50544   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
50545   __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
50546   __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
50547   __ret; \
50548 })
50549 #endif
50550 
50551 #ifdef __LITTLE_ENDIAN__
50552 #define vld4_lane_u64(__p0, __p1, __p2) __extension__ ({ \
50553   uint64x1x4_t __s1 = __p1; \
50554   uint64x1x4_t __ret; \
50555   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \
50556   __ret; \
50557 })
50558 #else
50559 #define vld4_lane_u64(__p0, __p1, __p2) __extension__ ({ \
50560   uint64x1x4_t __s1 = __p1; \
50561   uint64x1x4_t __ret; \
50562   __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \
50563   __ret; \
50564 })
50565 #endif
50566 
50567 #ifdef __LITTLE_ENDIAN__
50568 #define vld4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
50569   float64x1x4_t __s1 = __p1; \
50570   float64x1x4_t __ret; \
50571   __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 10); \
50572   __ret; \
50573 })
50574 #else
50575 #define vld4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
50576   float64x1x4_t __s1 = __p1; \
50577   float64x1x4_t __ret; \
50578   __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 10); \
50579   __ret; \
50580 })
50581 #endif
50582 
50583 #ifdef __LITTLE_ENDIAN__
50584 #define vld4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
50585   int64x1x4_t __s1 = __p1; \
50586   int64x1x4_t __ret; \
50587   __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 3); \
50588   __ret; \
50589 })
50590 #else
50591 #define vld4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
50592   int64x1x4_t __s1 = __p1; \
50593   int64x1x4_t __ret; \
50594   __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 3); \
50595   __ret; \
50596 })
50597 #endif
50598 
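/* vldrq_p128 loads one full 128-bit polynomial (poly128_t) from memory. There
 * is no lane structure to correct, so the little- and big-endian expansions
 * are identical.
 *
 * Illustrative sketch (hypothetical pointer):
 *
 *   poly128_t h = vldrq_p128(key);   // key: const poly128_t *
 */
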
50599 #ifdef __LITTLE_ENDIAN__
50600 #define vldrq_p128(__p0) __extension__ ({ \
50601   poly128_t __ret; \
50602   __ret = (poly128_t) __builtin_neon_vldrq_p128(__p0); \
50603   __ret; \
50604 })
50605 #else
50606 #define vldrq_p128(__p0) __extension__ ({ \
50607   poly128_t __ret; \
50608   __ret = (poly128_t) __builtin_neon_vldrq_p128(__p0); \
50609   __ret; \
50610 })
50611 #endif
50612 
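/* The functions below are the AArch64 additions to the vmax family.
 * vmaxq_f64 and vmax_f64 return the per-lane maximum (a NaN in either input
 * produces NaN in that lane), while the vmaxnm variants use the IEEE 754-2008
 * maxNum rule: when exactly one operand is a quiet NaN, the numeric operand
 * is returned.
 *
 * Illustrative sketch:
 *
 *   float64x2_t a = {1.0, __builtin_nan("")};
 *   float64x2_t b = {2.0, 5.0};
 *   float64x2_t m  = vmaxq_f64(a, b);     // {2.0, NaN}
 *   float64x2_t mn = vmaxnmq_f64(a, b);   // {2.0, 5.0}
 */
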
50613 #ifdef __LITTLE_ENDIAN__
50614 __ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
50615   float64x2_t __ret;
50616   __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
50617   return __ret;
50618 }
50619 #else
50620 __ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
50621   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
50622   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
50623   float64x2_t __ret;
50624   __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
50625   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
50626   return __ret;
50627 }
50628 #endif
50629 
50630 #ifdef __LITTLE_ENDIAN__
50631 __ai float64x1_t vmax_f64(float64x1_t __p0, float64x1_t __p1) {
50632   float64x1_t __ret;
50633   __ret = (float64x1_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
50634   return __ret;
50635 }
50636 #else
50637 __ai float64x1_t vmax_f64(float64x1_t __p0, float64x1_t __p1) {
50638   float64x1_t __ret;
50639   __ret = (float64x1_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
50640   return __ret;
50641 }
50642 #endif
50643 
50644 #ifdef __LITTLE_ENDIAN__
50645 __ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
50646   float64x2_t __ret;
50647   __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
50648   return __ret;
50649 }
50650 #else
50651 __ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
50652   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
50653   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
50654   float64x2_t __ret;
50655   __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
50656   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
50657   return __ret;
50658 }
50659 #endif
50660 
50661 #ifdef __LITTLE_ENDIAN__
50662 __ai float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
50663   float32x4_t __ret;
50664   __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
50665   return __ret;
50666 }
50667 #else
50668 __ai float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
50669   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
50670   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
50671   float32x4_t __ret;
50672   __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
50673   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
50674   return __ret;
50675 }
50676 #endif
50677 
50678 #ifdef __LITTLE_ENDIAN__
50679 __ai float64x1_t vmaxnm_f64(float64x1_t __p0, float64x1_t __p1) {
50680   float64x1_t __ret;
50681   __ret = (float64x1_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
50682   return __ret;
50683 }
50684 #else
50685 __ai float64x1_t vmaxnm_f64(float64x1_t __p0, float64x1_t __p1) {
50686   float64x1_t __ret;
50687   __ret = (float64x1_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
50688   return __ret;
50689 }
50690 #endif
50691 
50692 #ifdef __LITTLE_ENDIAN__
50693 __ai float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
50694   float32x2_t __ret;
50695   __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
50696   return __ret;
50697 }
50698 #else
50699 __ai float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
50700   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
50701   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
50702   float32x2_t __ret;
50703   __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
50704   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
50705   return __ret;
50706 }
50707 #endif
50708 
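/* The across-lane reductions that follow (vmaxnmvq_*, vmaxnmv_*, vmaxvq_* and
 * vmaxv_*) collapse one vector to a single scalar maximum. In the big-endian
 * branches only the input lanes are reversed; the scalar result needs no
 * correction.
 *
 * Illustrative sketch:
 *
 *   float32x4_t v = {3.0f, 7.0f, 1.0f, 5.0f};
 *   float32_t hi = vmaxvq_f32(v);   // 7.0f
 */
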
50709 #ifdef __LITTLE_ENDIAN__
50710 __ai float64_t vmaxnmvq_f64(float64x2_t __p0) {
50711   float64_t __ret;
50712   __ret = (float64_t) __builtin_neon_vmaxnmvq_f64((int8x16_t)__p0);
50713   return __ret;
50714 }
50715 #else
50716 __ai float64_t vmaxnmvq_f64(float64x2_t __p0) {
50717   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
50718   float64_t __ret;
50719   __ret = (float64_t) __builtin_neon_vmaxnmvq_f64((int8x16_t)__rev0);
50720   return __ret;
50721 }
50722 #endif
50723 
50724 #ifdef __LITTLE_ENDIAN__
50725 __ai float32_t vmaxnmvq_f32(float32x4_t __p0) {
50726   float32_t __ret;
50727   __ret = (float32_t) __builtin_neon_vmaxnmvq_f32((int8x16_t)__p0);
50728   return __ret;
50729 }
50730 #else
50731 __ai float32_t vmaxnmvq_f32(float32x4_t __p0) {
50732   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
50733   float32_t __ret;
50734   __ret = (float32_t) __builtin_neon_vmaxnmvq_f32((int8x16_t)__rev0);
50735   return __ret;
50736 }
50737 #endif
50738 
50739 #ifdef __LITTLE_ENDIAN__
50740 __ai float32_t vmaxnmv_f32(float32x2_t __p0) {
50741   float32_t __ret;
50742   __ret = (float32_t) __builtin_neon_vmaxnmv_f32((int8x8_t)__p0);
50743   return __ret;
50744 }
50745 #else
50746 __ai float32_t vmaxnmv_f32(float32x2_t __p0) {
50747   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
50748   float32_t __ret;
50749   __ret = (float32_t) __builtin_neon_vmaxnmv_f32((int8x8_t)__rev0);
50750   return __ret;
50751 }
50752 #endif
50753 
50754 #ifdef __LITTLE_ENDIAN__
50755 __ai uint8_t vmaxvq_u8(uint8x16_t __p0) {
50756   uint8_t __ret;
50757   __ret = (uint8_t) __builtin_neon_vmaxvq_u8((int8x16_t)__p0);
50758   return __ret;
50759 }
50760 #else
50761 __ai uint8_t vmaxvq_u8(uint8x16_t __p0) {
50762   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
50763   uint8_t __ret;
50764   __ret = (uint8_t) __builtin_neon_vmaxvq_u8((int8x16_t)__rev0);
50765   return __ret;
50766 }
50767 #endif
50768 
50769 #ifdef __LITTLE_ENDIAN__
50770 __ai uint32_t vmaxvq_u32(uint32x4_t __p0) {
50771   uint32_t __ret;
50772   __ret = (uint32_t) __builtin_neon_vmaxvq_u32((int8x16_t)__p0);
50773   return __ret;
50774 }
50775 #else
50776 __ai uint32_t vmaxvq_u32(uint32x4_t __p0) {
50777   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
50778   uint32_t __ret;
50779   __ret = (uint32_t) __builtin_neon_vmaxvq_u32((int8x16_t)__rev0);
50780   return __ret;
50781 }
50782 #endif
50783 
50784 #ifdef __LITTLE_ENDIAN__
50785 __ai uint16_t vmaxvq_u16(uint16x8_t __p0) {
50786   uint16_t __ret;
50787   __ret = (uint16_t) __builtin_neon_vmaxvq_u16((int8x16_t)__p0);
50788   return __ret;
50789 }
50790 #else
50791 __ai uint16_t vmaxvq_u16(uint16x8_t __p0) {
50792   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
50793   uint16_t __ret;
50794   __ret = (uint16_t) __builtin_neon_vmaxvq_u16((int8x16_t)__rev0);
50795   return __ret;
50796 }
50797 #endif
50798 
50799 #ifdef __LITTLE_ENDIAN__
50800 __ai int8_t vmaxvq_s8(int8x16_t __p0) {
50801   int8_t __ret;
50802   __ret = (int8_t) __builtin_neon_vmaxvq_s8((int8x16_t)__p0);
50803   return __ret;
50804 }
50805 #else
50806 __ai int8_t vmaxvq_s8(int8x16_t __p0) {
50807   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
50808   int8_t __ret;
50809   __ret = (int8_t) __builtin_neon_vmaxvq_s8((int8x16_t)__rev0);
50810   return __ret;
50811 }
50812 #endif
50813 
50814 #ifdef __LITTLE_ENDIAN__
50815 __ai float64_t vmaxvq_f64(float64x2_t __p0) {
50816   float64_t __ret;
50817   __ret = (float64_t) __builtin_neon_vmaxvq_f64((int8x16_t)__p0);
50818   return __ret;
50819 }
50820 #else
50821 __ai float64_t vmaxvq_f64(float64x2_t __p0) {
50822   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
50823   float64_t __ret;
50824   __ret = (float64_t) __builtin_neon_vmaxvq_f64((int8x16_t)__rev0);
50825   return __ret;
50826 }
50827 #endif
50828 
50829 #ifdef __LITTLE_ENDIAN__
50830 __ai float32_t vmaxvq_f32(float32x4_t __p0) {
50831   float32_t __ret;
50832   __ret = (float32_t) __builtin_neon_vmaxvq_f32((int8x16_t)__p0);
50833   return __ret;
50834 }
50835 #else
50836 __ai float32_t vmaxvq_f32(float32x4_t __p0) {
50837   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
50838   float32_t __ret;
50839   __ret = (float32_t) __builtin_neon_vmaxvq_f32((int8x16_t)__rev0);
50840   return __ret;
50841 }
50842 #endif
50843 
50844 #ifdef __LITTLE_ENDIAN__
50845 __ai int32_t vmaxvq_s32(int32x4_t __p0) {
50846   int32_t __ret;
50847   __ret = (int32_t) __builtin_neon_vmaxvq_s32((int8x16_t)__p0);
50848   return __ret;
50849 }
50850 #else
50851 __ai int32_t vmaxvq_s32(int32x4_t __p0) {
50852   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
50853   int32_t __ret;
50854   __ret = (int32_t) __builtin_neon_vmaxvq_s32((int8x16_t)__rev0);
50855   return __ret;
50856 }
50857 #endif
50858 
50859 #ifdef __LITTLE_ENDIAN__
50860 __ai int16_t vmaxvq_s16(int16x8_t __p0) {
50861   int16_t __ret;
50862   __ret = (int16_t) __builtin_neon_vmaxvq_s16((int8x16_t)__p0);
50863   return __ret;
50864 }
50865 #else
50866 __ai int16_t vmaxvq_s16(int16x8_t __p0) {
50867   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
50868   int16_t __ret;
50869   __ret = (int16_t) __builtin_neon_vmaxvq_s16((int8x16_t)__rev0);
50870   return __ret;
50871 }
50872 #endif
50873 
50874 #ifdef __LITTLE_ENDIAN__
50875 __ai uint8_t vmaxv_u8(uint8x8_t __p0) {
50876   uint8_t __ret;
50877   __ret = (uint8_t) __builtin_neon_vmaxv_u8((int8x8_t)__p0);
50878   return __ret;
50879 }
50880 #else
50881 __ai uint8_t vmaxv_u8(uint8x8_t __p0) {
50882   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
50883   uint8_t __ret;
50884   __ret = (uint8_t) __builtin_neon_vmaxv_u8((int8x8_t)__rev0);
50885   return __ret;
50886 }
50887 #endif
50888 
50889 #ifdef __LITTLE_ENDIAN__
50890 __ai uint32_t vmaxv_u32(uint32x2_t __p0) {
50891   uint32_t __ret;
50892   __ret = (uint32_t) __builtin_neon_vmaxv_u32((int8x8_t)__p0);
50893   return __ret;
50894 }
50895 #else
50896 __ai uint32_t vmaxv_u32(uint32x2_t __p0) {
50897   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
50898   uint32_t __ret;
50899   __ret = (uint32_t) __builtin_neon_vmaxv_u32((int8x8_t)__rev0);
50900   return __ret;
50901 }
50902 #endif
50903 
50904 #ifdef __LITTLE_ENDIAN__
50905 __ai uint16_t vmaxv_u16(uint16x4_t __p0) {
50906   uint16_t __ret;
50907   __ret = (uint16_t) __builtin_neon_vmaxv_u16((int8x8_t)__p0);
50908   return __ret;
50909 }
50910 #else
50911 __ai uint16_t vmaxv_u16(uint16x4_t __p0) {
50912   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
50913   uint16_t __ret;
50914   __ret = (uint16_t) __builtin_neon_vmaxv_u16((int8x8_t)__rev0);
50915   return __ret;
50916 }
50917 #endif
50918 
50919 #ifdef __LITTLE_ENDIAN__
50920 __ai int8_t vmaxv_s8(int8x8_t __p0) {
50921   int8_t __ret;
50922   __ret = (int8_t) __builtin_neon_vmaxv_s8((int8x8_t)__p0);
50923   return __ret;
50924 }
50925 #else
50926 __ai int8_t vmaxv_s8(int8x8_t __p0) {
50927   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
50928   int8_t __ret;
50929   __ret = (int8_t) __builtin_neon_vmaxv_s8((int8x8_t)__rev0);
50930   return __ret;
50931 }
50932 #endif
50933 
50934 #ifdef __LITTLE_ENDIAN__
50935 __ai float32_t vmaxv_f32(float32x2_t __p0) {
50936   float32_t __ret;
50937   __ret = (float32_t) __builtin_neon_vmaxv_f32((int8x8_t)__p0);
50938   return __ret;
50939 }
50940 #else
50941 __ai float32_t vmaxv_f32(float32x2_t __p0) {
50942   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
50943   float32_t __ret;
50944   __ret = (float32_t) __builtin_neon_vmaxv_f32((int8x8_t)__rev0);
50945   return __ret;
50946 }
50947 #endif
50948 
50949 #ifdef __LITTLE_ENDIAN__
50950 __ai int32_t vmaxv_s32(int32x2_t __p0) {
50951   int32_t __ret;
50952   __ret = (int32_t) __builtin_neon_vmaxv_s32((int8x8_t)__p0);
50953   return __ret;
50954 }
50955 #else
50956 __ai int32_t vmaxv_s32(int32x2_t __p0) {
50957   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
50958   int32_t __ret;
50959   __ret = (int32_t) __builtin_neon_vmaxv_s32((int8x8_t)__rev0);
50960   return __ret;
50961 }
50962 #endif
50963 
50964 #ifdef __LITTLE_ENDIAN__
50965 __ai int16_t vmaxv_s16(int16x4_t __p0) {
50966   int16_t __ret;
50967   __ret = (int16_t) __builtin_neon_vmaxv_s16((int8x8_t)__p0);
50968   return __ret;
50969 }
50970 #else
50971 __ai int16_t vmaxv_s16(int16x4_t __p0) {
50972   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
50973   int16_t __ret;
50974   __ret = (int16_t) __builtin_neon_vmaxv_s16((int8x8_t)__rev0);
50975   return __ret;
50976 }
50977 #endif
50978 
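/* The vmin family below mirrors vmax: vminq_f64 and vmin_f64 compute the
 * per-lane minimum, and the vminnm variants follow the IEEE 754-2008 minNum
 * rule, preferring a numeric operand over a quiet NaN.
 *
 * Illustrative sketch (a and b as in the vmax sketch above):
 *
 *   float64x2_t lo = vminnmq_f64(a, b);   // {1.0, 5.0}
 */
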
50979 #ifdef __LITTLE_ENDIAN__
50980 __ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) {
50981   float64x2_t __ret;
50982   __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
50983   return __ret;
50984 }
50985 #else
50986 __ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) {
50987   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
50988   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
50989   float64x2_t __ret;
50990   __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
50991   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
50992   return __ret;
50993 }
50994 #endif
50995 
50996 #ifdef __LITTLE_ENDIAN__
50997 __ai float64x1_t vmin_f64(float64x1_t __p0, float64x1_t __p1) {
50998   float64x1_t __ret;
50999   __ret = (float64x1_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
51000   return __ret;
51001 }
51002 #else
51003 __ai float64x1_t vmin_f64(float64x1_t __p0, float64x1_t __p1) {
51004   float64x1_t __ret;
51005   __ret = (float64x1_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
51006   return __ret;
51007 }
51008 #endif
51009 
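/* Note (added annotation): vminnm* differ from vmin* in NaN handling: following the
 * IEEE 754 minNum rule, when exactly one operand is a quiet NaN the numeric operand is
 * returned rather than the NaN. */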
51010 #ifdef __LITTLE_ENDIAN__
51011 __ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
51012   float64x2_t __ret;
51013   __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
51014   return __ret;
51015 }
51016 #else
51017 __ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
51018   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
51019   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
51020   float64x2_t __ret;
51021   __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
51022   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
51023   return __ret;
51024 }
51025 #endif
51026 
51027 #ifdef __LITTLE_ENDIAN__
51028 __ai float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
51029   float32x4_t __ret;
51030   __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
51031   return __ret;
51032 }
51033 #else
51034 __ai float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
51035   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
51036   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
51037   float32x4_t __ret;
51038   __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
51039   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
51040   return __ret;
51041 }
51042 #endif
51043 
51044 #ifdef __LITTLE_ENDIAN__
51045 __ai float64x1_t vminnm_f64(float64x1_t __p0, float64x1_t __p1) {
51046   float64x1_t __ret;
51047   __ret = (float64x1_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
51048   return __ret;
51049 }
51050 #else
51051 __ai float64x1_t vminnm_f64(float64x1_t __p0, float64x1_t __p1) {
51052   float64x1_t __ret;
51053   __ret = (float64x1_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
51054   return __ret;
51055 }
51056 #endif
51057 
51058 #ifdef __LITTLE_ENDIAN__
51059 __ai float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) {
51060   float32x2_t __ret;
51061   __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
51062   return __ret;
51063 }
51064 #else
51065 __ai float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) {
51066   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
51067   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
51068   float32x2_t __ret;
51069   __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
51070   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
51071   return __ret;
51072 }
51073 #endif
51074 
51075 #ifdef __LITTLE_ENDIAN__
51076 __ai float64_t vminnmvq_f64(float64x2_t __p0) {
51077   float64_t __ret;
51078   __ret = (float64_t) __builtin_neon_vminnmvq_f64((int8x16_t)__p0);
51079   return __ret;
51080 }
51081 #else
51082 __ai float64_t vminnmvq_f64(float64x2_t __p0) {
51083   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
51084   float64_t __ret;
51085   __ret = (float64_t) __builtin_neon_vminnmvq_f64((int8x16_t)__rev0);
51086   return __ret;
51087 }
51088 #endif
51089 
51090 #ifdef __LITTLE_ENDIAN__
51091 __ai float32_t vminnmvq_f32(float32x4_t __p0) {
51092   float32_t __ret;
51093   __ret = (float32_t) __builtin_neon_vminnmvq_f32((int8x16_t)__p0);
51094   return __ret;
51095 }
51096 #else
51097 __ai float32_t vminnmvq_f32(float32x4_t __p0) {
51098   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
51099   float32_t __ret;
51100   __ret = (float32_t) __builtin_neon_vminnmvq_f32((int8x16_t)__rev0);
51101   return __ret;
51102 }
51103 #endif
51104 
51105 #ifdef __LITTLE_ENDIAN__
51106 __ai float32_t vminnmv_f32(float32x2_t __p0) {
51107   float32_t __ret;
51108   __ret = (float32_t) __builtin_neon_vminnmv_f32((int8x8_t)__p0);
51109   return __ret;
51110 }
51111 #else
51112 __ai float32_t vminnmv_f32(float32x2_t __p0) {
51113   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
51114   float32_t __ret;
51115   __ret = (float32_t) __builtin_neon_vminnmv_f32((int8x8_t)__rev0);
51116   return __ret;
51117 }
51118 #endif
51119 
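/* Note (added annotation): vminv and vminvq reduce all lanes of the source vector to a
 * single scalar minimum. The big-endian lane reversal emitted below does not change the
 * result of a pure reduction; it is generated uniformly for consistency with the other
 * intrinsics. */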
51120 #ifdef __LITTLE_ENDIAN__
51121 __ai uint8_t vminvq_u8(uint8x16_t __p0) {
51122   uint8_t __ret;
51123   __ret = (uint8_t) __builtin_neon_vminvq_u8((int8x16_t)__p0);
51124   return __ret;
51125 }
51126 #else
51127 __ai uint8_t vminvq_u8(uint8x16_t __p0) {
51128   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
51129   uint8_t __ret;
51130   __ret = (uint8_t) __builtin_neon_vminvq_u8((int8x16_t)__rev0);
51131   return __ret;
51132 }
51133 #endif
51134 
51135 #ifdef __LITTLE_ENDIAN__
51136 __ai uint32_t vminvq_u32(uint32x4_t __p0) {
51137   uint32_t __ret;
51138   __ret = (uint32_t) __builtin_neon_vminvq_u32((int8x16_t)__p0);
51139   return __ret;
51140 }
51141 #else
51142 __ai uint32_t vminvq_u32(uint32x4_t __p0) {
51143   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
51144   uint32_t __ret;
51145   __ret = (uint32_t) __builtin_neon_vminvq_u32((int8x16_t)__rev0);
51146   return __ret;
51147 }
51148 #endif
51149 
51150 #ifdef __LITTLE_ENDIAN__
51151 __ai uint16_t vminvq_u16(uint16x8_t __p0) {
51152   uint16_t __ret;
51153   __ret = (uint16_t) __builtin_neon_vminvq_u16((int8x16_t)__p0);
51154   return __ret;
51155 }
51156 #else
51157 __ai uint16_t vminvq_u16(uint16x8_t __p0) {
51158   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
51159   uint16_t __ret;
51160   __ret = (uint16_t) __builtin_neon_vminvq_u16((int8x16_t)__rev0);
51161   return __ret;
51162 }
51163 #endif
51164 
51165 #ifdef __LITTLE_ENDIAN__
51166 __ai int8_t vminvq_s8(int8x16_t __p0) {
51167   int8_t __ret;
51168   __ret = (int8_t) __builtin_neon_vminvq_s8((int8x16_t)__p0);
51169   return __ret;
51170 }
51171 #else
51172 __ai int8_t vminvq_s8(int8x16_t __p0) {
51173   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
51174   int8_t __ret;
51175   __ret = (int8_t) __builtin_neon_vminvq_s8((int8x16_t)__rev0);
51176   return __ret;
51177 }
51178 #endif
51179 
51180 #ifdef __LITTLE_ENDIAN__
51181 __ai float64_t vminvq_f64(float64x2_t __p0) {
51182   float64_t __ret;
51183   __ret = (float64_t) __builtin_neon_vminvq_f64((int8x16_t)__p0);
51184   return __ret;
51185 }
51186 #else
51187 __ai float64_t vminvq_f64(float64x2_t __p0) {
51188   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
51189   float64_t __ret;
51190   __ret = (float64_t) __builtin_neon_vminvq_f64((int8x16_t)__rev0);
51191   return __ret;
51192 }
51193 #endif
51194 
51195 #ifdef __LITTLE_ENDIAN__
51196 __ai float32_t vminvq_f32(float32x4_t __p0) {
51197   float32_t __ret;
51198   __ret = (float32_t) __builtin_neon_vminvq_f32((int8x16_t)__p0);
51199   return __ret;
51200 }
51201 #else
51202 __ai float32_t vminvq_f32(float32x4_t __p0) {
51203   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
51204   float32_t __ret;
51205   __ret = (float32_t) __builtin_neon_vminvq_f32((int8x16_t)__rev0);
51206   return __ret;
51207 }
51208 #endif
51209 
51210 #ifdef __LITTLE_ENDIAN__
51211 __ai int32_t vminvq_s32(int32x4_t __p0) {
51212   int32_t __ret;
51213   __ret = (int32_t) __builtin_neon_vminvq_s32((int8x16_t)__p0);
51214   return __ret;
51215 }
51216 #else
51217 __ai int32_t vminvq_s32(int32x4_t __p0) {
51218   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
51219   int32_t __ret;
51220   __ret = (int32_t) __builtin_neon_vminvq_s32((int8x16_t)__rev0);
51221   return __ret;
51222 }
51223 #endif
51224 
51225 #ifdef __LITTLE_ENDIAN__
51226 __ai int16_t vminvq_s16(int16x8_t __p0) {
51227   int16_t __ret;
51228   __ret = (int16_t) __builtin_neon_vminvq_s16((int8x16_t)__p0);
51229   return __ret;
51230 }
51231 #else
51232 __ai int16_t vminvq_s16(int16x8_t __p0) {
51233   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
51234   int16_t __ret;
51235   __ret = (int16_t) __builtin_neon_vminvq_s16((int8x16_t)__rev0);
51236   return __ret;
51237 }
51238 #endif
51239 
51240 #ifdef __LITTLE_ENDIAN__
51241 __ai uint8_t vminv_u8(uint8x8_t __p0) {
51242   uint8_t __ret;
51243   __ret = (uint8_t) __builtin_neon_vminv_u8((int8x8_t)__p0);
51244   return __ret;
51245 }
51246 #else
51247 __ai uint8_t vminv_u8(uint8x8_t __p0) {
51248   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
51249   uint8_t __ret;
51250   __ret = (uint8_t) __builtin_neon_vminv_u8((int8x8_t)__rev0);
51251   return __ret;
51252 }
51253 #endif
51254 
51255 #ifdef __LITTLE_ENDIAN__
51256 __ai uint32_t vminv_u32(uint32x2_t __p0) {
51257   uint32_t __ret;
51258   __ret = (uint32_t) __builtin_neon_vminv_u32((int8x8_t)__p0);
51259   return __ret;
51260 }
51261 #else
51262 __ai uint32_t vminv_u32(uint32x2_t __p0) {
51263   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
51264   uint32_t __ret;
51265   __ret = (uint32_t) __builtin_neon_vminv_u32((int8x8_t)__rev0);
51266   return __ret;
51267 }
51268 #endif
51269 
51270 #ifdef __LITTLE_ENDIAN__
51271 __ai uint16_t vminv_u16(uint16x4_t __p0) {
51272   uint16_t __ret;
51273   __ret = (uint16_t) __builtin_neon_vminv_u16((int8x8_t)__p0);
51274   return __ret;
51275 }
51276 #else
51277 __ai uint16_t vminv_u16(uint16x4_t __p0) {
51278   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
51279   uint16_t __ret;
51280   __ret = (uint16_t) __builtin_neon_vminv_u16((int8x8_t)__rev0);
51281   return __ret;
51282 }
51283 #endif
51284 
51285 #ifdef __LITTLE_ENDIAN__
51286 __ai int8_t vminv_s8(int8x8_t __p0) {
51287   int8_t __ret;
51288   __ret = (int8_t) __builtin_neon_vminv_s8((int8x8_t)__p0);
51289   return __ret;
51290 }
51291 #else
51292 __ai int8_t vminv_s8(int8x8_t __p0) {
51293   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
51294   int8_t __ret;
51295   __ret = (int8_t) __builtin_neon_vminv_s8((int8x8_t)__rev0);
51296   return __ret;
51297 }
51298 #endif
51299 
51300 #ifdef __LITTLE_ENDIAN__
51301 __ai float32_t vminv_f32(float32x2_t __p0) {
51302   float32_t __ret;
51303   __ret = (float32_t) __builtin_neon_vminv_f32((int8x8_t)__p0);
51304   return __ret;
51305 }
51306 #else
51307 __ai float32_t vminv_f32(float32x2_t __p0) {
51308   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
51309   float32_t __ret;
51310   __ret = (float32_t) __builtin_neon_vminv_f32((int8x8_t)__rev0);
51311   return __ret;
51312 }
51313 #endif
51314 
51315 #ifdef __LITTLE_ENDIAN__
51316 __ai int32_t vminv_s32(int32x2_t __p0) {
51317   int32_t __ret;
51318   __ret = (int32_t) __builtin_neon_vminv_s32((int8x8_t)__p0);
51319   return __ret;
51320 }
51321 #else
51322 __ai int32_t vminv_s32(int32x2_t __p0) {
51323   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
51324   int32_t __ret;
51325   __ret = (int32_t) __builtin_neon_vminv_s32((int8x8_t)__rev0);
51326   return __ret;
51327 }
51328 #endif
51329 
51330 #ifdef __LITTLE_ENDIAN__
51331 __ai int16_t vminv_s16(int16x4_t __p0) {
51332   int16_t __ret;
51333   __ret = (int16_t) __builtin_neon_vminv_s16((int8x8_t)__p0);
51334   return __ret;
51335 }
51336 #else
51337 __ai int16_t vminv_s16(int16x4_t __p0) {
51338   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
51339   int16_t __ret;
51340   __ret = (int16_t) __builtin_neon_vminv_s16((int8x8_t)__rev0);
51341   return __ret;
51342 }
51343 #endif
51344 
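/* Note (added annotation): the vmla* (multiply-accumulate) intrinsics below are written
 * directly as vector arithmetic (__p0 + __p1 * __p2) rather than through a NEON builtin,
 * leaving instruction selection to the compiler. */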
51345 #ifdef __LITTLE_ENDIAN__
51346 __ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
51347   float64x2_t __ret;
51348   __ret = __p0 + __p1 * __p2;
51349   return __ret;
51350 }
51351 #else
51352 __ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
51353   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
51354   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
51355   float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
51356   float64x2_t __ret;
51357   __ret = __rev0 + __rev1 * __rev2;
51358   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
51359   return __ret;
51360 }
51361 #endif
51362 
51363 #ifdef __LITTLE_ENDIAN__
51364 __ai float64x1_t vmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
51365   float64x1_t __ret;
51366   __ret = __p0 + __p1 * __p2;
51367   return __ret;
51368 }
51369 #else
51370 __ai float64x1_t vmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
51371   float64x1_t __ret;
51372   __ret = __p0 + __p1 * __p2;
51373   return __ret;
51374 }
51375 #endif
51376 
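/* Note (added annotation): the _laneq forms take the multiplier from a selected lane of
 * a 128-bit vector; they are macros because the lane index must be a compile-time
 * constant fed to __builtin_shufflevector. Illustrative use (hypothetical variables):
 *   acc = vmlaq_laneq_f32(acc, v, coeffs, 2);   // acc[i] += v[i] * coeffs[2]
 */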
51377 #ifdef __LITTLE_ENDIAN__
51378 #define vmlaq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
51379   uint32x4_t __s0 = __p0; \
51380   uint32x4_t __s1 = __p1; \
51381   uint32x4_t __s2 = __p2; \
51382   uint32x4_t __ret; \
51383   __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
51384   __ret; \
51385 })
51386 #else
51387 #define vmlaq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
51388   uint32x4_t __s0 = __p0; \
51389   uint32x4_t __s1 = __p1; \
51390   uint32x4_t __s2 = __p2; \
51391   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
51392   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
51393   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
51394   uint32x4_t __ret; \
51395   __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
51396   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
51397   __ret; \
51398 })
51399 #endif
51400 
51401 #ifdef __LITTLE_ENDIAN__
51402 #define vmlaq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
51403   uint16x8_t __s0 = __p0; \
51404   uint16x8_t __s1 = __p1; \
51405   uint16x8_t __s2 = __p2; \
51406   uint16x8_t __ret; \
51407   __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
51408   __ret; \
51409 })
51410 #else
51411 #define vmlaq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
51412   uint16x8_t __s0 = __p0; \
51413   uint16x8_t __s1 = __p1; \
51414   uint16x8_t __s2 = __p2; \
51415   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
51416   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
51417   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
51418   uint16x8_t __ret; \
51419   __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
51420   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
51421   __ret; \
51422 })
51423 #endif
51424 
51425 #ifdef __LITTLE_ENDIAN__
51426 #define vmlaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
51427   float32x4_t __s0 = __p0; \
51428   float32x4_t __s1 = __p1; \
51429   float32x4_t __s2 = __p2; \
51430   float32x4_t __ret; \
51431   __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
51432   __ret; \
51433 })
51434 #else
51435 #define vmlaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
51436   float32x4_t __s0 = __p0; \
51437   float32x4_t __s1 = __p1; \
51438   float32x4_t __s2 = __p2; \
51439   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
51440   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
51441   float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
51442   float32x4_t __ret; \
51443   __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
51444   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
51445   __ret; \
51446 })
51447 #endif
51448 
51449 #ifdef __LITTLE_ENDIAN__
51450 #define vmlaq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
51451   int32x4_t __s0 = __p0; \
51452   int32x4_t __s1 = __p1; \
51453   int32x4_t __s2 = __p2; \
51454   int32x4_t __ret; \
51455   __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
51456   __ret; \
51457 })
51458 #else
51459 #define vmlaq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
51460   int32x4_t __s0 = __p0; \
51461   int32x4_t __s1 = __p1; \
51462   int32x4_t __s2 = __p2; \
51463   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
51464   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
51465   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
51466   int32x4_t __ret; \
51467   __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
51468   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
51469   __ret; \
51470 })
51471 #endif
51472 
51473 #ifdef __LITTLE_ENDIAN__
51474 #define vmlaq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
51475   int16x8_t __s0 = __p0; \
51476   int16x8_t __s1 = __p1; \
51477   int16x8_t __s2 = __p2; \
51478   int16x8_t __ret; \
51479   __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
51480   __ret; \
51481 })
51482 #else
51483 #define vmlaq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
51484   int16x8_t __s0 = __p0; \
51485   int16x8_t __s1 = __p1; \
51486   int16x8_t __s2 = __p2; \
51487   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
51488   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
51489   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
51490   int16x8_t __ret; \
51491   __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
51492   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
51493   __ret; \
51494 })
51495 #endif
51496 
51497 #ifdef __LITTLE_ENDIAN__
51498 #define vmla_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
51499   uint32x2_t __s0 = __p0; \
51500   uint32x2_t __s1 = __p1; \
51501   uint32x4_t __s2 = __p2; \
51502   uint32x2_t __ret; \
51503   __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
51504   __ret; \
51505 })
51506 #else
51507 #define vmla_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
51508   uint32x2_t __s0 = __p0; \
51509   uint32x2_t __s1 = __p1; \
51510   uint32x4_t __s2 = __p2; \
51511   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
51512   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
51513   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
51514   uint32x2_t __ret; \
51515   __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
51516   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
51517   __ret; \
51518 })
51519 #endif
51520 
51521 #ifdef __LITTLE_ENDIAN__
51522 #define vmla_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
51523   uint16x4_t __s0 = __p0; \
51524   uint16x4_t __s1 = __p1; \
51525   uint16x8_t __s2 = __p2; \
51526   uint16x4_t __ret; \
51527   __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
51528   __ret; \
51529 })
51530 #else
51531 #define vmla_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
51532   uint16x4_t __s0 = __p0; \
51533   uint16x4_t __s1 = __p1; \
51534   uint16x8_t __s2 = __p2; \
51535   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
51536   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
51537   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
51538   uint16x4_t __ret; \
51539   __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
51540   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
51541   __ret; \
51542 })
51543 #endif
51544 
51545 #ifdef __LITTLE_ENDIAN__
51546 #define vmla_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
51547   float32x2_t __s0 = __p0; \
51548   float32x2_t __s1 = __p1; \
51549   float32x4_t __s2 = __p2; \
51550   float32x2_t __ret; \
51551   __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
51552   __ret; \
51553 })
51554 #else
51555 #define vmla_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
51556   float32x2_t __s0 = __p0; \
51557   float32x2_t __s1 = __p1; \
51558   float32x4_t __s2 = __p2; \
51559   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
51560   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
51561   float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
51562   float32x2_t __ret; \
51563   __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
51564   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
51565   __ret; \
51566 })
51567 #endif
51568 
51569 #ifdef __LITTLE_ENDIAN__
51570 #define vmla_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
51571   int32x2_t __s0 = __p0; \
51572   int32x2_t __s1 = __p1; \
51573   int32x4_t __s2 = __p2; \
51574   int32x2_t __ret; \
51575   __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
51576   __ret; \
51577 })
51578 #else
51579 #define vmla_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
51580   int32x2_t __s0 = __p0; \
51581   int32x2_t __s1 = __p1; \
51582   int32x4_t __s2 = __p2; \
51583   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
51584   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
51585   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
51586   int32x2_t __ret; \
51587   __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
51588   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
51589   __ret; \
51590 })
51591 #endif
51592 
51593 #ifdef __LITTLE_ENDIAN__
51594 #define vmla_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
51595   int16x4_t __s0 = __p0; \
51596   int16x4_t __s1 = __p1; \
51597   int16x8_t __s2 = __p2; \
51598   int16x4_t __ret; \
51599   __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
51600   __ret; \
51601 })
51602 #else
51603 #define vmla_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
51604   int16x4_t __s0 = __p0; \
51605   int16x4_t __s1 = __p1; \
51606   int16x8_t __s2 = __p2; \
51607   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
51608   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
51609   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
51610   int16x4_t __ret; \
51611   __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
51612   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
51613   __ret; \
51614 })
51615 #endif
51616 
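/* Note (added annotation): the _n_f64 forms broadcast the scalar operand with a vector
 * literal ((float64x2_t){__p2, __p2}) before the multiply-accumulate. */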
51617 #ifdef __LITTLE_ENDIAN__
51618 __ai float64x2_t vmlaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
51619   float64x2_t __ret;
51620   __ret = __p0 + __p1 * (float64x2_t) {__p2, __p2};
51621   return __ret;
51622 }
51623 #else
51624 __ai float64x2_t vmlaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
51625   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
51626   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
51627   float64x2_t __ret;
51628   __ret = __rev0 + __rev1 * (float64x2_t) {__p2, __p2};
51629   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
51630   return __ret;
51631 }
51632 #endif
51633 
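/* Note (added annotation): vmlal_high_lane* and vmlal_high_laneq* are widening
 * multiply-accumulates: the high half of the second operand (vget_high_*) is multiplied
 * by one lane of the third operand and added into double-width accumulator lanes. The
 * big-endian paths call the __noswap_ helpers so the already-reversed operands are not
 * reversed a second time. */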
51634 #ifdef __LITTLE_ENDIAN__
51635 #define vmlal_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
51636   uint64x2_t __s0 = __p0; \
51637   uint32x4_t __s1 = __p1; \
51638   uint32x2_t __s2 = __p2; \
51639   uint64x2_t __ret; \
51640   __ret = __s0 + vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
51641   __ret; \
51642 })
51643 #else
51644 #define vmlal_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
51645   uint64x2_t __s0 = __p0; \
51646   uint32x4_t __s1 = __p1; \
51647   uint32x2_t __s2 = __p2; \
51648   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
51649   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
51650   uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
51651   uint64x2_t __ret; \
51652   __ret = __rev0 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
51653   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
51654   __ret; \
51655 })
51656 #endif
51657 
51658 #ifdef __LITTLE_ENDIAN__
51659 #define vmlal_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
51660   uint32x4_t __s0 = __p0; \
51661   uint16x8_t __s1 = __p1; \
51662   uint16x4_t __s2 = __p2; \
51663   uint32x4_t __ret; \
51664   __ret = __s0 + vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
51665   __ret; \
51666 })
51667 #else
51668 #define vmlal_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
51669   uint32x4_t __s0 = __p0; \
51670   uint16x8_t __s1 = __p1; \
51671   uint16x4_t __s2 = __p2; \
51672   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
51673   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
51674   uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
51675   uint32x4_t __ret; \
51676   __ret = __rev0 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
51677   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
51678   __ret; \
51679 })
51680 #endif
51681 
51682 #ifdef __LITTLE_ENDIAN__
51683 #define vmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
51684   int64x2_t __s0 = __p0; \
51685   int32x4_t __s1 = __p1; \
51686   int32x2_t __s2 = __p2; \
51687   int64x2_t __ret; \
51688   __ret = __s0 + vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
51689   __ret; \
51690 })
51691 #else
51692 #define vmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
51693   int64x2_t __s0 = __p0; \
51694   int32x4_t __s1 = __p1; \
51695   int32x2_t __s2 = __p2; \
51696   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
51697   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
51698   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
51699   int64x2_t __ret; \
51700   __ret = __rev0 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
51701   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
51702   __ret; \
51703 })
51704 #endif
51705 
51706 #ifdef __LITTLE_ENDIAN__
51707 #define vmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
51708   int32x4_t __s0 = __p0; \
51709   int16x8_t __s1 = __p1; \
51710   int16x4_t __s2 = __p2; \
51711   int32x4_t __ret; \
51712   __ret = __s0 + vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
51713   __ret; \
51714 })
51715 #else
51716 #define vmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
51717   int32x4_t __s0 = __p0; \
51718   int16x8_t __s1 = __p1; \
51719   int16x4_t __s2 = __p2; \
51720   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
51721   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
51722   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
51723   int32x4_t __ret; \
51724   __ret = __rev0 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
51725   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
51726   __ret; \
51727 })
51728 #endif
51729 
51730 #ifdef __LITTLE_ENDIAN__
51731 #define vmlal_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
51732   uint64x2_t __s0 = __p0; \
51733   uint32x4_t __s1 = __p1; \
51734   uint32x4_t __s2 = __p2; \
51735   uint64x2_t __ret; \
51736   __ret = __s0 + vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
51737   __ret; \
51738 })
51739 #else
51740 #define vmlal_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
51741   uint64x2_t __s0 = __p0; \
51742   uint32x4_t __s1 = __p1; \
51743   uint32x4_t __s2 = __p2; \
51744   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
51745   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
51746   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
51747   uint64x2_t __ret; \
51748   __ret = __rev0 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
51749   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
51750   __ret; \
51751 })
51752 #endif
51753 
51754 #ifdef __LITTLE_ENDIAN__
51755 #define vmlal_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
51756   uint32x4_t __s0 = __p0; \
51757   uint16x8_t __s1 = __p1; \
51758   uint16x8_t __s2 = __p2; \
51759   uint32x4_t __ret; \
51760   __ret = __s0 + vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
51761   __ret; \
51762 })
51763 #else
51764 #define vmlal_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
51765   uint32x4_t __s0 = __p0; \
51766   uint16x8_t __s1 = __p1; \
51767   uint16x8_t __s2 = __p2; \
51768   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
51769   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
51770   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
51771   uint32x4_t __ret; \
51772   __ret = __rev0 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
51773   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
51774   __ret; \
51775 })
51776 #endif
51777 
51778 #ifdef __LITTLE_ENDIAN__
51779 #define vmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
51780   int64x2_t __s0 = __p0; \
51781   int32x4_t __s1 = __p1; \
51782   int32x4_t __s2 = __p2; \
51783   int64x2_t __ret; \
51784   __ret = __s0 + vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
51785   __ret; \
51786 })
51787 #else
51788 #define vmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
51789   int64x2_t __s0 = __p0; \
51790   int32x4_t __s1 = __p1; \
51791   int32x4_t __s2 = __p2; \
51792   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
51793   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
51794   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
51795   int64x2_t __ret; \
51796   __ret = __rev0 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
51797   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
51798   __ret; \
51799 })
51800 #endif
51801 
51802 #ifdef __LITTLE_ENDIAN__
51803 #define vmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
51804   int32x4_t __s0 = __p0; \
51805   int16x8_t __s1 = __p1; \
51806   int16x8_t __s2 = __p2; \
51807   int32x4_t __ret; \
51808   __ret = __s0 + vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
51809   __ret; \
51810 })
51811 #else
51812 #define vmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
51813   int32x4_t __s0 = __p0; \
51814   int16x8_t __s1 = __p1; \
51815   int16x8_t __s2 = __p2; \
51816   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
51817   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
51818   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
51819   int32x4_t __ret; \
51820   __ret = __rev0 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
51821   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
51822   __ret; \
51823 })
51824 #endif
51825 
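/* Note (added annotation): vmlal_laneq* are the non-"high" counterparts: the full
 * 64-bit second operand is widened and multiplied by a lane selected from a 128-bit
 * third operand. */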
51826 #ifdef __LITTLE_ENDIAN__
51827 #define vmlal_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
51828   uint64x2_t __s0 = __p0; \
51829   uint32x2_t __s1 = __p1; \
51830   uint32x4_t __s2 = __p2; \
51831   uint64x2_t __ret; \
51832   __ret = __s0 + vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
51833   __ret; \
51834 })
51835 #else
51836 #define vmlal_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
51837   uint64x2_t __s0 = __p0; \
51838   uint32x2_t __s1 = __p1; \
51839   uint32x4_t __s2 = __p2; \
51840   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
51841   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
51842   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
51843   uint64x2_t __ret; \
51844   __ret = __rev0 + __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
51845   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
51846   __ret; \
51847 })
51848 #endif
51849 
51850 #ifdef __LITTLE_ENDIAN__
51851 #define vmlal_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
51852   uint32x4_t __s0 = __p0; \
51853   uint16x4_t __s1 = __p1; \
51854   uint16x8_t __s2 = __p2; \
51855   uint32x4_t __ret; \
51856   __ret = __s0 + vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
51857   __ret; \
51858 })
51859 #else
51860 #define vmlal_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
51861   uint32x4_t __s0 = __p0; \
51862   uint16x4_t __s1 = __p1; \
51863   uint16x8_t __s2 = __p2; \
51864   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
51865   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
51866   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
51867   uint32x4_t __ret; \
51868   __ret = __rev0 + __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
51869   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
51870   __ret; \
51871 })
51872 #endif
51873 
51874 #ifdef __LITTLE_ENDIAN__
51875 #define vmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
51876   int64x2_t __s0 = __p0; \
51877   int32x2_t __s1 = __p1; \
51878   int32x4_t __s2 = __p2; \
51879   int64x2_t __ret; \
51880   __ret = __s0 + vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
51881   __ret; \
51882 })
51883 #else
51884 #define vmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
51885   int64x2_t __s0 = __p0; \
51886   int32x2_t __s1 = __p1; \
51887   int32x4_t __s2 = __p2; \
51888   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
51889   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
51890   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
51891   int64x2_t __ret; \
51892   __ret = __rev0 + __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
51893   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
51894   __ret; \
51895 })
51896 #endif
51897 
51898 #ifdef __LITTLE_ENDIAN__
51899 #define vmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
51900   int32x4_t __s0 = __p0; \
51901   int16x4_t __s1 = __p1; \
51902   int16x8_t __s2 = __p2; \
51903   int32x4_t __ret; \
51904   __ret = __s0 + vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
51905   __ret; \
51906 })
51907 #else
51908 #define vmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
51909   int32x4_t __s0 = __p0; \
51910   int16x4_t __s1 = __p1; \
51911   int16x8_t __s2 = __p2; \
51912   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
51913   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
51914   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
51915   int32x4_t __ret; \
51916   __ret = __rev0 + __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
51917   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
51918   __ret; \
51919 })
51920 #endif
51921 
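/* Note (added annotation): the vmls* family mirrors vmla* with subtraction
 * (__p0 - __p1 * __p2); the laneq, _n, and widening (vmlsl) forms below follow the same
 * patterns as their vmla counterparts. */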
51922 #ifdef __LITTLE_ENDIAN__
51923 __ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
51924   float64x2_t __ret;
51925   __ret = __p0 - __p1 * __p2;
51926   return __ret;
51927 }
51928 #else
51929 __ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
51930   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
51931   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
51932   float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
51933   float64x2_t __ret;
51934   __ret = __rev0 - __rev1 * __rev2;
51935   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
51936   return __ret;
51937 }
51938 #endif
51939 
51940 #ifdef __LITTLE_ENDIAN__
51941 __ai float64x1_t vmls_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
51942   float64x1_t __ret;
51943   __ret = __p0 - __p1 * __p2;
51944   return __ret;
51945 }
51946 #else
51947 __ai float64x1_t vmls_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
51948   float64x1_t __ret;
51949   __ret = __p0 - __p1 * __p2;
51950   return __ret;
51951 }
51952 #endif
51953 
51954 #ifdef __LITTLE_ENDIAN__
51955 #define vmlsq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
51956   uint32x4_t __s0 = __p0; \
51957   uint32x4_t __s1 = __p1; \
51958   uint32x4_t __s2 = __p2; \
51959   uint32x4_t __ret; \
51960   __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
51961   __ret; \
51962 })
51963 #else
51964 #define vmlsq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
51965   uint32x4_t __s0 = __p0; \
51966   uint32x4_t __s1 = __p1; \
51967   uint32x4_t __s2 = __p2; \
51968   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
51969   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
51970   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
51971   uint32x4_t __ret; \
51972   __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
51973   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
51974   __ret; \
51975 })
51976 #endif
51977 
51978 #ifdef __LITTLE_ENDIAN__
51979 #define vmlsq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
51980   uint16x8_t __s0 = __p0; \
51981   uint16x8_t __s1 = __p1; \
51982   uint16x8_t __s2 = __p2; \
51983   uint16x8_t __ret; \
51984   __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
51985   __ret; \
51986 })
51987 #else
51988 #define vmlsq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
51989   uint16x8_t __s0 = __p0; \
51990   uint16x8_t __s1 = __p1; \
51991   uint16x8_t __s2 = __p2; \
51992   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
51993   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
51994   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
51995   uint16x8_t __ret; \
51996   __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
51997   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
51998   __ret; \
51999 })
52000 #endif
52001 
52002 #ifdef __LITTLE_ENDIAN__
52003 #define vmlsq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
52004   float32x4_t __s0 = __p0; \
52005   float32x4_t __s1 = __p1; \
52006   float32x4_t __s2 = __p2; \
52007   float32x4_t __ret; \
52008   __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
52009   __ret; \
52010 })
52011 #else
52012 #define vmlsq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
52013   float32x4_t __s0 = __p0; \
52014   float32x4_t __s1 = __p1; \
52015   float32x4_t __s2 = __p2; \
52016   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
52017   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
52018   float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
52019   float32x4_t __ret; \
52020   __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
52021   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
52022   __ret; \
52023 })
52024 #endif
52025 
52026 #ifdef __LITTLE_ENDIAN__
52027 #define vmlsq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
52028   int32x4_t __s0 = __p0; \
52029   int32x4_t __s1 = __p1; \
52030   int32x4_t __s2 = __p2; \
52031   int32x4_t __ret; \
52032   __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
52033   __ret; \
52034 })
52035 #else
52036 #define vmlsq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
52037   int32x4_t __s0 = __p0; \
52038   int32x4_t __s1 = __p1; \
52039   int32x4_t __s2 = __p2; \
52040   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
52041   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
52042   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
52043   int32x4_t __ret; \
52044   __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
52045   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
52046   __ret; \
52047 })
52048 #endif
52049 
52050 #ifdef __LITTLE_ENDIAN__
52051 #define vmlsq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
52052   int16x8_t __s0 = __p0; \
52053   int16x8_t __s1 = __p1; \
52054   int16x8_t __s2 = __p2; \
52055   int16x8_t __ret; \
52056   __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
52057   __ret; \
52058 })
52059 #else
52060 #define vmlsq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
52061   int16x8_t __s0 = __p0; \
52062   int16x8_t __s1 = __p1; \
52063   int16x8_t __s2 = __p2; \
52064   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
52065   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
52066   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
52067   int16x8_t __ret; \
52068   __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
52069   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
52070   __ret; \
52071 })
52072 #endif
52073 
52074 #ifdef __LITTLE_ENDIAN__
52075 #define vmls_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
52076   uint32x2_t __s0 = __p0; \
52077   uint32x2_t __s1 = __p1; \
52078   uint32x4_t __s2 = __p2; \
52079   uint32x2_t __ret; \
52080   __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
52081   __ret; \
52082 })
52083 #else
52084 #define vmls_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
52085   uint32x2_t __s0 = __p0; \
52086   uint32x2_t __s1 = __p1; \
52087   uint32x4_t __s2 = __p2; \
52088   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
52089   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
52090   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
52091   uint32x2_t __ret; \
52092   __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
52093   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
52094   __ret; \
52095 })
52096 #endif
52097 
52098 #ifdef __LITTLE_ENDIAN__
52099 #define vmls_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
52100   uint16x4_t __s0 = __p0; \
52101   uint16x4_t __s1 = __p1; \
52102   uint16x8_t __s2 = __p2; \
52103   uint16x4_t __ret; \
52104   __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
52105   __ret; \
52106 })
52107 #else
52108 #define vmls_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
52109   uint16x4_t __s0 = __p0; \
52110   uint16x4_t __s1 = __p1; \
52111   uint16x8_t __s2 = __p2; \
52112   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
52113   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
52114   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
52115   uint16x4_t __ret; \
52116   __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
52117   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
52118   __ret; \
52119 })
52120 #endif
52121 
52122 #ifdef __LITTLE_ENDIAN__
52123 #define vmls_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
52124   float32x2_t __s0 = __p0; \
52125   float32x2_t __s1 = __p1; \
52126   float32x4_t __s2 = __p2; \
52127   float32x2_t __ret; \
52128   __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
52129   __ret; \
52130 })
52131 #else
52132 #define vmls_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
52133   float32x2_t __s0 = __p0; \
52134   float32x2_t __s1 = __p1; \
52135   float32x4_t __s2 = __p2; \
52136   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
52137   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
52138   float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
52139   float32x2_t __ret; \
52140   __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
52141   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
52142   __ret; \
52143 })
52144 #endif
52145 
52146 #ifdef __LITTLE_ENDIAN__
52147 #define vmls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
52148   int32x2_t __s0 = __p0; \
52149   int32x2_t __s1 = __p1; \
52150   int32x4_t __s2 = __p2; \
52151   int32x2_t __ret; \
52152   __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
52153   __ret; \
52154 })
52155 #else
52156 #define vmls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
52157   int32x2_t __s0 = __p0; \
52158   int32x2_t __s1 = __p1; \
52159   int32x4_t __s2 = __p2; \
52160   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
52161   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
52162   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
52163   int32x2_t __ret; \
52164   __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
52165   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
52166   __ret; \
52167 })
52168 #endif
52169 
52170 #ifdef __LITTLE_ENDIAN__
52171 #define vmls_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
52172   int16x4_t __s0 = __p0; \
52173   int16x4_t __s1 = __p1; \
52174   int16x8_t __s2 = __p2; \
52175   int16x4_t __ret; \
52176   __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
52177   __ret; \
52178 })
52179 #else
52180 #define vmls_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
52181   int16x4_t __s0 = __p0; \
52182   int16x4_t __s1 = __p1; \
52183   int16x8_t __s2 = __p2; \
52184   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
52185   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
52186   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
52187   int16x4_t __ret; \
52188   __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
52189   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
52190   __ret; \
52191 })
52192 #endif
52193 
52194 #ifdef __LITTLE_ENDIAN__
52195 __ai float64x2_t vmlsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
52196   float64x2_t __ret;
52197   __ret = __p0 - __p1 * (float64x2_t) {__p2, __p2};
52198   return __ret;
52199 }
52200 #else
52201 __ai float64x2_t vmlsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
52202   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
52203   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
52204   float64x2_t __ret;
52205   __ret = __rev0 - __rev1 * (float64x2_t) {__p2, __p2};
52206   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
52207   return __ret;
52208 }
52209 #endif
52210 
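/* Note (added annotation): vmlsl_high_lane* perform the widening multiply on the high
 * half of the second operand and subtract the product from the accumulator, analogous
 * to vmlal_high_lane* above. */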
52211 #ifdef __LITTLE_ENDIAN__
52212 #define vmlsl_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
52213   uint64x2_t __s0 = __p0; \
52214   uint32x4_t __s1 = __p1; \
52215   uint32x2_t __s2 = __p2; \
52216   uint64x2_t __ret; \
52217   __ret = __s0 - vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
52218   __ret; \
52219 })
52220 #else
52221 #define vmlsl_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
52222   uint64x2_t __s0 = __p0; \
52223   uint32x4_t __s1 = __p1; \
52224   uint32x2_t __s2 = __p2; \
52225   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
52226   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
52227   uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
52228   uint64x2_t __ret; \
52229   __ret = __rev0 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
52230   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
52231   __ret; \
52232 })
52233 #endif
52234 
52235 #ifdef __LITTLE_ENDIAN__
52236 #define vmlsl_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
52237   uint32x4_t __s0 = __p0; \
52238   uint16x8_t __s1 = __p1; \
52239   uint16x4_t __s2 = __p2; \
52240   uint32x4_t __ret; \
52241   __ret = __s0 - vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
52242   __ret; \
52243 })
52244 #else
52245 #define vmlsl_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
52246   uint32x4_t __s0 = __p0; \
52247   uint16x8_t __s1 = __p1; \
52248   uint16x4_t __s2 = __p2; \
52249   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
52250   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
52251   uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
52252   uint32x4_t __ret; \
52253   __ret = __rev0 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
52254   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
52255   __ret; \
52256 })
52257 #endif
52258 
52259 #ifdef __LITTLE_ENDIAN__
52260 #define vmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
52261   int64x2_t __s0 = __p0; \
52262   int32x4_t __s1 = __p1; \
52263   int32x2_t __s2 = __p2; \
52264   int64x2_t __ret; \
52265   __ret = __s0 - vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
52266   __ret; \
52267 })
52268 #else
52269 #define vmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
52270   int64x2_t __s0 = __p0; \
52271   int32x4_t __s1 = __p1; \
52272   int32x2_t __s2 = __p2; \
52273   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
52274   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
52275   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
52276   int64x2_t __ret; \
52277   __ret = __rev0 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
52278   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
52279   __ret; \
52280 })
52281 #endif
52282 
52283 #ifdef __LITTLE_ENDIAN__
52284 #define vmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
52285   int32x4_t __s0 = __p0; \
52286   int16x8_t __s1 = __p1; \
52287   int16x4_t __s2 = __p2; \
52288   int32x4_t __ret; \
52289   __ret = __s0 - vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
52290   __ret; \
52291 })
52292 #else
52293 #define vmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
52294   int32x4_t __s0 = __p0; \
52295   int16x8_t __s1 = __p1; \
52296   int16x4_t __s2 = __p2; \
52297   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
52298   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
52299   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
52300   int32x4_t __ret; \
52301   __ret = __rev0 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
52302   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
52303   __ret; \
52304 })
52305 #endif
52306 
52307 #ifdef __LITTLE_ENDIAN__
52308 #define vmlsl_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
52309   uint64x2_t __s0 = __p0; \
52310   uint32x4_t __s1 = __p1; \
52311   uint32x4_t __s2 = __p2; \
52312   uint64x2_t __ret; \
52313   __ret = __s0 - vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
52314   __ret; \
52315 })
52316 #else
52317 #define vmlsl_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
52318   uint64x2_t __s0 = __p0; \
52319   uint32x4_t __s1 = __p1; \
52320   uint32x4_t __s2 = __p2; \
52321   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
52322   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
52323   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
52324   uint64x2_t __ret; \
52325   __ret = __rev0 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
52326   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
52327   __ret; \
52328 })
52329 #endif
52330 
52331 #ifdef __LITTLE_ENDIAN__
52332 #define vmlsl_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
52333   uint32x4_t __s0 = __p0; \
52334   uint16x8_t __s1 = __p1; \
52335   uint16x8_t __s2 = __p2; \
52336   uint32x4_t __ret; \
52337   __ret = __s0 - vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
52338   __ret; \
52339 })
52340 #else
52341 #define vmlsl_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
52342   uint32x4_t __s0 = __p0; \
52343   uint16x8_t __s1 = __p1; \
52344   uint16x8_t __s2 = __p2; \
52345   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
52346   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
52347   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
52348   uint32x4_t __ret; \
52349   __ret = __rev0 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
52350   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
52351   __ret; \
52352 })
52353 #endif
52354 
52355 #ifdef __LITTLE_ENDIAN__
52356 #define vmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
52357   int64x2_t __s0 = __p0; \
52358   int32x4_t __s1 = __p1; \
52359   int32x4_t __s2 = __p2; \
52360   int64x2_t __ret; \
52361   __ret = __s0 - vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
52362   __ret; \
52363 })
52364 #else
52365 #define vmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
52366   int64x2_t __s0 = __p0; \
52367   int32x4_t __s1 = __p1; \
52368   int32x4_t __s2 = __p2; \
52369   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
52370   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
52371   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
52372   int64x2_t __ret; \
52373   __ret = __rev0 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
52374   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
52375   __ret; \
52376 })
52377 #endif
52378 
52379 #ifdef __LITTLE_ENDIAN__
52380 #define vmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
52381   int32x4_t __s0 = __p0; \
52382   int16x8_t __s1 = __p1; \
52383   int16x8_t __s2 = __p2; \
52384   int32x4_t __ret; \
52385   __ret = __s0 - vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
52386   __ret; \
52387 })
52388 #else
52389 #define vmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
52390   int32x4_t __s0 = __p0; \
52391   int16x8_t __s1 = __p1; \
52392   int16x8_t __s2 = __p2; \
52393   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
52394   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
52395   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
52396   int32x4_t __ret; \
52397   __ret = __rev0 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
52398   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
52399   __ret; \
52400 })
52401 #endif
52402 
52403 #ifdef __LITTLE_ENDIAN__
52404 #define vmlsl_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
52405   uint64x2_t __s0 = __p0; \
52406   uint32x2_t __s1 = __p1; \
52407   uint32x4_t __s2 = __p2; \
52408   uint64x2_t __ret; \
52409   __ret = __s0 - vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
52410   __ret; \
52411 })
52412 #else
52413 #define vmlsl_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
52414   uint64x2_t __s0 = __p0; \
52415   uint32x2_t __s1 = __p1; \
52416   uint32x4_t __s2 = __p2; \
52417   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
52418   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
52419   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
52420   uint64x2_t __ret; \
52421   __ret = __rev0 - __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
52422   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
52423   __ret; \
52424 })
52425 #endif
52426 
52427 #ifdef __LITTLE_ENDIAN__
52428 #define vmlsl_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
52429   uint32x4_t __s0 = __p0; \
52430   uint16x4_t __s1 = __p1; \
52431   uint16x8_t __s2 = __p2; \
52432   uint32x4_t __ret; \
52433   __ret = __s0 - vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
52434   __ret; \
52435 })
52436 #else
52437 #define vmlsl_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
52438   uint32x4_t __s0 = __p0; \
52439   uint16x4_t __s1 = __p1; \
52440   uint16x8_t __s2 = __p2; \
52441   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
52442   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
52443   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
52444   uint32x4_t __ret; \
52445   __ret = __rev0 - __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
52446   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
52447   __ret; \
52448 })
52449 #endif
52450 
52451 #ifdef __LITTLE_ENDIAN__
52452 #define vmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
52453   int64x2_t __s0 = __p0; \
52454   int32x2_t __s1 = __p1; \
52455   int32x4_t __s2 = __p2; \
52456   int64x2_t __ret; \
52457   __ret = __s0 - vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
52458   __ret; \
52459 })
52460 #else
52461 #define vmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
52462   int64x2_t __s0 = __p0; \
52463   int32x2_t __s1 = __p1; \
52464   int32x4_t __s2 = __p2; \
52465   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
52466   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
52467   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
52468   int64x2_t __ret; \
52469   __ret = __rev0 - __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
52470   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
52471   __ret; \
52472 })
52473 #endif
52474 
52475 #ifdef __LITTLE_ENDIAN__
52476 #define vmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
52477   int32x4_t __s0 = __p0; \
52478   int16x4_t __s1 = __p1; \
52479   int16x8_t __s2 = __p2; \
52480   int32x4_t __ret; \
52481   __ret = __s0 - vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
52482   __ret; \
52483 })
52484 #else
52485 #define vmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
52486   int32x4_t __s0 = __p0; \
52487   int16x4_t __s1 = __p1; \
52488   int16x8_t __s2 = __p2; \
52489   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
52490   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
52491   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
52492   int32x4_t __ret; \
52493   __ret = __rev0 - __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
52494   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
52495   __ret; \
52496 })
52497 #endif
52498 
52499 #ifdef __LITTLE_ENDIAN__
52500 __ai float64x2_t vmovq_n_f64(float64_t __p0) {
52501   float64x2_t __ret;
52502   __ret = (float64x2_t) {__p0, __p0};
52503   return __ret;
52504 }
52505 #else
52506 __ai float64x2_t vmovq_n_f64(float64_t __p0) {
52507   float64x2_t __ret;
52508   __ret = (float64x2_t) {__p0, __p0};
52509   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
52510   return __ret;
52511 }
52512 #endif
52513 
52514 #ifdef __LITTLE_ENDIAN__
52515 __ai float64x1_t vmov_n_f64(float64_t __p0) {
52516   float64x1_t __ret;
52517   __ret = (float64x1_t) {__p0};
52518   return __ret;
52519 }
52520 #else
52521 __ai float64x1_t vmov_n_f64(float64_t __p0) {
52522   float64x1_t __ret;
52523   __ret = (float64x1_t) {__p0};
52524   return __ret;
52525 }
52526 #endif
52527 
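/*
 * vmovl_high_<type>: widen the high half of a 128-bit vector to the next
 * wider element type (zero-extend for unsigned, sign-extend for signed).
 * Illustrative use, widening all 16 lanes of a uint8x16_t v:
 *   uint16x8_t lo = vmovl_u8(vget_low_u8(v));
 *   uint16x8_t hi = vmovl_high_u8(v);
 */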
52528 #ifdef __LITTLE_ENDIAN__
52529 __ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_116) {
52530   uint16x8_t __ret_116;
52531   uint8x8_t __a1_116 = vget_high_u8(__p0_116);
52532   __ret_116 = (uint16x8_t)(vshll_n_u8(__a1_116, 0));
52533   return __ret_116;
52534 }
52535 #else
52536 __ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_117) {
52537   uint8x16_t __rev0_117;  __rev0_117 = __builtin_shufflevector(__p0_117, __p0_117, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
52538   uint16x8_t __ret_117;
52539   uint8x8_t __a1_117 = __noswap_vget_high_u8(__rev0_117);
52540   __ret_117 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_117, 0));
52541   __ret_117 = __builtin_shufflevector(__ret_117, __ret_117, 7, 6, 5, 4, 3, 2, 1, 0);
52542   return __ret_117;
52543 }
52544 __ai uint16x8_t __noswap_vmovl_high_u8(uint8x16_t __p0_118) {
52545   uint16x8_t __ret_118;
52546   uint8x8_t __a1_118 = __noswap_vget_high_u8(__p0_118);
52547   __ret_118 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_118, 0));
52548   return __ret_118;
52549 }
52550 #endif
52551 
52552 #ifdef __LITTLE_ENDIAN__
52553 __ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_119) {
52554   uint64x2_t __ret_119;
52555   uint32x2_t __a1_119 = vget_high_u32(__p0_119);
52556   __ret_119 = (uint64x2_t)(vshll_n_u32(__a1_119, 0));
52557   return __ret_119;
52558 }
52559 #else
52560 __ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_120) {
52561   uint32x4_t __rev0_120;  __rev0_120 = __builtin_shufflevector(__p0_120, __p0_120, 3, 2, 1, 0);
52562   uint64x2_t __ret_120;
52563   uint32x2_t __a1_120 = __noswap_vget_high_u32(__rev0_120);
52564   __ret_120 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_120, 0));
52565   __ret_120 = __builtin_shufflevector(__ret_120, __ret_120, 1, 0);
52566   return __ret_120;
52567 }
52568 __ai uint64x2_t __noswap_vmovl_high_u32(uint32x4_t __p0_121) {
52569   uint64x2_t __ret_121;
52570   uint32x2_t __a1_121 = __noswap_vget_high_u32(__p0_121);
52571   __ret_121 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_121, 0));
52572   return __ret_121;
52573 }
52574 #endif
52575 
52576 #ifdef __LITTLE_ENDIAN__
52577 __ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_122) {
52578   uint32x4_t __ret_122;
52579   uint16x4_t __a1_122 = vget_high_u16(__p0_122);
52580   __ret_122 = (uint32x4_t)(vshll_n_u16(__a1_122, 0));
52581   return __ret_122;
52582 }
52583 #else
52584 __ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_123) {
52585   uint16x8_t __rev0_123;  __rev0_123 = __builtin_shufflevector(__p0_123, __p0_123, 7, 6, 5, 4, 3, 2, 1, 0);
52586   uint32x4_t __ret_123;
52587   uint16x4_t __a1_123 = __noswap_vget_high_u16(__rev0_123);
52588   __ret_123 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_123, 0));
52589   __ret_123 = __builtin_shufflevector(__ret_123, __ret_123, 3, 2, 1, 0);
52590   return __ret_123;
52591 }
52592 __ai uint32x4_t __noswap_vmovl_high_u16(uint16x8_t __p0_124) {
52593   uint32x4_t __ret_124;
52594   uint16x4_t __a1_124 = __noswap_vget_high_u16(__p0_124);
52595   __ret_124 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_124, 0));
52596   return __ret_124;
52597 }
52598 #endif
52599 
52600 #ifdef __LITTLE_ENDIAN__
52601 __ai int16x8_t vmovl_high_s8(int8x16_t __p0_125) {
52602   int16x8_t __ret_125;
52603   int8x8_t __a1_125 = vget_high_s8(__p0_125);
52604   __ret_125 = (int16x8_t)(vshll_n_s8(__a1_125, 0));
52605   return __ret_125;
52606 }
52607 #else
52608 __ai int16x8_t vmovl_high_s8(int8x16_t __p0_126) {
52609   int8x16_t __rev0_126;  __rev0_126 = __builtin_shufflevector(__p0_126, __p0_126, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
52610   int16x8_t __ret_126;
52611   int8x8_t __a1_126 = __noswap_vget_high_s8(__rev0_126);
52612   __ret_126 = (int16x8_t)(__noswap_vshll_n_s8(__a1_126, 0));
52613   __ret_126 = __builtin_shufflevector(__ret_126, __ret_126, 7, 6, 5, 4, 3, 2, 1, 0);
52614   return __ret_126;
52615 }
52616 __ai int16x8_t __noswap_vmovl_high_s8(int8x16_t __p0_127) {
52617   int16x8_t __ret_127;
52618   int8x8_t __a1_127 = __noswap_vget_high_s8(__p0_127);
52619   __ret_127 = (int16x8_t)(__noswap_vshll_n_s8(__a1_127, 0));
52620   return __ret_127;
52621 }
52622 #endif
52623 
52624 #ifdef __LITTLE_ENDIAN__
52625 __ai int64x2_t vmovl_high_s32(int32x4_t __p0_128) {
52626   int64x2_t __ret_128;
52627   int32x2_t __a1_128 = vget_high_s32(__p0_128);
52628   __ret_128 = (int64x2_t)(vshll_n_s32(__a1_128, 0));
52629   return __ret_128;
52630 }
52631 #else
52632 __ai int64x2_t vmovl_high_s32(int32x4_t __p0_129) {
52633   int32x4_t __rev0_129;  __rev0_129 = __builtin_shufflevector(__p0_129, __p0_129, 3, 2, 1, 0);
52634   int64x2_t __ret_129;
52635   int32x2_t __a1_129 = __noswap_vget_high_s32(__rev0_129);
52636   __ret_129 = (int64x2_t)(__noswap_vshll_n_s32(__a1_129, 0));
52637   __ret_129 = __builtin_shufflevector(__ret_129, __ret_129, 1, 0);
52638   return __ret_129;
52639 }
52640 __ai int64x2_t __noswap_vmovl_high_s32(int32x4_t __p0_130) {
52641   int64x2_t __ret_130;
52642   int32x2_t __a1_130 = __noswap_vget_high_s32(__p0_130);
52643   __ret_130 = (int64x2_t)(__noswap_vshll_n_s32(__a1_130, 0));
52644   return __ret_130;
52645 }
52646 #endif
52647 
52648 #ifdef __LITTLE_ENDIAN__
52649 __ai int32x4_t vmovl_high_s16(int16x8_t __p0_131) {
52650   int32x4_t __ret_131;
52651   int16x4_t __a1_131 = vget_high_s16(__p0_131);
52652   __ret_131 = (int32x4_t)(vshll_n_s16(__a1_131, 0));
52653   return __ret_131;
52654 }
52655 #else
52656 __ai int32x4_t vmovl_high_s16(int16x8_t __p0_132) {
52657   int16x8_t __rev0_132;  __rev0_132 = __builtin_shufflevector(__p0_132, __p0_132, 7, 6, 5, 4, 3, 2, 1, 0);
52658   int32x4_t __ret_132;
52659   int16x4_t __a1_132 = __noswap_vget_high_s16(__rev0_132);
52660   __ret_132 = (int32x4_t)(__noswap_vshll_n_s16(__a1_132, 0));
52661   __ret_132 = __builtin_shufflevector(__ret_132, __ret_132, 3, 2, 1, 0);
52662   return __ret_132;
52663 }
52664 __ai int32x4_t __noswap_vmovl_high_s16(int16x8_t __p0_133) {
52665   int32x4_t __ret_133;
52666   int16x4_t __a1_133 = __noswap_vget_high_s16(__p0_133);
52667   __ret_133 = (int32x4_t)(__noswap_vshll_n_s16(__a1_133, 0));
52668   return __ret_133;
52669 }
52670 #endif
52671 
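/*
 * vmovn_high_<type>: narrow each element of the second operand into the high
 * half of the destination while keeping the first operand as the low half.
 * Illustrative use, narrowing two uint32x4_t vectors a and b into one
 * uint16x8_t:
 *   uint16x4_t lo  = vmovn_u32(a);
 *   uint16x8_t out = vmovn_high_u32(lo, b);
 */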
52672 #ifdef __LITTLE_ENDIAN__
52673 __ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
52674   uint16x8_t __ret;
52675   __ret = vcombine_u16(__p0, vmovn_u32(__p1));
52676   return __ret;
52677 }
52678 #else
52679 __ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
52680   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
52681   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
52682   uint16x8_t __ret;
52683   __ret = __noswap_vcombine_u16(__rev0, __noswap_vmovn_u32(__rev1));
52684   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
52685   return __ret;
52686 }
52687 #endif
52688 
52689 #ifdef __LITTLE_ENDIAN__
52690 __ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
52691   uint32x4_t __ret;
52692   __ret = vcombine_u32(__p0, vmovn_u64(__p1));
52693   return __ret;
52694 }
52695 #else
52696 __ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
52697   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
52698   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
52699   uint32x4_t __ret;
52700   __ret = __noswap_vcombine_u32(__rev0, __noswap_vmovn_u64(__rev1));
52701   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
52702   return __ret;
52703 }
52704 #endif
52705 
52706 #ifdef __LITTLE_ENDIAN__
52707 __ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
52708   uint8x16_t __ret;
52709   __ret = vcombine_u8(__p0, vmovn_u16(__p1));
52710   return __ret;
52711 }
52712 #else
52713 __ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
52714   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
52715   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
52716   uint8x16_t __ret;
52717   __ret = __noswap_vcombine_u8(__rev0, __noswap_vmovn_u16(__rev1));
52718   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
52719   return __ret;
52720 }
52721 #endif
52722 
52723 #ifdef __LITTLE_ENDIAN__
52724 __ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
52725   int16x8_t __ret;
52726   __ret = vcombine_s16(__p0, vmovn_s32(__p1));
52727   return __ret;
52728 }
52729 #else
52730 __ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
52731   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
52732   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
52733   int16x8_t __ret;
52734   __ret = __noswap_vcombine_s16(__rev0, __noswap_vmovn_s32(__rev1));
52735   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
52736   return __ret;
52737 }
52738 #endif
52739 
52740 #ifdef __LITTLE_ENDIAN__
52741 __ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
52742   int32x4_t __ret;
52743   __ret = vcombine_s32(__p0, vmovn_s64(__p1));
52744   return __ret;
52745 }
52746 #else
52747 __ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
52748   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
52749   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
52750   int32x4_t __ret;
52751   __ret = __noswap_vcombine_s32(__rev0, __noswap_vmovn_s64(__rev1));
52752   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
52753   return __ret;
52754 }
52755 #endif
52756 
52757 #ifdef __LITTLE_ENDIAN__
52758 __ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
52759   int8x16_t __ret;
52760   __ret = vcombine_s8(__p0, vmovn_s16(__p1));
52761   return __ret;
52762 }
52763 #else
52764 __ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
52765   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
52766   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
52767   int8x16_t __ret;
52768   __ret = __noswap_vcombine_s8(__rev0, __noswap_vmovn_s16(__rev1));
52769   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
52770   return __ret;
52771 }
52772 #endif
52773 
52774 #ifdef __LITTLE_ENDIAN__
52775 __ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) {
52776   float64x2_t __ret;
52777   __ret = __p0 * __p1;
52778   return __ret;
52779 }
52780 #else
52781 __ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) {
52782   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
52783   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
52784   float64x2_t __ret;
52785   __ret = __rev0 * __rev1;
52786   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
52787   return __ret;
52788 }
52789 #endif
52790 
52791 #ifdef __LITTLE_ENDIAN__
52792 __ai float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) {
52793   float64x1_t __ret;
52794   __ret = __p0 * __p1;
52795   return __ret;
52796 }
52797 #else
52798 __ai float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) {
52799   float64x1_t __ret;
52800   __ret = __p0 * __p1;
52801   return __ret;
52802 }
52803 #endif
52804 
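/*
 * The vmul*_lane and vmul*_laneq forms below multiply a scalar, or every
 * element of a vector, by one selected lane of another vector; the _laneq
 * variants select the lane from a 128-bit vector. Illustrative use:
 *   float32x4_t scaled = vmulq_laneq_f32(x, coeffs, 2);   // x * coeffs[2]
 *   float64_t   d      = vmuld_lane_f64(s, v, 0);         // s * v[0]
 */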
52805 #ifdef __LITTLE_ENDIAN__
52806 #define vmuld_lane_f64(__p0_134, __p1_134, __p2_134) __extension__ ({ \
52807   float64_t __s0_134 = __p0_134; \
52808   float64x1_t __s1_134 = __p1_134; \
52809   float64_t __ret_134; \
52810   __ret_134 = __s0_134 * vget_lane_f64(__s1_134, __p2_134); \
52811   __ret_134; \
52812 })
52813 #else
52814 #define vmuld_lane_f64(__p0_135, __p1_135, __p2_135) __extension__ ({ \
52815   float64_t __s0_135 = __p0_135; \
52816   float64x1_t __s1_135 = __p1_135; \
52817   float64_t __ret_135; \
52818   __ret_135 = __s0_135 * __noswap_vget_lane_f64(__s1_135, __p2_135); \
52819   __ret_135; \
52820 })
52821 #endif
52822 
52823 #ifdef __LITTLE_ENDIAN__
52824 #define vmuls_lane_f32(__p0_136, __p1_136, __p2_136) __extension__ ({ \
52825   float32_t __s0_136 = __p0_136; \
52826   float32x2_t __s1_136 = __p1_136; \
52827   float32_t __ret_136; \
52828   __ret_136 = __s0_136 * vget_lane_f32(__s1_136, __p2_136); \
52829   __ret_136; \
52830 })
52831 #else
52832 #define vmuls_lane_f32(__p0_137, __p1_137, __p2_137) __extension__ ({ \
52833   float32_t __s0_137 = __p0_137; \
52834   float32x2_t __s1_137 = __p1_137; \
52835   float32x2_t __rev1_137;  __rev1_137 = __builtin_shufflevector(__s1_137, __s1_137, 1, 0); \
52836   float32_t __ret_137; \
52837   __ret_137 = __s0_137 * __noswap_vget_lane_f32(__rev1_137, __p2_137); \
52838   __ret_137; \
52839 })
52840 #endif
52841 
52842 #ifdef __LITTLE_ENDIAN__
52843 #define vmul_lane_f64(__p0, __p1, __p2) __extension__ ({ \
52844   float64x1_t __s0 = __p0; \
52845   float64x1_t __s1 = __p1; \
52846   float64x1_t __ret; \
52847   __ret = (float64x1_t) __builtin_neon_vmul_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
52848   __ret; \
52849 })
52850 #else
52851 #define vmul_lane_f64(__p0, __p1, __p2) __extension__ ({ \
52852   float64x1_t __s0 = __p0; \
52853   float64x1_t __s1 = __p1; \
52854   float64x1_t __ret; \
52855   __ret = (float64x1_t) __builtin_neon_vmul_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
52856   __ret; \
52857 })
52858 #endif
52859 
52860 #ifdef __LITTLE_ENDIAN__
52861 #define vmulq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
52862   float64x2_t __s0 = __p0; \
52863   float64x1_t __s1 = __p1; \
52864   float64x2_t __ret; \
52865   __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
52866   __ret; \
52867 })
52868 #else
52869 #define vmulq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
52870   float64x2_t __s0 = __p0; \
52871   float64x1_t __s1 = __p1; \
52872   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
52873   float64x2_t __ret; \
52874   __ret = __rev0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
52875   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
52876   __ret; \
52877 })
52878 #endif
52879 
52880 #ifdef __LITTLE_ENDIAN__
52881 #define vmuld_laneq_f64(__p0_138, __p1_138, __p2_138) __extension__ ({ \
52882   float64_t __s0_138 = __p0_138; \
52883   float64x2_t __s1_138 = __p1_138; \
52884   float64_t __ret_138; \
52885   __ret_138 = __s0_138 * vgetq_lane_f64(__s1_138, __p2_138); \
52886   __ret_138; \
52887 })
52888 #else
52889 #define vmuld_laneq_f64(__p0_139, __p1_139, __p2_139) __extension__ ({ \
52890   float64_t __s0_139 = __p0_139; \
52891   float64x2_t __s1_139 = __p1_139; \
52892   float64x2_t __rev1_139;  __rev1_139 = __builtin_shufflevector(__s1_139, __s1_139, 1, 0); \
52893   float64_t __ret_139; \
52894   __ret_139 = __s0_139 * __noswap_vgetq_lane_f64(__rev1_139, __p2_139); \
52895   __ret_139; \
52896 })
52897 #endif
52898 
52899 #ifdef __LITTLE_ENDIAN__
52900 #define vmuls_laneq_f32(__p0_140, __p1_140, __p2_140) __extension__ ({ \
52901   float32_t __s0_140 = __p0_140; \
52902   float32x4_t __s1_140 = __p1_140; \
52903   float32_t __ret_140; \
52904   __ret_140 = __s0_140 * vgetq_lane_f32(__s1_140, __p2_140); \
52905   __ret_140; \
52906 })
52907 #else
52908 #define vmuls_laneq_f32(__p0_141, __p1_141, __p2_141) __extension__ ({ \
52909   float32_t __s0_141 = __p0_141; \
52910   float32x4_t __s1_141 = __p1_141; \
52911   float32x4_t __rev1_141;  __rev1_141 = __builtin_shufflevector(__s1_141, __s1_141, 3, 2, 1, 0); \
52912   float32_t __ret_141; \
52913   __ret_141 = __s0_141 * __noswap_vgetq_lane_f32(__rev1_141, __p2_141); \
52914   __ret_141; \
52915 })
52916 #endif
52917 
52918 #ifdef __LITTLE_ENDIAN__
52919 #define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
52920   float64x1_t __s0 = __p0; \
52921   float64x2_t __s1 = __p1; \
52922   float64x1_t __ret; \
52923   __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 10); \
52924   __ret; \
52925 })
52926 #else
52927 #define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
52928   float64x1_t __s0 = __p0; \
52929   float64x2_t __s1 = __p1; \
52930   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
52931   float64x1_t __ret; \
52932   __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__rev1, __p2, 10); \
52933   __ret; \
52934 })
52935 #endif
52936 
52937 #ifdef __LITTLE_ENDIAN__
52938 #define vmulq_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
52939   uint32x4_t __s0 = __p0; \
52940   uint32x4_t __s1 = __p1; \
52941   uint32x4_t __ret; \
52942   __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
52943   __ret; \
52944 })
52945 #else
52946 #define vmulq_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
52947   uint32x4_t __s0 = __p0; \
52948   uint32x4_t __s1 = __p1; \
52949   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
52950   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
52951   uint32x4_t __ret; \
52952   __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
52953   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
52954   __ret; \
52955 })
52956 #endif
52957 
52958 #ifdef __LITTLE_ENDIAN__
52959 #define vmulq_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
52960   uint16x8_t __s0 = __p0; \
52961   uint16x8_t __s1 = __p1; \
52962   uint16x8_t __ret; \
52963   __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
52964   __ret; \
52965 })
52966 #else
52967 #define vmulq_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
52968   uint16x8_t __s0 = __p0; \
52969   uint16x8_t __s1 = __p1; \
52970   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
52971   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
52972   uint16x8_t __ret; \
52973   __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
52974   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
52975   __ret; \
52976 })
52977 #endif
52978 
52979 #ifdef __LITTLE_ENDIAN__
52980 #define vmulq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
52981   float64x2_t __s0 = __p0; \
52982   float64x2_t __s1 = __p1; \
52983   float64x2_t __ret; \
52984   __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
52985   __ret; \
52986 })
52987 #else
52988 #define vmulq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
52989   float64x2_t __s0 = __p0; \
52990   float64x2_t __s1 = __p1; \
52991   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
52992   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
52993   float64x2_t __ret; \
52994   __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
52995   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
52996   __ret; \
52997 })
52998 #endif
52999 
53000 #ifdef __LITTLE_ENDIAN__
53001 #define vmulq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
53002   float32x4_t __s0 = __p0; \
53003   float32x4_t __s1 = __p1; \
53004   float32x4_t __ret; \
53005   __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
53006   __ret; \
53007 })
53008 #else
53009 #define vmulq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
53010   float32x4_t __s0 = __p0; \
53011   float32x4_t __s1 = __p1; \
53012   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
53013   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
53014   float32x4_t __ret; \
53015   __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
53016   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
53017   __ret; \
53018 })
53019 #endif
53020 
53021 #ifdef __LITTLE_ENDIAN__
53022 #define vmulq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
53023   int32x4_t __s0 = __p0; \
53024   int32x4_t __s1 = __p1; \
53025   int32x4_t __ret; \
53026   __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
53027   __ret; \
53028 })
53029 #else
53030 #define vmulq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
53031   int32x4_t __s0 = __p0; \
53032   int32x4_t __s1 = __p1; \
53033   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
53034   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
53035   int32x4_t __ret; \
53036   __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
53037   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
53038   __ret; \
53039 })
53040 #endif
53041 
53042 #ifdef __LITTLE_ENDIAN__
53043 #define vmulq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
53044   int16x8_t __s0 = __p0; \
53045   int16x8_t __s1 = __p1; \
53046   int16x8_t __ret; \
53047   __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
53048   __ret; \
53049 })
53050 #else
53051 #define vmulq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
53052   int16x8_t __s0 = __p0; \
53053   int16x8_t __s1 = __p1; \
53054   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
53055   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
53056   int16x8_t __ret; \
53057   __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
53058   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
53059   __ret; \
53060 })
53061 #endif
53062 
53063 #ifdef __LITTLE_ENDIAN__
53064 #define vmul_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
53065   uint32x2_t __s0 = __p0; \
53066   uint32x4_t __s1 = __p1; \
53067   uint32x2_t __ret; \
53068   __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
53069   __ret; \
53070 })
53071 #else
53072 #define vmul_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
53073   uint32x2_t __s0 = __p0; \
53074   uint32x4_t __s1 = __p1; \
53075   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
53076   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
53077   uint32x2_t __ret; \
53078   __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
53079   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
53080   __ret; \
53081 })
53082 #endif
53083 
53084 #ifdef __LITTLE_ENDIAN__
53085 #define vmul_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
53086   uint16x4_t __s0 = __p0; \
53087   uint16x8_t __s1 = __p1; \
53088   uint16x4_t __ret; \
53089   __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
53090   __ret; \
53091 })
53092 #else
53093 #define vmul_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
53094   uint16x4_t __s0 = __p0; \
53095   uint16x8_t __s1 = __p1; \
53096   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
53097   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
53098   uint16x4_t __ret; \
53099   __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
53100   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
53101   __ret; \
53102 })
53103 #endif
53104 
53105 #ifdef __LITTLE_ENDIAN__
53106 #define vmul_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
53107   float32x2_t __s0 = __p0; \
53108   float32x4_t __s1 = __p1; \
53109   float32x2_t __ret; \
53110   __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
53111   __ret; \
53112 })
53113 #else
53114 #define vmul_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
53115   float32x2_t __s0 = __p0; \
53116   float32x4_t __s1 = __p1; \
53117   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
53118   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
53119   float32x2_t __ret; \
53120   __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
53121   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
53122   __ret; \
53123 })
53124 #endif
53125 
53126 #ifdef __LITTLE_ENDIAN__
53127 #define vmul_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
53128   int32x2_t __s0 = __p0; \
53129   int32x4_t __s1 = __p1; \
53130   int32x2_t __ret; \
53131   __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
53132   __ret; \
53133 })
53134 #else
53135 #define vmul_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
53136   int32x2_t __s0 = __p0; \
53137   int32x4_t __s1 = __p1; \
53138   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
53139   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
53140   int32x2_t __ret; \
53141   __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
53142   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
53143   __ret; \
53144 })
53145 #endif
53146 
53147 #ifdef __LITTLE_ENDIAN__
53148 #define vmul_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
53149   int16x4_t __s0 = __p0; \
53150   int16x8_t __s1 = __p1; \
53151   int16x4_t __ret; \
53152   __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
53153   __ret; \
53154 })
53155 #else
53156 #define vmul_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
53157   int16x4_t __s0 = __p0; \
53158   int16x8_t __s1 = __p1; \
53159   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
53160   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
53161   int16x4_t __ret; \
53162   __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
53163   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
53164   __ret; \
53165 })
53166 #endif
53167 
53168 #ifdef __LITTLE_ENDIAN__
53169 __ai float64x1_t vmul_n_f64(float64x1_t __p0, float64_t __p1) {
53170   float64x1_t __ret;
53171   __ret = (float64x1_t) __builtin_neon_vmul_n_f64((int8x8_t)__p0, __p1);
53172   return __ret;
53173 }
53174 #else
53175 __ai float64x1_t vmul_n_f64(float64x1_t __p0, float64_t __p1) {
53176   float64x1_t __ret;
53177   __ret = (float64x1_t) __builtin_neon_vmul_n_f64((int8x8_t)__p0, __p1);
53178   return __ret;
53179 }
53180 #endif
53181 
53182 #ifdef __LITTLE_ENDIAN__
53183 __ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) {
53184   float64x2_t __ret;
53185   __ret = __p0 * (float64x2_t) {__p1, __p1};
53186   return __ret;
53187 }
53188 #else
53189 __ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) {
53190   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
53191   float64x2_t __ret;
53192   __ret = __rev0 * (float64x2_t) {__p1, __p1};
53193   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
53194   return __ret;
53195 }
53196 #endif
53197 
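/*
 * vmull_p64 and vmull_high_p64 perform a 64x64 -> 128-bit carry-less
 * (polynomial) multiply, commonly used for CRC and GHASH; on AArch64 they map
 * to the PMULL/PMULL2 instructions from the Crypto extension. Illustrative
 * use with two poly64x2_t values a and b:
 *   poly128_t prod = vmull_p64(vgetq_lane_p64(a, 0), vgetq_lane_p64(b, 0));
 */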
53198 #ifdef __LITTLE_ENDIAN__
53199 __ai poly128_t vmull_p64(poly64_t __p0, poly64_t __p1) {
53200   poly128_t __ret;
53201   __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1);
53202   return __ret;
53203 }
53204 #else
53205 __ai poly128_t vmull_p64(poly64_t __p0, poly64_t __p1) {
53206   poly128_t __ret;
53207   __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1);
53208   return __ret;
53209 }
53210 __ai poly128_t __noswap_vmull_p64(poly64_t __p0, poly64_t __p1) {
53211   poly128_t __ret;
53212   __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1);
53213   return __ret;
53214 }
53215 #endif
53216 
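/*
 * vmull_high_<type>: widening multiply of the high halves of two 128-bit
 * vectors, complementing vmull_<type> on the low halves. Illustrative use,
 * producing a full 8-bit -> 16-bit widening multiply in two steps:
 *   uint16x8_t lo = vmull_u8(vget_low_u8(a), vget_low_u8(b));
 *   uint16x8_t hi = vmull_high_u8(a, b);
 */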
53217 #ifdef __LITTLE_ENDIAN__
53218 __ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) {
53219   poly16x8_t __ret;
53220   __ret = vmull_p8(vget_high_p8(__p0), vget_high_p8(__p1));
53221   return __ret;
53222 }
53223 #else
53224 __ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) {
53225   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
53226   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
53227   poly16x8_t __ret;
53228   __ret = __noswap_vmull_p8(__noswap_vget_high_p8(__rev0), __noswap_vget_high_p8(__rev1));
53229   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
53230   return __ret;
53231 }
53232 #endif
53233 
53234 #ifdef __LITTLE_ENDIAN__
53235 __ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
53236   uint16x8_t __ret;
53237   __ret = vmull_u8(vget_high_u8(__p0), vget_high_u8(__p1));
53238   return __ret;
53239 }
53240 #else
53241 __ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
53242   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
53243   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
53244   uint16x8_t __ret;
53245   __ret = __noswap_vmull_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1));
53246   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
53247   return __ret;
53248 }
53249 #endif
53250 
53251 #ifdef __LITTLE_ENDIAN__
53252 __ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
53253   uint64x2_t __ret;
53254   __ret = vmull_u32(vget_high_u32(__p0), vget_high_u32(__p1));
53255   return __ret;
53256 }
53257 #else
53258 __ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
53259   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
53260   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
53261   uint64x2_t __ret;
53262   __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1));
53263   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
53264   return __ret;
53265 }
53266 #endif
53267 
53268 #ifdef __LITTLE_ENDIAN__
53269 __ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
53270   uint32x4_t __ret;
53271   __ret = vmull_u16(vget_high_u16(__p0), vget_high_u16(__p1));
53272   return __ret;
53273 }
53274 #else
53275 __ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
53276   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
53277   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
53278   uint32x4_t __ret;
53279   __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1));
53280   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
53281   return __ret;
53282 }
53283 #endif
53284 
53285 #ifdef __LITTLE_ENDIAN__
53286 __ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) {
53287   int16x8_t __ret;
53288   __ret = vmull_s8(vget_high_s8(__p0), vget_high_s8(__p1));
53289   return __ret;
53290 }
53291 #else
53292 __ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) {
53293   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
53294   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
53295   int16x8_t __ret;
53296   __ret = __noswap_vmull_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1));
53297   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
53298   return __ret;
53299 }
53300 #endif
53301 
53302 #ifdef __LITTLE_ENDIAN__
53303 __ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
53304   int64x2_t __ret;
53305   __ret = vmull_s32(vget_high_s32(__p0), vget_high_s32(__p1));
53306   return __ret;
53307 }
53308 #else
53309 __ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
53310   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
53311   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
53312   int64x2_t __ret;
53313   __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
53314   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
53315   return __ret;
53316 }
53317 #endif
53318 
53319 #ifdef __LITTLE_ENDIAN__
53320 __ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
53321   int32x4_t __ret;
53322   __ret = vmull_s16(vget_high_s16(__p0), vget_high_s16(__p1));
53323   return __ret;
53324 }
53325 #else
53326 __ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
53327   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
53328   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
53329   int32x4_t __ret;
53330   __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
53331   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
53332   return __ret;
53333 }
53334 #endif
53335 
53336 #ifdef __LITTLE_ENDIAN__
53337 __ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) {
53338   poly128_t __ret;
53339   __ret = vmull_p64((poly64_t)(vget_high_p64(__p0)), (poly64_t)(vget_high_p64(__p1)));
53340   return __ret;
53341 }
53342 #else
53343 __ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) {
53344   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
53345   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
53346   poly128_t __ret;
53347   __ret = __noswap_vmull_p64((poly64_t)(__noswap_vget_high_p64(__rev0)), (poly64_t)(__noswap_vget_high_p64(__rev1)));
53348   return __ret;
53349 }
53350 #endif
53351 
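/*
 * vmull_high_lane, vmull_high_laneq, and vmull_high_n: widening multiply of
 * the high half of the first operand by a single lane (or, for _n, a scalar)
 * broadcast across the vector. Illustrative use with int32x4_t a and
 * int32x2_t v:
 *   int64x2_t r = vmull_high_lane_s32(a, v, 1);   // high half of a, times v[1]
 */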
53352 #ifdef __LITTLE_ENDIAN__
53353 #define vmull_high_lane_u32(__p0, __p1, __p2) __extension__ ({ \
53354   uint32x4_t __s0 = __p0; \
53355   uint32x2_t __s1 = __p1; \
53356   uint64x2_t __ret; \
53357   __ret = vmull_u32(vget_high_u32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
53358   __ret; \
53359 })
53360 #else
53361 #define vmull_high_lane_u32(__p0, __p1, __p2) __extension__ ({ \
53362   uint32x4_t __s0 = __p0; \
53363   uint32x2_t __s1 = __p1; \
53364   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
53365   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
53366   uint64x2_t __ret; \
53367   __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
53368   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
53369   __ret; \
53370 })
53371 #endif
53372 
53373 #ifdef __LITTLE_ENDIAN__
53374 #define vmull_high_lane_u16(__p0, __p1, __p2) __extension__ ({ \
53375   uint16x8_t __s0 = __p0; \
53376   uint16x4_t __s1 = __p1; \
53377   uint32x4_t __ret; \
53378   __ret = vmull_u16(vget_high_u16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
53379   __ret; \
53380 })
53381 #else
53382 #define vmull_high_lane_u16(__p0, __p1, __p2) __extension__ ({ \
53383   uint16x8_t __s0 = __p0; \
53384   uint16x4_t __s1 = __p1; \
53385   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
53386   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
53387   uint32x4_t __ret; \
53388   __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
53389   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
53390   __ret; \
53391 })
53392 #endif
53393 
53394 #ifdef __LITTLE_ENDIAN__
53395 #define vmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \
53396   int32x4_t __s0 = __p0; \
53397   int32x2_t __s1 = __p1; \
53398   int64x2_t __ret; \
53399   __ret = vmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
53400   __ret; \
53401 })
53402 #else
53403 #define vmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \
53404   int32x4_t __s0 = __p0; \
53405   int32x2_t __s1 = __p1; \
53406   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
53407   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
53408   int64x2_t __ret; \
53409   __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
53410   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
53411   __ret; \
53412 })
53413 #endif
53414 
53415 #ifdef __LITTLE_ENDIAN__
53416 #define vmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \
53417   int16x8_t __s0 = __p0; \
53418   int16x4_t __s1 = __p1; \
53419   int32x4_t __ret; \
53420   __ret = vmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
53421   __ret; \
53422 })
53423 #else
53424 #define vmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \
53425   int16x8_t __s0 = __p0; \
53426   int16x4_t __s1 = __p1; \
53427   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
53428   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
53429   int32x4_t __ret; \
53430   __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
53431   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
53432   __ret; \
53433 })
53434 #endif
53435 
53436 #ifdef __LITTLE_ENDIAN__
53437 #define vmull_high_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
53438   uint32x4_t __s0 = __p0; \
53439   uint32x4_t __s1 = __p1; \
53440   uint64x2_t __ret; \
53441   __ret = vmull_u32(vget_high_u32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
53442   __ret; \
53443 })
53444 #else
53445 #define vmull_high_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
53446   uint32x4_t __s0 = __p0; \
53447   uint32x4_t __s1 = __p1; \
53448   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
53449   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
53450   uint64x2_t __ret; \
53451   __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
53452   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
53453   __ret; \
53454 })
53455 #endif
53456 
53457 #ifdef __LITTLE_ENDIAN__
53458 #define vmull_high_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
53459   uint16x8_t __s0 = __p0; \
53460   uint16x8_t __s1 = __p1; \
53461   uint32x4_t __ret; \
53462   __ret = vmull_u16(vget_high_u16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
53463   __ret; \
53464 })
53465 #else
53466 #define vmull_high_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
53467   uint16x8_t __s0 = __p0; \
53468   uint16x8_t __s1 = __p1; \
53469   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
53470   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
53471   uint32x4_t __ret; \
53472   __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
53473   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
53474   __ret; \
53475 })
53476 #endif
53477 
53478 #ifdef __LITTLE_ENDIAN__
53479 #define vmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
53480   int32x4_t __s0 = __p0; \
53481   int32x4_t __s1 = __p1; \
53482   int64x2_t __ret; \
53483   __ret = vmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
53484   __ret; \
53485 })
53486 #else
53487 #define vmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
53488   int32x4_t __s0 = __p0; \
53489   int32x4_t __s1 = __p1; \
53490   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
53491   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
53492   int64x2_t __ret; \
53493   __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
53494   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
53495   __ret; \
53496 })
53497 #endif
53498 
53499 #ifdef __LITTLE_ENDIAN__
53500 #define vmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
53501   int16x8_t __s0 = __p0; \
53502   int16x8_t __s1 = __p1; \
53503   int32x4_t __ret; \
53504   __ret = vmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
53505   __ret; \
53506 })
53507 #else
53508 #define vmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
53509   int16x8_t __s0 = __p0; \
53510   int16x8_t __s1 = __p1; \
53511   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
53512   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
53513   int32x4_t __ret; \
53514   __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
53515   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
53516   __ret; \
53517 })
53518 #endif
53519 
53520 #ifdef __LITTLE_ENDIAN__
53521 __ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) {
53522   uint64x2_t __ret;
53523   __ret = vmull_n_u32(vget_high_u32(__p0), __p1);
53524   return __ret;
53525 }
53526 #else
53527 __ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) {
53528   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
53529   uint64x2_t __ret;
53530   __ret = __noswap_vmull_n_u32(__noswap_vget_high_u32(__rev0), __p1);
53531   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
53532   return __ret;
53533 }
53534 #endif
53535 
53536 #ifdef __LITTLE_ENDIAN__
53537 __ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) {
53538   uint32x4_t __ret;
53539   __ret = vmull_n_u16(vget_high_u16(__p0), __p1);
53540   return __ret;
53541 }
53542 #else
53543 __ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) {
53544   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
53545   uint32x4_t __ret;
53546   __ret = __noswap_vmull_n_u16(__noswap_vget_high_u16(__rev0), __p1);
53547   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
53548   return __ret;
53549 }
53550 #endif
53551 
53552 #ifdef __LITTLE_ENDIAN__
53553 __ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
53554   int64x2_t __ret;
53555   __ret = vmull_n_s32(vget_high_s32(__p0), __p1);
53556   return __ret;
53557 }
53558 #else
53559 __ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
53560   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
53561   int64x2_t __ret;
53562   __ret = __noswap_vmull_n_s32(__noswap_vget_high_s32(__rev0), __p1);
53563   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
53564   return __ret;
53565 }
53566 #endif
53567 
53568 #ifdef __LITTLE_ENDIAN__
53569 __ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
53570   int32x4_t __ret;
53571   __ret = vmull_n_s16(vget_high_s16(__p0), __p1);
53572   return __ret;
53573 }
53574 #else
53575 __ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
53576   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
53577   int32x4_t __ret;
53578   __ret = __noswap_vmull_n_s16(__noswap_vget_high_s16(__rev0), __p1);
53579   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
53580   return __ret;
53581 }
53582 #endif
53583 
53584 #ifdef __LITTLE_ENDIAN__
53585 #define vmull_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
53586   uint32x2_t __s0 = __p0; \
53587   uint32x4_t __s1 = __p1; \
53588   uint64x2_t __ret; \
53589   __ret = vmull_u32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
53590   __ret; \
53591 })
53592 #else
53593 #define vmull_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
53594   uint32x2_t __s0 = __p0; \
53595   uint32x4_t __s1 = __p1; \
53596   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
53597   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
53598   uint64x2_t __ret; \
53599   __ret = __noswap_vmull_u32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
53600   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
53601   __ret; \
53602 })
53603 #endif
53604 
53605 #ifdef __LITTLE_ENDIAN__
53606 #define vmull_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
53607   uint16x4_t __s0 = __p0; \
53608   uint16x8_t __s1 = __p1; \
53609   uint32x4_t __ret; \
53610   __ret = vmull_u16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
53611   __ret; \
53612 })
53613 #else
53614 #define vmull_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
53615   uint16x4_t __s0 = __p0; \
53616   uint16x8_t __s1 = __p1; \
53617   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
53618   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
53619   uint32x4_t __ret; \
53620   __ret = __noswap_vmull_u16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
53621   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
53622   __ret; \
53623 })
53624 #endif
53625 
53626 #ifdef __LITTLE_ENDIAN__
53627 #define vmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
53628   int32x2_t __s0 = __p0; \
53629   int32x4_t __s1 = __p1; \
53630   int64x2_t __ret; \
53631   __ret = vmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
53632   __ret; \
53633 })
53634 #else
53635 #define vmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
53636   int32x2_t __s0 = __p0; \
53637   int32x4_t __s1 = __p1; \
53638   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
53639   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
53640   int64x2_t __ret; \
53641   __ret = __noswap_vmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
53642   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
53643   __ret; \
53644 })
53645 #endif
53646 
53647 #ifdef __LITTLE_ENDIAN__
53648 #define vmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
53649   int16x4_t __s0 = __p0; \
53650   int16x8_t __s1 = __p1; \
53651   int32x4_t __ret; \
53652   __ret = vmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
53653   __ret; \
53654 })
53655 #else
53656 #define vmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
53657   int16x4_t __s0 = __p0; \
53658   int16x8_t __s1 = __p1; \
53659   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
53660   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
53661   int32x4_t __ret; \
53662   __ret = __noswap_vmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
53663   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
53664   __ret; \
53665 })
53666 #endif
53667 
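/* vmulx*: floating-point multiply extended (AArch64 FMULX). Behaves like an
 * ordinary multiply except that, per the FMULX definition, 0 multiplied by
 * infinity returns 2.0 with the appropriate sign rather than NaN. The _lane and
 * _laneq forms below multiply by a single broadcast lane of the second operand. */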
53668 #ifdef __LITTLE_ENDIAN__
53669 __ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
53670   float64x2_t __ret;
53671   __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
53672   return __ret;
53673 }
53674 #else
53675 __ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
53676   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
53677   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
53678   float64x2_t __ret;
53679   __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
53680   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
53681   return __ret;
53682 }
53683 __ai float64x2_t __noswap_vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
53684   float64x2_t __ret;
53685   __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
53686   return __ret;
53687 }
53688 #endif
53689 
53690 #ifdef __LITTLE_ENDIAN__
53691 __ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
53692   float32x4_t __ret;
53693   __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
53694   return __ret;
53695 }
53696 #else
53697 __ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
53698   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
53699   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
53700   float32x4_t __ret;
53701   __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
53702   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
53703   return __ret;
53704 }
53705 __ai float32x4_t __noswap_vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
53706   float32x4_t __ret;
53707   __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
53708   return __ret;
53709 }
53710 #endif
53711 
53712 #ifdef __LITTLE_ENDIAN__
53713 __ai float64x1_t vmulx_f64(float64x1_t __p0, float64x1_t __p1) {
53714   float64x1_t __ret;
53715   __ret = (float64x1_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
53716   return __ret;
53717 }
53718 #else
53719 __ai float64x1_t vmulx_f64(float64x1_t __p0, float64x1_t __p1) {
53720   float64x1_t __ret;
53721   __ret = (float64x1_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
53722   return __ret;
53723 }
53724 #endif
53725 
53726 #ifdef __LITTLE_ENDIAN__
53727 __ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
53728   float32x2_t __ret;
53729   __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
53730   return __ret;
53731 }
53732 #else
53733 __ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
53734   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
53735   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
53736   float32x2_t __ret;
53737   __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
53738   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
53739   return __ret;
53740 }
53741 __ai float32x2_t __noswap_vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
53742   float32x2_t __ret;
53743   __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
53744   return __ret;
53745 }
53746 #endif
53747 
53748 #ifdef __LITTLE_ENDIAN__
53749 __ai float64_t vmulxd_f64(float64_t __p0, float64_t __p1) {
53750   float64_t __ret;
53751   __ret = (float64_t) __builtin_neon_vmulxd_f64(__p0, __p1);
53752   return __ret;
53753 }
53754 #else
53755 __ai float64_t vmulxd_f64(float64_t __p0, float64_t __p1) {
53756   float64_t __ret;
53757   __ret = (float64_t) __builtin_neon_vmulxd_f64(__p0, __p1);
53758   return __ret;
53759 }
53760 __ai float64_t __noswap_vmulxd_f64(float64_t __p0, float64_t __p1) {
53761   float64_t __ret;
53762   __ret = (float64_t) __builtin_neon_vmulxd_f64(__p0, __p1);
53763   return __ret;
53764 }
53765 #endif
53766 
53767 #ifdef __LITTLE_ENDIAN__
53768 __ai float32_t vmulxs_f32(float32_t __p0, float32_t __p1) {
53769   float32_t __ret;
53770   __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1);
53771   return __ret;
53772 }
53773 #else
53774 __ai float32_t vmulxs_f32(float32_t __p0, float32_t __p1) {
53775   float32_t __ret;
53776   __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1);
53777   return __ret;
53778 }
53779 __ai float32_t __noswap_vmulxs_f32(float32_t __p0, float32_t __p1) {
53780   float32_t __ret;
53781   __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1);
53782   return __ret;
53783 }
53784 #endif
53785 
53786 #ifdef __LITTLE_ENDIAN__
53787 #define vmulxd_lane_f64(__p0_142, __p1_142, __p2_142) __extension__ ({ \
53788   float64_t __s0_142 = __p0_142; \
53789   float64x1_t __s1_142 = __p1_142; \
53790   float64_t __ret_142; \
53791   __ret_142 = vmulxd_f64(__s0_142, vget_lane_f64(__s1_142, __p2_142)); \
53792   __ret_142; \
53793 })
53794 #else
53795 #define vmulxd_lane_f64(__p0_143, __p1_143, __p2_143) __extension__ ({ \
53796   float64_t __s0_143 = __p0_143; \
53797   float64x1_t __s1_143 = __p1_143; \
53798   float64_t __ret_143; \
53799   __ret_143 = __noswap_vmulxd_f64(__s0_143, __noswap_vget_lane_f64(__s1_143, __p2_143)); \
53800   __ret_143; \
53801 })
53802 #endif
53803 
53804 #ifdef __LITTLE_ENDIAN__
53805 #define vmulxs_lane_f32(__p0_144, __p1_144, __p2_144) __extension__ ({ \
53806   float32_t __s0_144 = __p0_144; \
53807   float32x2_t __s1_144 = __p1_144; \
53808   float32_t __ret_144; \
53809   __ret_144 = vmulxs_f32(__s0_144, vget_lane_f32(__s1_144, __p2_144)); \
53810   __ret_144; \
53811 })
53812 #else
53813 #define vmulxs_lane_f32(__p0_145, __p1_145, __p2_145) __extension__ ({ \
53814   float32_t __s0_145 = __p0_145; \
53815   float32x2_t __s1_145 = __p1_145; \
53816   float32x2_t __rev1_145;  __rev1_145 = __builtin_shufflevector(__s1_145, __s1_145, 1, 0); \
53817   float32_t __ret_145; \
53818   __ret_145 = __noswap_vmulxs_f32(__s0_145, __noswap_vget_lane_f32(__rev1_145, __p2_145)); \
53819   __ret_145; \
53820 })
53821 #endif
53822 
53823 #ifdef __LITTLE_ENDIAN__
53824 #define vmulxq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
53825   float64x2_t __s0 = __p0; \
53826   float64x1_t __s1 = __p1; \
53827   float64x2_t __ret; \
53828   __ret = vmulxq_f64(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
53829   __ret; \
53830 })
53831 #else
53832 #define vmulxq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
53833   float64x2_t __s0 = __p0; \
53834   float64x1_t __s1 = __p1; \
53835   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
53836   float64x2_t __ret; \
53837   __ret = __noswap_vmulxq_f64(__rev0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
53838   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
53839   __ret; \
53840 })
53841 #endif
53842 
53843 #ifdef __LITTLE_ENDIAN__
53844 #define vmulxq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
53845   float32x4_t __s0 = __p0; \
53846   float32x2_t __s1 = __p1; \
53847   float32x4_t __ret; \
53848   __ret = vmulxq_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
53849   __ret; \
53850 })
53851 #else
53852 #define vmulxq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
53853   float32x4_t __s0 = __p0; \
53854   float32x2_t __s1 = __p1; \
53855   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
53856   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
53857   float32x4_t __ret; \
53858   __ret = __noswap_vmulxq_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
53859   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
53860   __ret; \
53861 })
53862 #endif
53863 
53864 #ifdef __LITTLE_ENDIAN__
53865 #define vmulx_lane_f32(__p0, __p1, __p2) __extension__ ({ \
53866   float32x2_t __s0 = __p0; \
53867   float32x2_t __s1 = __p1; \
53868   float32x2_t __ret; \
53869   __ret = vmulx_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
53870   __ret; \
53871 })
53872 #else
53873 #define vmulx_lane_f32(__p0, __p1, __p2) __extension__ ({ \
53874   float32x2_t __s0 = __p0; \
53875   float32x2_t __s1 = __p1; \
53876   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
53877   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
53878   float32x2_t __ret; \
53879   __ret = __noswap_vmulx_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
53880   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
53881   __ret; \
53882 })
53883 #endif
53884 
53885 #ifdef __LITTLE_ENDIAN__
53886 #define vmulxd_laneq_f64(__p0_146, __p1_146, __p2_146) __extension__ ({ \
53887   float64_t __s0_146 = __p0_146; \
53888   float64x2_t __s1_146 = __p1_146; \
53889   float64_t __ret_146; \
53890   __ret_146 = vmulxd_f64(__s0_146, vgetq_lane_f64(__s1_146, __p2_146)); \
53891   __ret_146; \
53892 })
53893 #else
53894 #define vmulxd_laneq_f64(__p0_147, __p1_147, __p2_147) __extension__ ({ \
53895   float64_t __s0_147 = __p0_147; \
53896   float64x2_t __s1_147 = __p1_147; \
53897   float64x2_t __rev1_147;  __rev1_147 = __builtin_shufflevector(__s1_147, __s1_147, 1, 0); \
53898   float64_t __ret_147; \
53899   __ret_147 = __noswap_vmulxd_f64(__s0_147, __noswap_vgetq_lane_f64(__rev1_147, __p2_147)); \
53900   __ret_147; \
53901 })
53902 #endif
53903 
53904 #ifdef __LITTLE_ENDIAN__
53905 #define vmulxs_laneq_f32(__p0_148, __p1_148, __p2_148) __extension__ ({ \
53906   float32_t __s0_148 = __p0_148; \
53907   float32x4_t __s1_148 = __p1_148; \
53908   float32_t __ret_148; \
53909   __ret_148 = vmulxs_f32(__s0_148, vgetq_lane_f32(__s1_148, __p2_148)); \
53910   __ret_148; \
53911 })
53912 #else
53913 #define vmulxs_laneq_f32(__p0_149, __p1_149, __p2_149) __extension__ ({ \
53914   float32_t __s0_149 = __p0_149; \
53915   float32x4_t __s1_149 = __p1_149; \
53916   float32x4_t __rev1_149;  __rev1_149 = __builtin_shufflevector(__s1_149, __s1_149, 3, 2, 1, 0); \
53917   float32_t __ret_149; \
53918   __ret_149 = __noswap_vmulxs_f32(__s0_149, __noswap_vgetq_lane_f32(__rev1_149, __p2_149)); \
53919   __ret_149; \
53920 })
53921 #endif
53922 
53923 #ifdef __LITTLE_ENDIAN__
53924 #define vmulxq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
53925   float64x2_t __s0 = __p0; \
53926   float64x2_t __s1 = __p1; \
53927   float64x2_t __ret; \
53928   __ret = vmulxq_f64(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
53929   __ret; \
53930 })
53931 #else
53932 #define vmulxq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
53933   float64x2_t __s0 = __p0; \
53934   float64x2_t __s1 = __p1; \
53935   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
53936   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
53937   float64x2_t __ret; \
53938   __ret = __noswap_vmulxq_f64(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
53939   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
53940   __ret; \
53941 })
53942 #endif
53943 
53944 #ifdef __LITTLE_ENDIAN__
53945 #define vmulxq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
53946   float32x4_t __s0 = __p0; \
53947   float32x4_t __s1 = __p1; \
53948   float32x4_t __ret; \
53949   __ret = vmulxq_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
53950   __ret; \
53951 })
53952 #else
53953 #define vmulxq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
53954   float32x4_t __s0 = __p0; \
53955   float32x4_t __s1 = __p1; \
53956   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
53957   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
53958   float32x4_t __ret; \
53959   __ret = __noswap_vmulxq_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
53960   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
53961   __ret; \
53962 })
53963 #endif
53964 
53965 #ifdef __LITTLE_ENDIAN__
53966 #define vmulx_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
53967   float32x2_t __s0 = __p0; \
53968   float32x4_t __s1 = __p1; \
53969   float32x2_t __ret; \
53970   __ret = vmulx_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
53971   __ret; \
53972 })
53973 #else
53974 #define vmulx_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
53975   float32x2_t __s0 = __p0; \
53976   float32x4_t __s1 = __p1; \
53977   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
53978   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
53979   float32x2_t __ret; \
53980   __ret = __noswap_vmulx_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
53981   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
53982   __ret; \
53983 })
53984 #endif
53985 
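/* vneg*: element-wise negation for the 64-bit element and float64 types, plus
 * the scalar vnegd_s64 form. */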
53986 #ifdef __LITTLE_ENDIAN__
53987 __ai float64x2_t vnegq_f64(float64x2_t __p0) {
53988   float64x2_t __ret;
53989   __ret = -__p0;
53990   return __ret;
53991 }
53992 #else
53993 __ai float64x2_t vnegq_f64(float64x2_t __p0) {
53994   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
53995   float64x2_t __ret;
53996   __ret = -__rev0;
53997   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
53998   return __ret;
53999 }
54000 #endif
54001 
54002 #ifdef __LITTLE_ENDIAN__
54003 __ai int64x2_t vnegq_s64(int64x2_t __p0) {
54004   int64x2_t __ret;
54005   __ret = -__p0;
54006   return __ret;
54007 }
54008 #else
54009 __ai int64x2_t vnegq_s64(int64x2_t __p0) {
54010   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
54011   int64x2_t __ret;
54012   __ret = -__rev0;
54013   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
54014   return __ret;
54015 }
54016 #endif
54017 
54018 #ifdef __LITTLE_ENDIAN__
54019 __ai float64x1_t vneg_f64(float64x1_t __p0) {
54020   float64x1_t __ret;
54021   __ret = -__p0;
54022   return __ret;
54023 }
54024 #else
54025 __ai float64x1_t vneg_f64(float64x1_t __p0) {
54026   float64x1_t __ret;
54027   __ret = -__p0;
54028   return __ret;
54029 }
54030 #endif
54031 
54032 #ifdef __LITTLE_ENDIAN__
54033 __ai int64x1_t vneg_s64(int64x1_t __p0) {
54034   int64x1_t __ret;
54035   __ret = -__p0;
54036   return __ret;
54037 }
54038 #else
54039 __ai int64x1_t vneg_s64(int64x1_t __p0) {
54040   int64x1_t __ret;
54041   __ret = -__p0;
54042   return __ret;
54043 }
54044 #endif
54045 
54046 #ifdef __LITTLE_ENDIAN__
54047 __ai int64_t vnegd_s64(int64_t __p0) {
54048   int64_t __ret;
54049   __ret = (int64_t) __builtin_neon_vnegd_s64(__p0);
54050   return __ret;
54051 }
54052 #else
54053 __ai int64_t vnegd_s64(int64_t __p0) {
54054   int64_t __ret;
54055   __ret = (int64_t) __builtin_neon_vnegd_s64(__p0);
54056   return __ret;
54057 }
54058 #endif
54059 
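/* vpaddq_*: pairwise add. Adjacent element pairs of each operand are summed;
 * results from the first operand fill the low half of the result and those from
 * the second operand the high half. The vpaddd and vpadds forms add the two
 * elements of a single vector into one scalar. */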
54060 #ifdef __LITTLE_ENDIAN__
54061 __ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
54062   uint8x16_t __ret;
54063   __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
54064   return __ret;
54065 }
54066 #else
54067 __ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
54068   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
54069   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
54070   uint8x16_t __ret;
54071   __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
54072   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
54073   return __ret;
54074 }
54075 #endif
54076 
54077 #ifdef __LITTLE_ENDIAN__
54078 __ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
54079   uint32x4_t __ret;
54080   __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
54081   return __ret;
54082 }
54083 #else
54084 __ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
54085   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
54086   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
54087   uint32x4_t __ret;
54088   __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
54089   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
54090   return __ret;
54091 }
54092 #endif
54093 
54094 #ifdef __LITTLE_ENDIAN__
54095 __ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
54096   uint64x2_t __ret;
54097   __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
54098   return __ret;
54099 }
54100 #else
54101 __ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
54102   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
54103   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
54104   uint64x2_t __ret;
54105   __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
54106   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
54107   return __ret;
54108 }
54109 #endif
54110 
54111 #ifdef __LITTLE_ENDIAN__
54112 __ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
54113   uint16x8_t __ret;
54114   __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
54115   return __ret;
54116 }
54117 #else
54118 __ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
54119   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
54120   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
54121   uint16x8_t __ret;
54122   __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
54123   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
54124   return __ret;
54125 }
54126 #endif
54127 
54128 #ifdef __LITTLE_ENDIAN__
54129 __ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) {
54130   int8x16_t __ret;
54131   __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
54132   return __ret;
54133 }
54134 #else
54135 __ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) {
54136   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
54137   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
54138   int8x16_t __ret;
54139   __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
54140   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
54141   return __ret;
54142 }
54143 #endif
54144 
54145 #ifdef __LITTLE_ENDIAN__
54146 __ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) {
54147   float64x2_t __ret;
54148   __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
54149   return __ret;
54150 }
54151 #else
54152 __ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) {
54153   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
54154   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
54155   float64x2_t __ret;
54156   __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
54157   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
54158   return __ret;
54159 }
54160 #endif
54161 
54162 #ifdef __LITTLE_ENDIAN__
54163 __ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) {
54164   float32x4_t __ret;
54165   __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
54166   return __ret;
54167 }
54168 #else
54169 __ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) {
54170   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
54171   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
54172   float32x4_t __ret;
54173   __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
54174   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
54175   return __ret;
54176 }
54177 #endif
54178 
54179 #ifdef __LITTLE_ENDIAN__
54180 __ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) {
54181   int32x4_t __ret;
54182   __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
54183   return __ret;
54184 }
54185 #else
54186 __ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) {
54187   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
54188   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
54189   int32x4_t __ret;
54190   __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
54191   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
54192   return __ret;
54193 }
54194 #endif
54195 
54196 #ifdef __LITTLE_ENDIAN__
54197 __ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) {
54198   int64x2_t __ret;
54199   __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
54200   return __ret;
54201 }
54202 #else
54203 __ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) {
54204   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
54205   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
54206   int64x2_t __ret;
54207   __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
54208   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
54209   return __ret;
54210 }
54211 #endif
54212 
54213 #ifdef __LITTLE_ENDIAN__
54214 __ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) {
54215   int16x8_t __ret;
54216   __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
54217   return __ret;
54218 }
54219 #else
54220 __ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) {
54221   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
54222   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
54223   int16x8_t __ret;
54224   __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
54225   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
54226   return __ret;
54227 }
54228 #endif
54229 
54230 #ifdef __LITTLE_ENDIAN__
54231 __ai uint64_t vpaddd_u64(uint64x2_t __p0) {
54232   uint64_t __ret;
54233   __ret = (uint64_t) __builtin_neon_vpaddd_u64((int8x16_t)__p0);
54234   return __ret;
54235 }
54236 #else
54237 __ai uint64_t vpaddd_u64(uint64x2_t __p0) {
54238   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
54239   uint64_t __ret;
54240   __ret = (uint64_t) __builtin_neon_vpaddd_u64((int8x16_t)__rev0);
54241   return __ret;
54242 }
54243 #endif
54244 
54245 #ifdef __LITTLE_ENDIAN__
54246 __ai float64_t vpaddd_f64(float64x2_t __p0) {
54247   float64_t __ret;
54248   __ret = (float64_t) __builtin_neon_vpaddd_f64((int8x16_t)__p0);
54249   return __ret;
54250 }
54251 #else
54252 __ai float64_t vpaddd_f64(float64x2_t __p0) {
54253   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
54254   float64_t __ret;
54255   __ret = (float64_t) __builtin_neon_vpaddd_f64((int8x16_t)__rev0);
54256   return __ret;
54257 }
54258 #endif
54259 
54260 #ifdef __LITTLE_ENDIAN__
54261 __ai int64_t vpaddd_s64(int64x2_t __p0) {
54262   int64_t __ret;
54263   __ret = (int64_t) __builtin_neon_vpaddd_s64((int8x16_t)__p0);
54264   return __ret;
54265 }
54266 #else
54267 __ai int64_t vpaddd_s64(int64x2_t __p0) {
54268   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
54269   int64_t __ret;
54270   __ret = (int64_t) __builtin_neon_vpaddd_s64((int8x16_t)__rev0);
54271   return __ret;
54272 }
54273 #endif
54274 
54275 #ifdef __LITTLE_ENDIAN__
54276 __ai float32_t vpadds_f32(float32x2_t __p0) {
54277   float32_t __ret;
54278   __ret = (float32_t) __builtin_neon_vpadds_f32((int8x8_t)__p0);
54279   return __ret;
54280 }
54281 #else
54282 __ai float32_t vpadds_f32(float32x2_t __p0) {
54283   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
54284   float32_t __ret;
54285   __ret = (float32_t) __builtin_neon_vpadds_f32((int8x8_t)__rev0);
54286   return __ret;
54287 }
54288 #endif
54289 
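/* vpmaxq_*: pairwise maximum over adjacent element pairs of the two operands;
 * vpmaxqd_f64 and vpmaxs_f32 reduce a two-element vector to its larger element. */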
54290 #ifdef __LITTLE_ENDIAN__
54291 __ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
54292   uint8x16_t __ret;
54293   __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
54294   return __ret;
54295 }
54296 #else
54297 __ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
54298   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
54299   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
54300   uint8x16_t __ret;
54301   __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
54302   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
54303   return __ret;
54304 }
54305 #endif
54306 
54307 #ifdef __LITTLE_ENDIAN__
54308 __ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
54309   uint32x4_t __ret;
54310   __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
54311   return __ret;
54312 }
54313 #else
54314 __ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
54315   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
54316   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
54317   uint32x4_t __ret;
54318   __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
54319   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
54320   return __ret;
54321 }
54322 #endif
54323 
54324 #ifdef __LITTLE_ENDIAN__
54325 __ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
54326   uint16x8_t __ret;
54327   __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
54328   return __ret;
54329 }
54330 #else
54331 __ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
54332   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
54333   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
54334   uint16x8_t __ret;
54335   __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
54336   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
54337   return __ret;
54338 }
54339 #endif
54340 
54341 #ifdef __LITTLE_ENDIAN__
54342 __ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
54343   int8x16_t __ret;
54344   __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
54345   return __ret;
54346 }
54347 #else
54348 __ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
54349   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
54350   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
54351   int8x16_t __ret;
54352   __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
54353   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
54354   return __ret;
54355 }
54356 #endif
54357 
54358 #ifdef __LITTLE_ENDIAN__
54359 __ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
54360   float64x2_t __ret;
54361   __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
54362   return __ret;
54363 }
54364 #else
54365 __ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
54366   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
54367   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
54368   float64x2_t __ret;
54369   __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
54370   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
54371   return __ret;
54372 }
54373 #endif
54374 
54375 #ifdef __LITTLE_ENDIAN__
54376 __ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
54377   float32x4_t __ret;
54378   __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
54379   return __ret;
54380 }
54381 #else
54382 __ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
54383   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
54384   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
54385   float32x4_t __ret;
54386   __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
54387   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
54388   return __ret;
54389 }
54390 #endif
54391 
54392 #ifdef __LITTLE_ENDIAN__
54393 __ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
54394   int32x4_t __ret;
54395   __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
54396   return __ret;
54397 }
54398 #else
54399 __ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
54400   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
54401   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
54402   int32x4_t __ret;
54403   __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
54404   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
54405   return __ret;
54406 }
54407 #endif
54408 
54409 #ifdef __LITTLE_ENDIAN__
54410 __ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
54411   int16x8_t __ret;
54412   __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
54413   return __ret;
54414 }
54415 #else
54416 __ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
54417   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
54418   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
54419   int16x8_t __ret;
54420   __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
54421   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
54422   return __ret;
54423 }
54424 #endif
54425 
54426 #ifdef __LITTLE_ENDIAN__
54427 __ai float64_t vpmaxqd_f64(float64x2_t __p0) {
54428   float64_t __ret;
54429   __ret = (float64_t) __builtin_neon_vpmaxqd_f64((int8x16_t)__p0);
54430   return __ret;
54431 }
54432 #else
54433 __ai float64_t vpmaxqd_f64(float64x2_t __p0) {
54434   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
54435   float64_t __ret;
54436   __ret = (float64_t) __builtin_neon_vpmaxqd_f64((int8x16_t)__rev0);
54437   return __ret;
54438 }
54439 #endif
54440 
54441 #ifdef __LITTLE_ENDIAN__
54442 __ai float32_t vpmaxs_f32(float32x2_t __p0) {
54443   float32_t __ret;
54444   __ret = (float32_t) __builtin_neon_vpmaxs_f32((int8x8_t)__p0);
54445   return __ret;
54446 }
54447 #else
54448 __ai float32_t vpmaxs_f32(float32x2_t __p0) {
54449   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
54450   float32_t __ret;
54451   __ret = (float32_t) __builtin_neon_vpmaxs_f32((int8x8_t)__rev0);
54452   return __ret;
54453 }
54454 #endif
54455 
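/* vpmaxnm*: pairwise maximum using the IEEE 754-2008 maxNum rule, so a pair
 * consisting of a NaN and a number yields the number. */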
54456 #ifdef __LITTLE_ENDIAN__
54457 __ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
54458   float64x2_t __ret;
54459   __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
54460   return __ret;
54461 }
54462 #else
54463 __ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
54464   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
54465   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
54466   float64x2_t __ret;
54467   __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
54468   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
54469   return __ret;
54470 }
54471 #endif
54472 
54473 #ifdef __LITTLE_ENDIAN__
54474 __ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
54475   float32x4_t __ret;
54476   __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
54477   return __ret;
54478 }
54479 #else
54480 __ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
54481   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
54482   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
54483   float32x4_t __ret;
54484   __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
54485   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
54486   return __ret;
54487 }
54488 #endif
54489 
54490 #ifdef __LITTLE_ENDIAN__
54491 __ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
54492   float32x2_t __ret;
54493   __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
54494   return __ret;
54495 }
54496 #else
54497 __ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
54498   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
54499   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
54500   float32x2_t __ret;
54501   __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
54502   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
54503   return __ret;
54504 }
54505 #endif
54506 
54507 #ifdef __LITTLE_ENDIAN__
54508 __ai float64_t vpmaxnmqd_f64(float64x2_t __p0) {
54509   float64_t __ret;
54510   __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64((int8x16_t)__p0);
54511   return __ret;
54512 }
54513 #else
54514 __ai float64_t vpmaxnmqd_f64(float64x2_t __p0) {
54515   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
54516   float64_t __ret;
54517   __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64((int8x16_t)__rev0);
54518   return __ret;
54519 }
54520 #endif
54521 
54522 #ifdef __LITTLE_ENDIAN__
54523 __ai float32_t vpmaxnms_f32(float32x2_t __p0) {
54524   float32_t __ret;
54525   __ret = (float32_t) __builtin_neon_vpmaxnms_f32((int8x8_t)__p0);
54526   return __ret;
54527 }
54528 #else
54529 __ai float32_t vpmaxnms_f32(float32x2_t __p0) {
54530   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
54531   float32_t __ret;
54532   __ret = (float32_t) __builtin_neon_vpmaxnms_f32((int8x8_t)__rev0);
54533   return __ret;
54534 }
54535 #endif
54536 
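/* vpminq_*: pairwise minimum over adjacent element pairs; vpminqd_f64 and
 * vpmins_f32 reduce a two-element vector to its smaller element. */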
54537 #ifdef __LITTLE_ENDIAN__
54538 __ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
54539   uint8x16_t __ret;
54540   __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
54541   return __ret;
54542 }
54543 #else
54544 __ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
54545   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
54546   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
54547   uint8x16_t __ret;
54548   __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
54549   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
54550   return __ret;
54551 }
54552 #endif
54553 
54554 #ifdef __LITTLE_ENDIAN__
54555 __ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
54556   uint32x4_t __ret;
54557   __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
54558   return __ret;
54559 }
54560 #else
54561 __ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
54562   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
54563   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
54564   uint32x4_t __ret;
54565   __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
54566   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
54567   return __ret;
54568 }
54569 #endif
54570 
54571 #ifdef __LITTLE_ENDIAN__
54572 __ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
54573   uint16x8_t __ret;
54574   __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
54575   return __ret;
54576 }
54577 #else
54578 __ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
54579   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
54580   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
54581   uint16x8_t __ret;
54582   __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
54583   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
54584   return __ret;
54585 }
54586 #endif
54587 
54588 #ifdef __LITTLE_ENDIAN__
54589 __ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) {
54590   int8x16_t __ret;
54591   __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
54592   return __ret;
54593 }
54594 #else
54595 __ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) {
54596   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
54597   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
54598   int8x16_t __ret;
54599   __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
54600   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
54601   return __ret;
54602 }
54603 #endif
54604 
54605 #ifdef __LITTLE_ENDIAN__
54606 __ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) {
54607   float64x2_t __ret;
54608   __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
54609   return __ret;
54610 }
54611 #else
54612 __ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) {
54613   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
54614   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
54615   float64x2_t __ret;
54616   __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
54617   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
54618   return __ret;
54619 }
54620 #endif
54621 
54622 #ifdef __LITTLE_ENDIAN__
54623 __ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) {
54624   float32x4_t __ret;
54625   __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
54626   return __ret;
54627 }
54628 #else
54629 __ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) {
54630   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
54631   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
54632   float32x4_t __ret;
54633   __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
54634   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
54635   return __ret;
54636 }
54637 #endif
54638 
54639 #ifdef __LITTLE_ENDIAN__
54640 __ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) {
54641   int32x4_t __ret;
54642   __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
54643   return __ret;
54644 }
54645 #else
54646 __ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) {
54647   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
54648   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
54649   int32x4_t __ret;
54650   __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
54651   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
54652   return __ret;
54653 }
54654 #endif
54655 
54656 #ifdef __LITTLE_ENDIAN__
54657 __ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) {
54658   int16x8_t __ret;
54659   __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
54660   return __ret;
54661 }
54662 #else
54663 __ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) {
54664   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
54665   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
54666   int16x8_t __ret;
54667   __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
54668   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
54669   return __ret;
54670 }
54671 #endif
54672 
54673 #ifdef __LITTLE_ENDIAN__
54674 __ai float64_t vpminqd_f64(float64x2_t __p0) {
54675   float64_t __ret;
54676   __ret = (float64_t) __builtin_neon_vpminqd_f64((int8x16_t)__p0);
54677   return __ret;
54678 }
54679 #else
54680 __ai float64_t vpminqd_f64(float64x2_t __p0) {
54681   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
54682   float64_t __ret;
54683   __ret = (float64_t) __builtin_neon_vpminqd_f64((int8x16_t)__rev0);
54684   return __ret;
54685 }
54686 #endif
54687 
54688 #ifdef __LITTLE_ENDIAN__
54689 __ai float32_t vpmins_f32(float32x2_t __p0) {
54690   float32_t __ret;
54691   __ret = (float32_t) __builtin_neon_vpmins_f32((int8x8_t)__p0);
54692   return __ret;
54693 }
54694 #else
54695 __ai float32_t vpmins_f32(float32x2_t __p0) {
54696   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
54697   float32_t __ret;
54698   __ret = (float32_t) __builtin_neon_vpmins_f32((int8x8_t)__rev0);
54699   return __ret;
54700 }
54701 #endif
54702 
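/* vpminnm*: pairwise minimum using the IEEE 754-2008 minNum rule, so a pair
 * consisting of a NaN and a number yields the number. */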
54703 #ifdef __LITTLE_ENDIAN__
54704 __ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
54705   float64x2_t __ret;
54706   __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
54707   return __ret;
54708 }
54709 #else
54710 __ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
54711   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
54712   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
54713   float64x2_t __ret;
54714   __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
54715   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
54716   return __ret;
54717 }
54718 #endif
54719 
54720 #ifdef __LITTLE_ENDIAN__
54721 __ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
54722   float32x4_t __ret;
54723   __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
54724   return __ret;
54725 }
54726 #else
54727 __ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
54728   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
54729   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
54730   float32x4_t __ret;
54731   __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
54732   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
54733   return __ret;
54734 }
54735 #endif
54736 
54737 #ifdef __LITTLE_ENDIAN__
54738 __ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) {
54739   float32x2_t __ret;
54740   __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
54741   return __ret;
54742 }
54743 #else
54744 __ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) {
54745   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
54746   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
54747   float32x2_t __ret;
54748   __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
54749   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
54750   return __ret;
54751 }
54752 #endif
54753 
54754 #ifdef __LITTLE_ENDIAN__
54755 __ai float64_t vpminnmqd_f64(float64x2_t __p0) {
54756   float64_t __ret;
54757   __ret = (float64_t) __builtin_neon_vpminnmqd_f64((int8x16_t)__p0);
54758   return __ret;
54759 }
54760 #else
54761 __ai float64_t vpminnmqd_f64(float64x2_t __p0) {
54762   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
54763   float64_t __ret;
54764   __ret = (float64_t) __builtin_neon_vpminnmqd_f64((int8x16_t)__rev0);
54765   return __ret;
54766 }
54767 #endif
54768 
54769 #ifdef __LITTLE_ENDIAN__
54770 __ai float32_t vpminnms_f32(float32x2_t __p0) {
54771   float32_t __ret;
54772   __ret = (float32_t) __builtin_neon_vpminnms_f32((int8x8_t)__p0);
54773   return __ret;
54774 }
54775 #else
54776 __ai float32_t vpminnms_f32(float32x2_t __p0) {
54777   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
54778   float32_t __ret;
54779   __ret = (float32_t) __builtin_neon_vpminnms_f32((int8x8_t)__rev0);
54780   return __ret;
54781 }
54782 #endif
54783 
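/* vqabs*: saturating absolute value; the most negative representable value
 * saturates to the maximum positive value instead of wrapping. */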
54784 #ifdef __LITTLE_ENDIAN__
54785 __ai int64x2_t vqabsq_s64(int64x2_t __p0) {
54786   int64x2_t __ret;
54787   __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 35);
54788   return __ret;
54789 }
54790 #else
54791 __ai int64x2_t vqabsq_s64(int64x2_t __p0) {
54792   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
54793   int64x2_t __ret;
54794   __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 35);
54795   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
54796   return __ret;
54797 }
54798 #endif
54799 
54800 #ifdef __LITTLE_ENDIAN__
54801 __ai int64x1_t vqabs_s64(int64x1_t __p0) {
54802   int64x1_t __ret;
54803   __ret = (int64x1_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 3);
54804   return __ret;
54805 }
54806 #else
54807 __ai int64x1_t vqabs_s64(int64x1_t __p0) {
54808   int64x1_t __ret;
54809   __ret = (int64x1_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 3);
54810   return __ret;
54811 }
54812 #endif
54813 
54814 #ifdef __LITTLE_ENDIAN__
54815 __ai int8_t vqabsb_s8(int8_t __p0) {
54816   int8_t __ret;
54817   __ret = (int8_t) __builtin_neon_vqabsb_s8(__p0);
54818   return __ret;
54819 }
54820 #else
54821 __ai int8_t vqabsb_s8(int8_t __p0) {
54822   int8_t __ret;
54823   __ret = (int8_t) __builtin_neon_vqabsb_s8(__p0);
54824   return __ret;
54825 }
54826 #endif
54827 
54828 #ifdef __LITTLE_ENDIAN__
54829 __ai int32_t vqabss_s32(int32_t __p0) {
54830   int32_t __ret;
54831   __ret = (int32_t) __builtin_neon_vqabss_s32(__p0);
54832   return __ret;
54833 }
54834 #else
54835 __ai int32_t vqabss_s32(int32_t __p0) {
54836   int32_t __ret;
54837   __ret = (int32_t) __builtin_neon_vqabss_s32(__p0);
54838   return __ret;
54839 }
54840 #endif
54841 
54842 #ifdef __LITTLE_ENDIAN__
54843 __ai int64_t vqabsd_s64(int64_t __p0) {
54844   int64_t __ret;
54845   __ret = (int64_t) __builtin_neon_vqabsd_s64(__p0);
54846   return __ret;
54847 }
54848 #else
54849 __ai int64_t vqabsd_s64(int64_t __p0) {
54850   int64_t __ret;
54851   __ret = (int64_t) __builtin_neon_vqabsd_s64(__p0);
54852   return __ret;
54853 }
54854 #endif
54855 
54856 #ifdef __LITTLE_ENDIAN__
54857 __ai int16_t vqabsh_s16(int16_t __p0) {
54858   int16_t __ret;
54859   __ret = (int16_t) __builtin_neon_vqabsh_s16(__p0);
54860   return __ret;
54861 }
54862 #else
54863 __ai int16_t vqabsh_s16(int16_t __p0) {
54864   int16_t __ret;
54865   __ret = (int16_t) __builtin_neon_vqabsh_s16(__p0);
54866   return __ret;
54867 }
54868 #endif
54869 
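/* vqadd{b,s,d,h}_*: saturating add on scalar values; results clamp to the
 * range of the element type instead of wrapping. */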
54870 #ifdef __LITTLE_ENDIAN__
54871 __ai uint8_t vqaddb_u8(uint8_t __p0, uint8_t __p1) {
54872   uint8_t __ret;
54873   __ret = (uint8_t) __builtin_neon_vqaddb_u8(__p0, __p1);
54874   return __ret;
54875 }
54876 #else
54877 __ai uint8_t vqaddb_u8(uint8_t __p0, uint8_t __p1) {
54878   uint8_t __ret;
54879   __ret = (uint8_t) __builtin_neon_vqaddb_u8(__p0, __p1);
54880   return __ret;
54881 }
54882 #endif
54883 
54884 #ifdef __LITTLE_ENDIAN__
54885 __ai uint32_t vqadds_u32(uint32_t __p0, uint32_t __p1) {
54886   uint32_t __ret;
54887   __ret = (uint32_t) __builtin_neon_vqadds_u32(__p0, __p1);
54888   return __ret;
54889 }
54890 #else
54891 __ai uint32_t vqadds_u32(uint32_t __p0, uint32_t __p1) {
54892   uint32_t __ret;
54893   __ret = (uint32_t) __builtin_neon_vqadds_u32(__p0, __p1);
54894   return __ret;
54895 }
54896 #endif
54897 
54898 #ifdef __LITTLE_ENDIAN__
54899 __ai uint64_t vqaddd_u64(uint64_t __p0, uint64_t __p1) {
54900   uint64_t __ret;
54901   __ret = (uint64_t) __builtin_neon_vqaddd_u64(__p0, __p1);
54902   return __ret;
54903 }
54904 #else
54905 __ai uint64_t vqaddd_u64(uint64_t __p0, uint64_t __p1) {
54906   uint64_t __ret;
54907   __ret = (uint64_t) __builtin_neon_vqaddd_u64(__p0, __p1);
54908   return __ret;
54909 }
54910 #endif
54911 
54912 #ifdef __LITTLE_ENDIAN__
54913 __ai uint16_t vqaddh_u16(uint16_t __p0, uint16_t __p1) {
54914   uint16_t __ret;
54915   __ret = (uint16_t) __builtin_neon_vqaddh_u16(__p0, __p1);
54916   return __ret;
54917 }
54918 #else
54919 __ai uint16_t vqaddh_u16(uint16_t __p0, uint16_t __p1) {
54920   uint16_t __ret;
54921   __ret = (uint16_t) __builtin_neon_vqaddh_u16(__p0, __p1);
54922   return __ret;
54923 }
54924 #endif
54925 
54926 #ifdef __LITTLE_ENDIAN__
54927 __ai int8_t vqaddb_s8(int8_t __p0, int8_t __p1) {
54928   int8_t __ret;
54929   __ret = (int8_t) __builtin_neon_vqaddb_s8(__p0, __p1);
54930   return __ret;
54931 }
54932 #else
54933 __ai int8_t vqaddb_s8(int8_t __p0, int8_t __p1) {
54934   int8_t __ret;
54935   __ret = (int8_t) __builtin_neon_vqaddb_s8(__p0, __p1);
54936   return __ret;
54937 }
54938 #endif
54939 
54940 #ifdef __LITTLE_ENDIAN__
54941 __ai int32_t vqadds_s32(int32_t __p0, int32_t __p1) {
54942   int32_t __ret;
54943   __ret = (int32_t) __builtin_neon_vqadds_s32(__p0, __p1);
54944   return __ret;
54945 }
54946 #else
54947 __ai int32_t vqadds_s32(int32_t __p0, int32_t __p1) {
54948   int32_t __ret;
54949   __ret = (int32_t) __builtin_neon_vqadds_s32(__p0, __p1);
54950   return __ret;
54951 }
54952 #endif
54953 
54954 #ifdef __LITTLE_ENDIAN__
54955 __ai int64_t vqaddd_s64(int64_t __p0, int64_t __p1) {
54956   int64_t __ret;
54957   __ret = (int64_t) __builtin_neon_vqaddd_s64(__p0, __p1);
54958   return __ret;
54959 }
54960 #else
54961 __ai int64_t vqaddd_s64(int64_t __p0, int64_t __p1) {
54962   int64_t __ret;
54963   __ret = (int64_t) __builtin_neon_vqaddd_s64(__p0, __p1);
54964   return __ret;
54965 }
54966 #endif
54967 
54968 #ifdef __LITTLE_ENDIAN__
54969 __ai int16_t vqaddh_s16(int16_t __p0, int16_t __p1) {
54970   int16_t __ret;
54971   __ret = (int16_t) __builtin_neon_vqaddh_s16(__p0, __p1);
54972   return __ret;
54973 }
54974 #else
54975 __ai int16_t vqaddh_s16(int16_t __p0, int16_t __p1) {
54976   int16_t __ret;
54977   __ret = (int16_t) __builtin_neon_vqaddh_s16(__p0, __p1);
54978   return __ret;
54979 }
54980 #endif
54981 
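/* Saturating doubling multiply-accumulate long (vqdmlal): computes __p0 + 2 * __p1 * __p2
   with the product widened to twice the source element width and the result saturated.
   Scalar, high-half and lane-selected forms follow. */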
54982 #ifdef __LITTLE_ENDIAN__
54983 __ai int64_t vqdmlals_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
54984   int64_t __ret;
54985   __ret = (int64_t) __builtin_neon_vqdmlals_s32(__p0, __p1, __p2);
54986   return __ret;
54987 }
54988 #else
54989 __ai int64_t vqdmlals_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
54990   int64_t __ret;
54991   __ret = (int64_t) __builtin_neon_vqdmlals_s32(__p0, __p1, __p2);
54992   return __ret;
54993 }
54994 #endif
54995 
54996 #ifdef __LITTLE_ENDIAN__
54997 __ai int32_t vqdmlalh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
54998   int32_t __ret;
54999   __ret = (int32_t) __builtin_neon_vqdmlalh_s16(__p0, __p1, __p2);
55000   return __ret;
55001 }
55002 #else
55003 __ai int32_t vqdmlalh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
55004   int32_t __ret;
55005   __ret = (int32_t) __builtin_neon_vqdmlalh_s16(__p0, __p1, __p2);
55006   return __ret;
55007 }
55008 #endif
55009 
55010 #ifdef __LITTLE_ENDIAN__
55011 __ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
55012   int64x2_t __ret;
55013   __ret = vqdmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
55014   return __ret;
55015 }
55016 #else
55017 __ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
55018   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
55019   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
55020   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
55021   int64x2_t __ret;
55022   __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
55023   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
55024   return __ret;
55025 }
55026 #endif
55027 
55028 #ifdef __LITTLE_ENDIAN__
55029 __ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
55030   int32x4_t __ret;
55031   __ret = vqdmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
55032   return __ret;
55033 }
55034 #else
55035 __ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
55036   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
55037   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
55038   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
55039   int32x4_t __ret;
55040   __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
55041   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
55042   return __ret;
55043 }
55044 #endif
55045 
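/* The lane-selected vqdmlal variants are macros rather than inline functions so the lane
   index can stay an integer constant expression; the big-endian bodies reverse each vector
   before and after the operation so lane numbering matches the little-endian definition. */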
55046 #ifdef __LITTLE_ENDIAN__
55047 #define vqdmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
55048   int64x2_t __s0 = __p0; \
55049   int32x4_t __s1 = __p1; \
55050   int32x2_t __s2 = __p2; \
55051   int64x2_t __ret; \
55052   __ret = vqdmlal_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
55053   __ret; \
55054 })
55055 #else
55056 #define vqdmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
55057   int64x2_t __s0 = __p0; \
55058   int32x4_t __s1 = __p1; \
55059   int32x2_t __s2 = __p2; \
55060   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
55061   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
55062   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
55063   int64x2_t __ret; \
55064   __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
55065   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
55066   __ret; \
55067 })
55068 #endif
55069 
55070 #ifdef __LITTLE_ENDIAN__
55071 #define vqdmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
55072   int32x4_t __s0 = __p0; \
55073   int16x8_t __s1 = __p1; \
55074   int16x4_t __s2 = __p2; \
55075   int32x4_t __ret; \
55076   __ret = vqdmlal_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
55077   __ret; \
55078 })
55079 #else
55080 #define vqdmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
55081   int32x4_t __s0 = __p0; \
55082   int16x8_t __s1 = __p1; \
55083   int16x4_t __s2 = __p2; \
55084   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
55085   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
55086   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
55087   int32x4_t __ret; \
55088   __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
55089   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
55090   __ret; \
55091 })
55092 #endif
55093 
55094 #ifdef __LITTLE_ENDIAN__
55095 #define vqdmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
55096   int64x2_t __s0 = __p0; \
55097   int32x4_t __s1 = __p1; \
55098   int32x4_t __s2 = __p2; \
55099   int64x2_t __ret; \
55100   __ret = vqdmlal_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
55101   __ret; \
55102 })
55103 #else
55104 #define vqdmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
55105   int64x2_t __s0 = __p0; \
55106   int32x4_t __s1 = __p1; \
55107   int32x4_t __s2 = __p2; \
55108   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
55109   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
55110   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
55111   int64x2_t __ret; \
55112   __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
55113   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
55114   __ret; \
55115 })
55116 #endif
55117 
55118 #ifdef __LITTLE_ENDIAN__
55119 #define vqdmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
55120   int32x4_t __s0 = __p0; \
55121   int16x8_t __s1 = __p1; \
55122   int16x8_t __s2 = __p2; \
55123   int32x4_t __ret; \
55124   __ret = vqdmlal_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
55125   __ret; \
55126 })
55127 #else
55128 #define vqdmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
55129   int32x4_t __s0 = __p0; \
55130   int16x8_t __s1 = __p1; \
55131   int16x8_t __s2 = __p2; \
55132   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
55133   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
55134   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
55135   int32x4_t __ret; \
55136   __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
55137   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
55138   __ret; \
55139 })
55140 #endif
55141 
55142 #ifdef __LITTLE_ENDIAN__
55143 __ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
55144   int64x2_t __ret;
55145   __ret = vqdmlal_n_s32(__p0, vget_high_s32(__p1), __p2);
55146   return __ret;
55147 }
55148 #else
55149 __ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
55150   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
55151   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
55152   int64x2_t __ret;
55153   __ret = __noswap_vqdmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
55154   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
55155   return __ret;
55156 }
55157 #endif
55158 
55159 #ifdef __LITTLE_ENDIAN__
55160 __ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
55161   int32x4_t __ret;
55162   __ret = vqdmlal_n_s16(__p0, vget_high_s16(__p1), __p2);
55163   return __ret;
55164 }
55165 #else
55166 __ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
55167   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
55168   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
55169   int32x4_t __ret;
55170   __ret = __noswap_vqdmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
55171   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
55172   return __ret;
55173 }
55174 #endif
55175 
55176 #ifdef __LITTLE_ENDIAN__
55177 #define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
55178   int64_t __s0 = __p0; \
55179   int32_t __s1 = __p1; \
55180   int32x2_t __s2 = __p2; \
55181   int64_t __ret; \
55182   __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, (int8x8_t)__s2, __p3); \
55183   __ret; \
55184 })
55185 #else
55186 #define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
55187   int64_t __s0 = __p0; \
55188   int32_t __s1 = __p1; \
55189   int32x2_t __s2 = __p2; \
55190   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
55191   int64_t __ret; \
55192   __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, (int8x8_t)__rev2, __p3); \
55193   __ret; \
55194 })
55195 #endif
55196 
55197 #ifdef __LITTLE_ENDIAN__
55198 #define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
55199   int32_t __s0 = __p0; \
55200   int16_t __s1 = __p1; \
55201   int16x4_t __s2 = __p2; \
55202   int32_t __ret; \
55203   __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, (int8x8_t)__s2, __p3); \
55204   __ret; \
55205 })
55206 #else
55207 #define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
55208   int32_t __s0 = __p0; \
55209   int16_t __s1 = __p1; \
55210   int16x4_t __s2 = __p2; \
55211   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
55212   int32_t __ret; \
55213   __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, (int8x8_t)__rev2, __p3); \
55214   __ret; \
55215 })
55216 #endif
55217 
55218 #ifdef __LITTLE_ENDIAN__
55219 #define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
55220   int64_t __s0 = __p0; \
55221   int32_t __s1 = __p1; \
55222   int32x4_t __s2 = __p2; \
55223   int64_t __ret; \
55224   __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, (int8x16_t)__s2, __p3); \
55225   __ret; \
55226 })
55227 #else
55228 #define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
55229   int64_t __s0 = __p0; \
55230   int32_t __s1 = __p1; \
55231   int32x4_t __s2 = __p2; \
55232   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
55233   int64_t __ret; \
55234   __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, (int8x16_t)__rev2, __p3); \
55235   __ret; \
55236 })
55237 #endif
55238 
55239 #ifdef __LITTLE_ENDIAN__
55240 #define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
55241   int32_t __s0 = __p0; \
55242   int16_t __s1 = __p1; \
55243   int16x8_t __s2 = __p2; \
55244   int32_t __ret; \
55245   __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, (int8x16_t)__s2, __p3); \
55246   __ret; \
55247 })
55248 #else
55249 #define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
55250   int32_t __s0 = __p0; \
55251   int16_t __s1 = __p1; \
55252   int16x8_t __s2 = __p2; \
55253   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
55254   int32_t __ret; \
55255   __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, (int8x16_t)__rev2, __p3); \
55256   __ret; \
55257 })
55258 #endif
55259 
55260 #ifdef __LITTLE_ENDIAN__
55261 #define vqdmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
55262   int64x2_t __s0 = __p0; \
55263   int32x2_t __s1 = __p1; \
55264   int32x4_t __s2 = __p2; \
55265   int64x2_t __ret; \
55266   __ret = vqdmlal_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
55267   __ret; \
55268 })
55269 #else
55270 #define vqdmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
55271   int64x2_t __s0 = __p0; \
55272   int32x2_t __s1 = __p1; \
55273   int32x4_t __s2 = __p2; \
55274   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
55275   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
55276   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
55277   int64x2_t __ret; \
55278   __ret = __noswap_vqdmlal_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
55279   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
55280   __ret; \
55281 })
55282 #endif
55283 
55284 #ifdef __LITTLE_ENDIAN__
55285 #define vqdmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
55286   int32x4_t __s0 = __p0; \
55287   int16x4_t __s1 = __p1; \
55288   int16x8_t __s2 = __p2; \
55289   int32x4_t __ret; \
55290   __ret = vqdmlal_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
55291   __ret; \
55292 })
55293 #else
55294 #define vqdmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
55295   int32x4_t __s0 = __p0; \
55296   int16x4_t __s1 = __p1; \
55297   int16x8_t __s2 = __p2; \
55298   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
55299   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
55300   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
55301   int32x4_t __ret; \
55302   __ret = __noswap_vqdmlal_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
55303   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
55304   __ret; \
55305 })
55306 #endif
55307 
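/* Saturating doubling multiply-subtract long (vqdmlsl): __p0 - 2 * __p1 * __p2, widened and
   saturated, with the same scalar, high-half and lane forms as vqdmlal above. */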
55308 #ifdef __LITTLE_ENDIAN__
55309 __ai int64_t vqdmlsls_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
55310   int64_t __ret;
55311   __ret = (int64_t) __builtin_neon_vqdmlsls_s32(__p0, __p1, __p2);
55312   return __ret;
55313 }
55314 #else
55315 __ai int64_t vqdmlsls_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
55316   int64_t __ret;
55317   __ret = (int64_t) __builtin_neon_vqdmlsls_s32(__p0, __p1, __p2);
55318   return __ret;
55319 }
55320 #endif
55321 
55322 #ifdef __LITTLE_ENDIAN__
55323 __ai int32_t vqdmlslh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
55324   int32_t __ret;
55325   __ret = (int32_t) __builtin_neon_vqdmlslh_s16(__p0, __p1, __p2);
55326   return __ret;
55327 }
55328 #else
55329 __ai int32_t vqdmlslh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
55330   int32_t __ret;
55331   __ret = (int32_t) __builtin_neon_vqdmlslh_s16(__p0, __p1, __p2);
55332   return __ret;
55333 }
55334 #endif
55335 
55336 #ifdef __LITTLE_ENDIAN__
55337 __ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
55338   int64x2_t __ret;
55339   __ret = vqdmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
55340   return __ret;
55341 }
55342 #else
55343 __ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
55344   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
55345   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
55346   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
55347   int64x2_t __ret;
55348   __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
55349   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
55350   return __ret;
55351 }
55352 #endif
55353 
55354 #ifdef __LITTLE_ENDIAN__
55355 __ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
55356   int32x4_t __ret;
55357   __ret = vqdmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
55358   return __ret;
55359 }
55360 #else
55361 __ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
55362   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
55363   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
55364   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
55365   int32x4_t __ret;
55366   __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
55367   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
55368   return __ret;
55369 }
55370 #endif
55371 
55372 #ifdef __LITTLE_ENDIAN__
55373 #define vqdmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
55374   int64x2_t __s0 = __p0; \
55375   int32x4_t __s1 = __p1; \
55376   int32x2_t __s2 = __p2; \
55377   int64x2_t __ret; \
55378   __ret = vqdmlsl_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
55379   __ret; \
55380 })
55381 #else
55382 #define vqdmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
55383   int64x2_t __s0 = __p0; \
55384   int32x4_t __s1 = __p1; \
55385   int32x2_t __s2 = __p2; \
55386   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
55387   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
55388   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
55389   int64x2_t __ret; \
55390   __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
55391   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
55392   __ret; \
55393 })
55394 #endif
55395 
55396 #ifdef __LITTLE_ENDIAN__
55397 #define vqdmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
55398   int32x4_t __s0 = __p0; \
55399   int16x8_t __s1 = __p1; \
55400   int16x4_t __s2 = __p2; \
55401   int32x4_t __ret; \
55402   __ret = vqdmlsl_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
55403   __ret; \
55404 })
55405 #else
55406 #define vqdmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
55407   int32x4_t __s0 = __p0; \
55408   int16x8_t __s1 = __p1; \
55409   int16x4_t __s2 = __p2; \
55410   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
55411   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
55412   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
55413   int32x4_t __ret; \
55414   __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
55415   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
55416   __ret; \
55417 })
55418 #endif
55419 
55420 #ifdef __LITTLE_ENDIAN__
55421 #define vqdmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
55422   int64x2_t __s0 = __p0; \
55423   int32x4_t __s1 = __p1; \
55424   int32x4_t __s2 = __p2; \
55425   int64x2_t __ret; \
55426   __ret = vqdmlsl_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
55427   __ret; \
55428 })
55429 #else
55430 #define vqdmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
55431   int64x2_t __s0 = __p0; \
55432   int32x4_t __s1 = __p1; \
55433   int32x4_t __s2 = __p2; \
55434   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
55435   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
55436   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
55437   int64x2_t __ret; \
55438   __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
55439   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
55440   __ret; \
55441 })
55442 #endif
55443 
55444 #ifdef __LITTLE_ENDIAN__
55445 #define vqdmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
55446   int32x4_t __s0 = __p0; \
55447   int16x8_t __s1 = __p1; \
55448   int16x8_t __s2 = __p2; \
55449   int32x4_t __ret; \
55450   __ret = vqdmlsl_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
55451   __ret; \
55452 })
55453 #else
55454 #define vqdmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
55455   int32x4_t __s0 = __p0; \
55456   int16x8_t __s1 = __p1; \
55457   int16x8_t __s2 = __p2; \
55458   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
55459   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
55460   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
55461   int32x4_t __ret; \
55462   __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
55463   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
55464   __ret; \
55465 })
55466 #endif
55467 
55468 #ifdef __LITTLE_ENDIAN__
55469 __ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
55470   int64x2_t __ret;
55471   __ret = vqdmlsl_n_s32(__p0, vget_high_s32(__p1), __p2);
55472   return __ret;
55473 }
55474 #else
55475 __ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
55476   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
55477   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
55478   int64x2_t __ret;
55479   __ret = __noswap_vqdmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
55480   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
55481   return __ret;
55482 }
55483 #endif
55484 
55485 #ifdef __LITTLE_ENDIAN__
55486 __ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
55487   int32x4_t __ret;
55488   __ret = vqdmlsl_n_s16(__p0, vget_high_s16(__p1), __p2);
55489   return __ret;
55490 }
55491 #else
55492 __ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
55493   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
55494   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
55495   int32x4_t __ret;
55496   __ret = __noswap_vqdmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
55497   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
55498   return __ret;
55499 }
55500 #endif
55501 
55502 #ifdef __LITTLE_ENDIAN__
55503 #define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
55504   int64_t __s0 = __p0; \
55505   int32_t __s1 = __p1; \
55506   int32x2_t __s2 = __p2; \
55507   int64_t __ret; \
55508   __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, (int8x8_t)__s2, __p3); \
55509   __ret; \
55510 })
55511 #else
55512 #define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
55513   int64_t __s0 = __p0; \
55514   int32_t __s1 = __p1; \
55515   int32x2_t __s2 = __p2; \
55516   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
55517   int64_t __ret; \
55518   __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, (int8x8_t)__rev2, __p3); \
55519   __ret; \
55520 })
55521 #endif
55522 
55523 #ifdef __LITTLE_ENDIAN__
55524 #define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
55525   int32_t __s0 = __p0; \
55526   int16_t __s1 = __p1; \
55527   int16x4_t __s2 = __p2; \
55528   int32_t __ret; \
55529   __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, (int8x8_t)__s2, __p3); \
55530   __ret; \
55531 })
55532 #else
55533 #define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
55534   int32_t __s0 = __p0; \
55535   int16_t __s1 = __p1; \
55536   int16x4_t __s2 = __p2; \
55537   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
55538   int32_t __ret; \
55539   __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, (int8x8_t)__rev2, __p3); \
55540   __ret; \
55541 })
55542 #endif
55543 
55544 #ifdef __LITTLE_ENDIAN__
55545 #define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
55546   int64_t __s0 = __p0; \
55547   int32_t __s1 = __p1; \
55548   int32x4_t __s2 = __p2; \
55549   int64_t __ret; \
55550   __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, (int8x16_t)__s2, __p3); \
55551   __ret; \
55552 })
55553 #else
55554 #define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
55555   int64_t __s0 = __p0; \
55556   int32_t __s1 = __p1; \
55557   int32x4_t __s2 = __p2; \
55558   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
55559   int64_t __ret; \
55560   __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, (int8x16_t)__rev2, __p3); \
55561   __ret; \
55562 })
55563 #endif
55564 
55565 #ifdef __LITTLE_ENDIAN__
55566 #define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
55567   int32_t __s0 = __p0; \
55568   int16_t __s1 = __p1; \
55569   int16x8_t __s2 = __p2; \
55570   int32_t __ret; \
55571   __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, (int8x16_t)__s2, __p3); \
55572   __ret; \
55573 })
55574 #else
55575 #define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
55576   int32_t __s0 = __p0; \
55577   int16_t __s1 = __p1; \
55578   int16x8_t __s2 = __p2; \
55579   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
55580   int32_t __ret; \
55581   __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, (int8x16_t)__rev2, __p3); \
55582   __ret; \
55583 })
55584 #endif
55585 
55586 #ifdef __LITTLE_ENDIAN__
55587 #define vqdmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
55588   int64x2_t __s0 = __p0; \
55589   int32x2_t __s1 = __p1; \
55590   int32x4_t __s2 = __p2; \
55591   int64x2_t __ret; \
55592   __ret = vqdmlsl_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
55593   __ret; \
55594 })
55595 #else
55596 #define vqdmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
55597   int64x2_t __s0 = __p0; \
55598   int32x2_t __s1 = __p1; \
55599   int32x4_t __s2 = __p2; \
55600   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
55601   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
55602   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
55603   int64x2_t __ret; \
55604   __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
55605   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
55606   __ret; \
55607 })
55608 #endif
55609 
55610 #ifdef __LITTLE_ENDIAN__
55611 #define vqdmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
55612   int32x4_t __s0 = __p0; \
55613   int16x4_t __s1 = __p1; \
55614   int16x8_t __s2 = __p2; \
55615   int32x4_t __ret; \
55616   __ret = vqdmlsl_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
55617   __ret; \
55618 })
55619 #else
55620 #define vqdmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
55621   int32x4_t __s0 = __p0; \
55622   int16x4_t __s1 = __p1; \
55623   int16x8_t __s2 = __p2; \
55624   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
55625   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
55626   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
55627   int32x4_t __ret; \
55628   __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
55629   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
55630   __ret; \
55631 })
55632 #endif
55633 
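/* Scalar saturating doubling multiply returning the high half (vqdmulh). The extra
   __noswap_* copies carry no endian fix-ups and are reused by the lane macros below. */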
55634 #ifdef __LITTLE_ENDIAN__
55635 __ai int32_t vqdmulhs_s32(int32_t __p0, int32_t __p1) {
55636   int32_t __ret;
55637   __ret = (int32_t) __builtin_neon_vqdmulhs_s32(__p0, __p1);
55638   return __ret;
55639 }
55640 #else
55641 __ai int32_t vqdmulhs_s32(int32_t __p0, int32_t __p1) {
55642   int32_t __ret;
55643   __ret = (int32_t) __builtin_neon_vqdmulhs_s32(__p0, __p1);
55644   return __ret;
55645 }
55646 __ai int32_t __noswap_vqdmulhs_s32(int32_t __p0, int32_t __p1) {
55647   int32_t __ret;
55648   __ret = (int32_t) __builtin_neon_vqdmulhs_s32(__p0, __p1);
55649   return __ret;
55650 }
55651 #endif
55652 
55653 #ifdef __LITTLE_ENDIAN__
55654 __ai int16_t vqdmulhh_s16(int16_t __p0, int16_t __p1) {
55655   int16_t __ret;
55656   __ret = (int16_t) __builtin_neon_vqdmulhh_s16(__p0, __p1);
55657   return __ret;
55658 }
55659 #else
55660 __ai int16_t vqdmulhh_s16(int16_t __p0, int16_t __p1) {
55661   int16_t __ret;
55662   __ret = (int16_t) __builtin_neon_vqdmulhh_s16(__p0, __p1);
55663   return __ret;
55664 }
55665 __ai int16_t __noswap_vqdmulhh_s16(int16_t __p0, int16_t __p1) {
55666   int16_t __ret;
55667   __ret = (int16_t) __builtin_neon_vqdmulhh_s16(__p0, __p1);
55668   return __ret;
55669 }
55670 #endif
55671 
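/* Lane forms of the scalar vqdmulh. The numeric suffixes on the temporaries (__s0_150,
   __ret_151, ...) appear to exist to keep each expansion's locals distinct when these
   macros are nested inside one another. */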
55672 #ifdef __LITTLE_ENDIAN__
55673 #define vqdmulhs_lane_s32(__p0_150, __p1_150, __p2_150) __extension__ ({ \
55674   int32_t __s0_150 = __p0_150; \
55675   int32x2_t __s1_150 = __p1_150; \
55676   int32_t __ret_150; \
55677   __ret_150 = vqdmulhs_s32(__s0_150, vget_lane_s32(__s1_150, __p2_150)); \
55678   __ret_150; \
55679 })
55680 #else
55681 #define vqdmulhs_lane_s32(__p0_151, __p1_151, __p2_151) __extension__ ({ \
55682   int32_t __s0_151 = __p0_151; \
55683   int32x2_t __s1_151 = __p1_151; \
55684   int32x2_t __rev1_151;  __rev1_151 = __builtin_shufflevector(__s1_151, __s1_151, 1, 0); \
55685   int32_t __ret_151; \
55686   __ret_151 = __noswap_vqdmulhs_s32(__s0_151, __noswap_vget_lane_s32(__rev1_151, __p2_151)); \
55687   __ret_151; \
55688 })
55689 #endif
55690 
55691 #ifdef __LITTLE_ENDIAN__
55692 #define vqdmulhh_lane_s16(__p0_152, __p1_152, __p2_152) __extension__ ({ \
55693   int16_t __s0_152 = __p0_152; \
55694   int16x4_t __s1_152 = __p1_152; \
55695   int16_t __ret_152; \
55696   __ret_152 = vqdmulhh_s16(__s0_152, vget_lane_s16(__s1_152, __p2_152)); \
55697   __ret_152; \
55698 })
55699 #else
55700 #define vqdmulhh_lane_s16(__p0_153, __p1_153, __p2_153) __extension__ ({ \
55701   int16_t __s0_153 = __p0_153; \
55702   int16x4_t __s1_153 = __p1_153; \
55703   int16x4_t __rev1_153;  __rev1_153 = __builtin_shufflevector(__s1_153, __s1_153, 3, 2, 1, 0); \
55704   int16_t __ret_153; \
55705   __ret_153 = __noswap_vqdmulhh_s16(__s0_153, __noswap_vget_lane_s16(__rev1_153, __p2_153)); \
55706   __ret_153; \
55707 })
55708 #endif
55709 
55710 #ifdef __LITTLE_ENDIAN__
55711 #define vqdmulhs_laneq_s32(__p0_154, __p1_154, __p2_154) __extension__ ({ \
55712   int32_t __s0_154 = __p0_154; \
55713   int32x4_t __s1_154 = __p1_154; \
55714   int32_t __ret_154; \
55715   __ret_154 = vqdmulhs_s32(__s0_154, vgetq_lane_s32(__s1_154, __p2_154)); \
55716   __ret_154; \
55717 })
55718 #else
55719 #define vqdmulhs_laneq_s32(__p0_155, __p1_155, __p2_155) __extension__ ({ \
55720   int32_t __s0_155 = __p0_155; \
55721   int32x4_t __s1_155 = __p1_155; \
55722   int32x4_t __rev1_155;  __rev1_155 = __builtin_shufflevector(__s1_155, __s1_155, 3, 2, 1, 0); \
55723   int32_t __ret_155; \
55724   __ret_155 = __noswap_vqdmulhs_s32(__s0_155, __noswap_vgetq_lane_s32(__rev1_155, __p2_155)); \
55725   __ret_155; \
55726 })
55727 #endif
55728 
55729 #ifdef __LITTLE_ENDIAN__
55730 #define vqdmulhh_laneq_s16(__p0_156, __p1_156, __p2_156) __extension__ ({ \
55731   int16_t __s0_156 = __p0_156; \
55732   int16x8_t __s1_156 = __p1_156; \
55733   int16_t __ret_156; \
55734   __ret_156 = vqdmulhh_s16(__s0_156, vgetq_lane_s16(__s1_156, __p2_156)); \
55735   __ret_156; \
55736 })
55737 #else
55738 #define vqdmulhh_laneq_s16(__p0_157, __p1_157, __p2_157) __extension__ ({ \
55739   int16_t __s0_157 = __p0_157; \
55740   int16x8_t __s1_157 = __p1_157; \
55741   int16x8_t __rev1_157;  __rev1_157 = __builtin_shufflevector(__s1_157, __s1_157, 7, 6, 5, 4, 3, 2, 1, 0); \
55742   int16_t __ret_157; \
55743   __ret_157 = __noswap_vqdmulhh_s16(__s0_157, __noswap_vgetq_lane_s16(__rev1_157, __p2_157)); \
55744   __ret_157; \
55745 })
55746 #endif
55747 
55748 #ifdef __LITTLE_ENDIAN__
55749 #define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
55750   int32x4_t __s0 = __p0; \
55751   int32x4_t __s1 = __p1; \
55752   int32x4_t __ret; \
55753   __ret = vqdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
55754   __ret; \
55755 })
55756 #else
55757 #define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
55758   int32x4_t __s0 = __p0; \
55759   int32x4_t __s1 = __p1; \
55760   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
55761   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
55762   int32x4_t __ret; \
55763   __ret = __noswap_vqdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
55764   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
55765   __ret; \
55766 })
55767 #endif
55768 
55769 #ifdef __LITTLE_ENDIAN__
55770 #define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
55771   int16x8_t __s0 = __p0; \
55772   int16x8_t __s1 = __p1; \
55773   int16x8_t __ret; \
55774   __ret = vqdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
55775   __ret; \
55776 })
55777 #else
55778 #define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
55779   int16x8_t __s0 = __p0; \
55780   int16x8_t __s1 = __p1; \
55781   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
55782   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
55783   int16x8_t __ret; \
55784   __ret = __noswap_vqdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
55785   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
55786   __ret; \
55787 })
55788 #endif
55789 
55790 #ifdef __LITTLE_ENDIAN__
55791 #define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
55792   int32x2_t __s0 = __p0; \
55793   int32x4_t __s1 = __p1; \
55794   int32x2_t __ret; \
55795   __ret = vqdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
55796   __ret; \
55797 })
55798 #else
55799 #define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
55800   int32x2_t __s0 = __p0; \
55801   int32x4_t __s1 = __p1; \
55802   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
55803   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
55804   int32x2_t __ret; \
55805   __ret = __noswap_vqdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
55806   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
55807   __ret; \
55808 })
55809 #endif
55810 
55811 #ifdef __LITTLE_ENDIAN__
55812 #define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
55813   int16x4_t __s0 = __p0; \
55814   int16x8_t __s1 = __p1; \
55815   int16x4_t __ret; \
55816   __ret = vqdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
55817   __ret; \
55818 })
55819 #else
55820 #define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
55821   int16x4_t __s0 = __p0; \
55822   int16x8_t __s1 = __p1; \
55823   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
55824   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
55825   int16x4_t __ret; \
55826   __ret = __noswap_vqdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
55827   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
55828   __ret; \
55829 })
55830 #endif
55831 
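/* Saturating doubling multiply long (vqdmull): the widening product 2 * __p0 * __p1 with
   saturation, in scalar, high-half and lane-selected variants. */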
55832 #ifdef __LITTLE_ENDIAN__
55833 __ai int64_t vqdmulls_s32(int32_t __p0, int32_t __p1) {
55834   int64_t __ret;
55835   __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1);
55836   return __ret;
55837 }
55838 #else
55839 __ai int64_t vqdmulls_s32(int32_t __p0, int32_t __p1) {
55840   int64_t __ret;
55841   __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1);
55842   return __ret;
55843 }
55844 __ai int64_t __noswap_vqdmulls_s32(int32_t __p0, int32_t __p1) {
55845   int64_t __ret;
55846   __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1);
55847   return __ret;
55848 }
55849 #endif
55850 
55851 #ifdef __LITTLE_ENDIAN__
55852 __ai int32_t vqdmullh_s16(int16_t __p0, int16_t __p1) {
55853   int32_t __ret;
55854   __ret = (int32_t) __builtin_neon_vqdmullh_s16(__p0, __p1);
55855   return __ret;
55856 }
55857 #else
55858 __ai int32_t vqdmullh_s16(int16_t __p0, int16_t __p1) {
55859   int32_t __ret;
55860   __ret = (int32_t) __builtin_neon_vqdmullh_s16(__p0, __p1);
55861   return __ret;
55862 }
55863 __ai int32_t __noswap_vqdmullh_s16(int16_t __p0, int16_t __p1) {
55864   int32_t __ret;
55865   __ret = (int32_t) __builtin_neon_vqdmullh_s16(__p0, __p1);
55866   return __ret;
55867 }
55868 #endif
55869 
55870 #ifdef __LITTLE_ENDIAN__
55871 __ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
55872   int64x2_t __ret;
55873   __ret = vqdmull_s32(vget_high_s32(__p0), vget_high_s32(__p1));
55874   return __ret;
55875 }
55876 #else
55877 __ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
55878   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
55879   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
55880   int64x2_t __ret;
55881   __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
55882   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
55883   return __ret;
55884 }
55885 #endif
55886 
55887 #ifdef __LITTLE_ENDIAN__
55888 __ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
55889   int32x4_t __ret;
55890   __ret = vqdmull_s16(vget_high_s16(__p0), vget_high_s16(__p1));
55891   return __ret;
55892 }
55893 #else
55894 __ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
55895   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
55896   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
55897   int32x4_t __ret;
55898   __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
55899   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
55900   return __ret;
55901 }
55902 #endif
55903 
55904 #ifdef __LITTLE_ENDIAN__
55905 #define vqdmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \
55906   int32x4_t __s0 = __p0; \
55907   int32x2_t __s1 = __p1; \
55908   int64x2_t __ret; \
55909   __ret = vqdmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
55910   __ret; \
55911 })
55912 #else
55913 #define vqdmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \
55914   int32x4_t __s0 = __p0; \
55915   int32x2_t __s1 = __p1; \
55916   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
55917   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
55918   int64x2_t __ret; \
55919   __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
55920   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
55921   __ret; \
55922 })
55923 #endif
55924 
55925 #ifdef __LITTLE_ENDIAN__
55926 #define vqdmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \
55927   int16x8_t __s0 = __p0; \
55928   int16x4_t __s1 = __p1; \
55929   int32x4_t __ret; \
55930   __ret = vqdmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
55931   __ret; \
55932 })
55933 #else
55934 #define vqdmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \
55935   int16x8_t __s0 = __p0; \
55936   int16x4_t __s1 = __p1; \
55937   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
55938   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
55939   int32x4_t __ret; \
55940   __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
55941   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
55942   __ret; \
55943 })
55944 #endif
55945 
55946 #ifdef __LITTLE_ENDIAN__
55947 #define vqdmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
55948   int32x4_t __s0 = __p0; \
55949   int32x4_t __s1 = __p1; \
55950   int64x2_t __ret; \
55951   __ret = vqdmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
55952   __ret; \
55953 })
55954 #else
55955 #define vqdmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
55956   int32x4_t __s0 = __p0; \
55957   int32x4_t __s1 = __p1; \
55958   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
55959   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
55960   int64x2_t __ret; \
55961   __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
55962   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
55963   __ret; \
55964 })
55965 #endif
55966 
55967 #ifdef __LITTLE_ENDIAN__
55968 #define vqdmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
55969   int16x8_t __s0 = __p0; \
55970   int16x8_t __s1 = __p1; \
55971   int32x4_t __ret; \
55972   __ret = vqdmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
55973   __ret; \
55974 })
55975 #else
55976 #define vqdmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
55977   int16x8_t __s0 = __p0; \
55978   int16x8_t __s1 = __p1; \
55979   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
55980   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
55981   int32x4_t __ret; \
55982   __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
55983   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
55984   __ret; \
55985 })
55986 #endif
55987 
55988 #ifdef __LITTLE_ENDIAN__
55989 __ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
55990   int64x2_t __ret;
55991   __ret = vqdmull_n_s32(vget_high_s32(__p0), __p1);
55992   return __ret;
55993 }
55994 #else
55995 __ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
55996   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
55997   int64x2_t __ret;
55998   __ret = __noswap_vqdmull_n_s32(__noswap_vget_high_s32(__rev0), __p1);
55999   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
56000   return __ret;
56001 }
56002 #endif
56003 
56004 #ifdef __LITTLE_ENDIAN__
56005 __ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
56006   int32x4_t __ret;
56007   __ret = vqdmull_n_s16(vget_high_s16(__p0), __p1);
56008   return __ret;
56009 }
56010 #else
56011 __ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
56012   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
56013   int32x4_t __ret;
56014   __ret = __noswap_vqdmull_n_s16(__noswap_vget_high_s16(__rev0), __p1);
56015   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
56016   return __ret;
56017 }
56018 #endif
56019 
56020 #ifdef __LITTLE_ENDIAN__
56021 #define vqdmulls_lane_s32(__p0_158, __p1_158, __p2_158) __extension__ ({ \
56022   int32_t __s0_158 = __p0_158; \
56023   int32x2_t __s1_158 = __p1_158; \
56024   int64_t __ret_158; \
56025   __ret_158 = vqdmulls_s32(__s0_158, vget_lane_s32(__s1_158, __p2_158)); \
56026   __ret_158; \
56027 })
56028 #else
56029 #define vqdmulls_lane_s32(__p0_159, __p1_159, __p2_159) __extension__ ({ \
56030   int32_t __s0_159 = __p0_159; \
56031   int32x2_t __s1_159 = __p1_159; \
56032   int32x2_t __rev1_159;  __rev1_159 = __builtin_shufflevector(__s1_159, __s1_159, 1, 0); \
56033   int64_t __ret_159; \
56034   __ret_159 = __noswap_vqdmulls_s32(__s0_159, __noswap_vget_lane_s32(__rev1_159, __p2_159)); \
56035   __ret_159; \
56036 })
56037 #endif
56038 
56039 #ifdef __LITTLE_ENDIAN__
56040 #define vqdmullh_lane_s16(__p0_160, __p1_160, __p2_160) __extension__ ({ \
56041   int16_t __s0_160 = __p0_160; \
56042   int16x4_t __s1_160 = __p1_160; \
56043   int32_t __ret_160; \
56044   __ret_160 = vqdmullh_s16(__s0_160, vget_lane_s16(__s1_160, __p2_160)); \
56045   __ret_160; \
56046 })
56047 #else
56048 #define vqdmullh_lane_s16(__p0_161, __p1_161, __p2_161) __extension__ ({ \
56049   int16_t __s0_161 = __p0_161; \
56050   int16x4_t __s1_161 = __p1_161; \
56051   int16x4_t __rev1_161;  __rev1_161 = __builtin_shufflevector(__s1_161, __s1_161, 3, 2, 1, 0); \
56052   int32_t __ret_161; \
56053   __ret_161 = __noswap_vqdmullh_s16(__s0_161, __noswap_vget_lane_s16(__rev1_161, __p2_161)); \
56054   __ret_161; \
56055 })
56056 #endif
56057 
56058 #ifdef __LITTLE_ENDIAN__
56059 #define vqdmulls_laneq_s32(__p0_162, __p1_162, __p2_162) __extension__ ({ \
56060   int32_t __s0_162 = __p0_162; \
56061   int32x4_t __s1_162 = __p1_162; \
56062   int64_t __ret_162; \
56063   __ret_162 = vqdmulls_s32(__s0_162, vgetq_lane_s32(__s1_162, __p2_162)); \
56064   __ret_162; \
56065 })
56066 #else
56067 #define vqdmulls_laneq_s32(__p0_163, __p1_163, __p2_163) __extension__ ({ \
56068   int32_t __s0_163 = __p0_163; \
56069   int32x4_t __s1_163 = __p1_163; \
56070   int32x4_t __rev1_163;  __rev1_163 = __builtin_shufflevector(__s1_163, __s1_163, 3, 2, 1, 0); \
56071   int64_t __ret_163; \
56072   __ret_163 = __noswap_vqdmulls_s32(__s0_163, __noswap_vgetq_lane_s32(__rev1_163, __p2_163)); \
56073   __ret_163; \
56074 })
56075 #endif
56076 
56077 #ifdef __LITTLE_ENDIAN__
56078 #define vqdmullh_laneq_s16(__p0_164, __p1_164, __p2_164) __extension__ ({ \
56079   int16_t __s0_164 = __p0_164; \
56080   int16x8_t __s1_164 = __p1_164; \
56081   int32_t __ret_164; \
56082   __ret_164 = vqdmullh_s16(__s0_164, vgetq_lane_s16(__s1_164, __p2_164)); \
56083   __ret_164; \
56084 })
56085 #else
56086 #define vqdmullh_laneq_s16(__p0_165, __p1_165, __p2_165) __extension__ ({ \
56087   int16_t __s0_165 = __p0_165; \
56088   int16x8_t __s1_165 = __p1_165; \
56089   int16x8_t __rev1_165;  __rev1_165 = __builtin_shufflevector(__s1_165, __s1_165, 7, 6, 5, 4, 3, 2, 1, 0); \
56090   int32_t __ret_165; \
56091   __ret_165 = __noswap_vqdmullh_s16(__s0_165, __noswap_vgetq_lane_s16(__rev1_165, __p2_165)); \
56092   __ret_165; \
56093 })
56094 #endif
56095 
56096 #ifdef __LITTLE_ENDIAN__
56097 #define vqdmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
56098   int32x2_t __s0 = __p0; \
56099   int32x4_t __s1 = __p1; \
56100   int64x2_t __ret; \
56101   __ret = vqdmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
56102   __ret; \
56103 })
56104 #else
56105 #define vqdmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
56106   int32x2_t __s0 = __p0; \
56107   int32x4_t __s1 = __p1; \
56108   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
56109   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
56110   int64x2_t __ret; \
56111   __ret = __noswap_vqdmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
56112   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
56113   __ret; \
56114 })
56115 #endif
56116 
56117 #ifdef __LITTLE_ENDIAN__
56118 #define vqdmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
56119   int16x4_t __s0 = __p0; \
56120   int16x8_t __s1 = __p1; \
56121   int32x4_t __ret; \
56122   __ret = vqdmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
56123   __ret; \
56124 })
56125 #else
56126 #define vqdmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
56127   int16x4_t __s0 = __p0; \
56128   int16x8_t __s1 = __p1; \
56129   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
56130   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
56131   int32x4_t __ret; \
56132   __ret = __noswap_vqdmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
56133   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
56134   __ret; \
56135 })
56136 #endif
56137 
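/* Scalar saturating narrow (vqmovn): the value is narrowed to half its width and clamped to
   the destination range, e.g. vqmovns_s32(70000) returns 32767. */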
56138 #ifdef __LITTLE_ENDIAN__
56139 __ai int16_t vqmovns_s32(int32_t __p0) {
56140   int16_t __ret;
56141   __ret = (int16_t) __builtin_neon_vqmovns_s32(__p0);
56142   return __ret;
56143 }
56144 #else
56145 __ai int16_t vqmovns_s32(int32_t __p0) {
56146   int16_t __ret;
56147   __ret = (int16_t) __builtin_neon_vqmovns_s32(__p0);
56148   return __ret;
56149 }
56150 #endif
56151 
56152 #ifdef __LITTLE_ENDIAN__
56153 __ai int32_t vqmovnd_s64(int64_t __p0) {
56154   int32_t __ret;
56155   __ret = (int32_t) __builtin_neon_vqmovnd_s64(__p0);
56156   return __ret;
56157 }
56158 #else
56159 __ai int32_t vqmovnd_s64(int64_t __p0) {
56160   int32_t __ret;
56161   __ret = (int32_t) __builtin_neon_vqmovnd_s64(__p0);
56162   return __ret;
56163 }
56164 #endif
56165 
56166 #ifdef __LITTLE_ENDIAN__
56167 __ai int8_t vqmovnh_s16(int16_t __p0) {
56168   int8_t __ret;
56169   __ret = (int8_t) __builtin_neon_vqmovnh_s16(__p0);
56170   return __ret;
56171 }
56172 #else
56173 __ai int8_t vqmovnh_s16(int16_t __p0) {
56174   int8_t __ret;
56175   __ret = (int8_t) __builtin_neon_vqmovnh_s16(__p0);
56176   return __ret;
56177 }
56178 #endif
56179 
56180 #ifdef __LITTLE_ENDIAN__
56181 __ai uint16_t vqmovns_u32(uint32_t __p0) {
56182   uint16_t __ret;
56183   __ret = (uint16_t) __builtin_neon_vqmovns_u32(__p0);
56184   return __ret;
56185 }
56186 #else
56187 __ai uint16_t vqmovns_u32(uint32_t __p0) {
56188   uint16_t __ret;
56189   __ret = (uint16_t) __builtin_neon_vqmovns_u32(__p0);
56190   return __ret;
56191 }
56192 #endif
56193 
56194 #ifdef __LITTLE_ENDIAN__
56195 __ai uint32_t vqmovnd_u64(uint64_t __p0) {
56196   uint32_t __ret;
56197   __ret = (uint32_t) __builtin_neon_vqmovnd_u64(__p0);
56198   return __ret;
56199 }
56200 #else
56201 __ai uint32_t vqmovnd_u64(uint64_t __p0) {
56202   uint32_t __ret;
56203   __ret = (uint32_t) __builtin_neon_vqmovnd_u64(__p0);
56204   return __ret;
56205 }
56206 #endif
56207 
56208 #ifdef __LITTLE_ENDIAN__
56209 __ai uint8_t vqmovnh_u16(uint16_t __p0) {
56210   uint8_t __ret;
56211   __ret = (uint8_t) __builtin_neon_vqmovnh_u16(__p0);
56212   return __ret;
56213 }
56214 #else
56215 __ai uint8_t vqmovnh_u16(uint16_t __p0) {
56216   uint8_t __ret;
56217   __ret = (uint8_t) __builtin_neon_vqmovnh_u16(__p0);
56218   return __ret;
56219 }
56220 #endif
56221 
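/* vqmovn_high: narrows __p1 with saturation and places it in the upper half of the result,
   with __p0 supplying the lower half via vcombine. */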
56222 #ifdef __LITTLE_ENDIAN__
56223 __ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
56224   uint16x8_t __ret;
56225   __ret = vcombine_u16(__p0, vqmovn_u32(__p1));
56226   return __ret;
56227 }
56228 #else
56229 __ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
56230   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
56231   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
56232   uint16x8_t __ret;
56233   __ret = __noswap_vcombine_u16(__rev0, __noswap_vqmovn_u32(__rev1));
56234   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
56235   return __ret;
56236 }
56237 #endif
56238 
56239 #ifdef __LITTLE_ENDIAN__
56240 __ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
56241   uint32x4_t __ret;
56242   __ret = vcombine_u32(__p0, vqmovn_u64(__p1));
56243   return __ret;
56244 }
56245 #else
56246 __ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
56247   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
56248   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
56249   uint32x4_t __ret;
56250   __ret = __noswap_vcombine_u32(__rev0, __noswap_vqmovn_u64(__rev1));
56251   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
56252   return __ret;
56253 }
56254 #endif
56255 
56256 #ifdef __LITTLE_ENDIAN__
56257 __ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
56258   uint8x16_t __ret;
56259   __ret = vcombine_u8(__p0, vqmovn_u16(__p1));
56260   return __ret;
56261 }
56262 #else
56263 __ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
56264   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
56265   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
56266   uint8x16_t __ret;
56267   __ret = __noswap_vcombine_u8(__rev0, __noswap_vqmovn_u16(__rev1));
56268   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
56269   return __ret;
56270 }
56271 #endif
56272 
56273 #ifdef __LITTLE_ENDIAN__
56274 __ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
56275   int16x8_t __ret;
56276   __ret = vcombine_s16(__p0, vqmovn_s32(__p1));
56277   return __ret;
56278 }
56279 #else
56280 __ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
56281   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
56282   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
56283   int16x8_t __ret;
56284   __ret = __noswap_vcombine_s16(__rev0, __noswap_vqmovn_s32(__rev1));
56285   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
56286   return __ret;
56287 }
56288 #endif
56289 
56290 #ifdef __LITTLE_ENDIAN__
56291 __ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
56292   int32x4_t __ret;
56293   __ret = vcombine_s32(__p0, vqmovn_s64(__p1));
56294   return __ret;
56295 }
56296 #else
56297 __ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
56298   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
56299   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
56300   int32x4_t __ret;
56301   __ret = __noswap_vcombine_s32(__rev0, __noswap_vqmovn_s64(__rev1));
56302   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
56303   return __ret;
56304 }
56305 #endif
56306 
56307 #ifdef __LITTLE_ENDIAN__
56308 __ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
56309   int8x16_t __ret;
56310   __ret = vcombine_s8(__p0, vqmovn_s16(__p1));
56311   return __ret;
56312 }
56313 #else
56314 __ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
56315   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
56316   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
56317   int8x16_t __ret;
56318   __ret = __noswap_vcombine_s8(__rev0, __noswap_vqmovn_s16(__rev1));
56319   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
56320   return __ret;
56321 }
56322 #endif
56323 
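/* vqmovun* (scalar h/s/d forms): saturating narrow from signed to unsigned; negative inputs clamp to 0 and overlarge inputs clamp to the unsigned maximum of the narrower element. */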
56324 #ifdef __LITTLE_ENDIAN__
56325 __ai int16_t vqmovuns_s32(int32_t __p0) {
56326   int16_t __ret;
56327   __ret = (int16_t) __builtin_neon_vqmovuns_s32(__p0);
56328   return __ret;
56329 }
56330 #else
56331 __ai int16_t vqmovuns_s32(int32_t __p0) {
56332   int16_t __ret;
56333   __ret = (int16_t) __builtin_neon_vqmovuns_s32(__p0);
56334   return __ret;
56335 }
56336 #endif
56337 
56338 #ifdef __LITTLE_ENDIAN__
56339 __ai int32_t vqmovund_s64(int64_t __p0) {
56340   int32_t __ret;
56341   __ret = (int32_t) __builtin_neon_vqmovund_s64(__p0);
56342   return __ret;
56343 }
56344 #else
56345 __ai int32_t vqmovund_s64(int64_t __p0) {
56346   int32_t __ret;
56347   __ret = (int32_t) __builtin_neon_vqmovund_s64(__p0);
56348   return __ret;
56349 }
56350 #endif
56351 
56352 #ifdef __LITTLE_ENDIAN__
56353 __ai int8_t vqmovunh_s16(int16_t __p0) {
56354   int8_t __ret;
56355   __ret = (int8_t) __builtin_neon_vqmovunh_s16(__p0);
56356   return __ret;
56357 }
56358 #else
56359 __ai int8_t vqmovunh_s16(int16_t __p0) {
56360   int8_t __ret;
56361   __ret = (int8_t) __builtin_neon_vqmovunh_s16(__p0);
56362   return __ret;
56363 }
56364 #endif
56365 
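/* vqmovun_high_*: same signed-to-unsigned saturating narrow, with the narrowed __p1 placed in the high half above the existing low half __p0. */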
56366 #ifdef __LITTLE_ENDIAN__
56367 __ai int16x8_t vqmovun_high_s32(int16x4_t __p0, int32x4_t __p1) {
56368   int16x8_t __ret;
56369   __ret = vcombine_u16((uint16x4_t)(__p0), vqmovun_s32(__p1));
56370   return __ret;
56371 }
56372 #else
56373 __ai int16x8_t vqmovun_high_s32(int16x4_t __p0, int32x4_t __p1) {
56374   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
56375   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
56376   int16x8_t __ret;
56377   __ret = __noswap_vcombine_u16((uint16x4_t)(__rev0), __noswap_vqmovun_s32(__rev1));
56378   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
56379   return __ret;
56380 }
56381 #endif
56382 
56383 #ifdef __LITTLE_ENDIAN__
56384 __ai int32x4_t vqmovun_high_s64(int32x2_t __p0, int64x2_t __p1) {
56385   int32x4_t __ret;
56386   __ret = vcombine_u32((uint32x2_t)(__p0), vqmovun_s64(__p1));
56387   return __ret;
56388 }
56389 #else
56390 __ai int32x4_t vqmovun_high_s64(int32x2_t __p0, int64x2_t __p1) {
56391   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
56392   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
56393   int32x4_t __ret;
56394   __ret = __noswap_vcombine_u32((uint32x2_t)(__rev0), __noswap_vqmovun_s64(__rev1));
56395   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
56396   return __ret;
56397 }
56398 #endif
56399 
56400 #ifdef __LITTLE_ENDIAN__
56401 __ai int8x16_t vqmovun_high_s16(int8x8_t __p0, int16x8_t __p1) {
56402   int8x16_t __ret;
56403   __ret = vcombine_u8((uint8x8_t)(__p0), vqmovun_s16(__p1));
56404   return __ret;
56405 }
56406 #else
56407 __ai int8x16_t vqmovun_high_s16(int8x8_t __p0, int16x8_t __p1) {
56408   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
56409   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
56410   int8x16_t __ret;
56411   __ret = __noswap_vcombine_u8((uint8x8_t)(__rev0), __noswap_vqmovun_s16(__rev1));
56412   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
56413   return __ret;
56414 }
56415 #endif
56416 
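/* vqneg*: saturating negate; the most negative representable value maps to the most positive instead of wrapping. */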
56417 #ifdef __LITTLE_ENDIAN__
56418 __ai int64x2_t vqnegq_s64(int64x2_t __p0) {
56419   int64x2_t __ret;
56420   __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 35);
56421   return __ret;
56422 }
56423 #else
56424 __ai int64x2_t vqnegq_s64(int64x2_t __p0) {
56425   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
56426   int64x2_t __ret;
56427   __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 35);
56428   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
56429   return __ret;
56430 }
56431 #endif
56432 
56433 #ifdef __LITTLE_ENDIAN__
56434 __ai int64x1_t vqneg_s64(int64x1_t __p0) {
56435   int64x1_t __ret;
56436   __ret = (int64x1_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 3);
56437   return __ret;
56438 }
56439 #else
56440 __ai int64x1_t vqneg_s64(int64x1_t __p0) {
56441   int64x1_t __ret;
56442   __ret = (int64x1_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 3);
56443   return __ret;
56444 }
56445 #endif
56446 
56447 #ifdef __LITTLE_ENDIAN__
56448 __ai int8_t vqnegb_s8(int8_t __p0) {
56449   int8_t __ret;
56450   __ret = (int8_t) __builtin_neon_vqnegb_s8(__p0);
56451   return __ret;
56452 }
56453 #else
56454 __ai int8_t vqnegb_s8(int8_t __p0) {
56455   int8_t __ret;
56456   __ret = (int8_t) __builtin_neon_vqnegb_s8(__p0);
56457   return __ret;
56458 }
56459 #endif
56460 
56461 #ifdef __LITTLE_ENDIAN__
56462 __ai int32_t vqnegs_s32(int32_t __p0) {
56463   int32_t __ret;
56464   __ret = (int32_t) __builtin_neon_vqnegs_s32(__p0);
56465   return __ret;
56466 }
56467 #else
56468 __ai int32_t vqnegs_s32(int32_t __p0) {
56469   int32_t __ret;
56470   __ret = (int32_t) __builtin_neon_vqnegs_s32(__p0);
56471   return __ret;
56472 }
56473 #endif
56474 
56475 #ifdef __LITTLE_ENDIAN__
56476 __ai int64_t vqnegd_s64(int64_t __p0) {
56477   int64_t __ret;
56478   __ret = (int64_t) __builtin_neon_vqnegd_s64(__p0);
56479   return __ret;
56480 }
56481 #else
56482 __ai int64_t vqnegd_s64(int64_t __p0) {
56483   int64_t __ret;
56484   __ret = (int64_t) __builtin_neon_vqnegd_s64(__p0);
56485   return __ret;
56486 }
56487 #endif
56488 
56489 #ifdef __LITTLE_ENDIAN__
56490 __ai int16_t vqnegh_s16(int16_t __p0) {
56491   int16_t __ret;
56492   __ret = (int16_t) __builtin_neon_vqnegh_s16(__p0);
56493   return __ret;
56494 }
56495 #else
56496 __ai int16_t vqnegh_s16(int16_t __p0) {
56497   int16_t __ret;
56498   __ret = (int16_t) __builtin_neon_vqnegh_s16(__p0);
56499   return __ret;
56500 }
56501 #endif
56502 
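/* vqrdmulh*: saturating rounding doubling multiply returning the high half, roughly sat((2*a*b + (1 << (N-1))) >> N) for N-bit elements. */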
56503 #ifdef __LITTLE_ENDIAN__
56504 __ai int32_t vqrdmulhs_s32(int32_t __p0, int32_t __p1) {
56505   int32_t __ret;
56506   __ret = (int32_t) __builtin_neon_vqrdmulhs_s32(__p0, __p1);
56507   return __ret;
56508 }
56509 #else
56510 __ai int32_t vqrdmulhs_s32(int32_t __p0, int32_t __p1) {
56511   int32_t __ret;
56512   __ret = (int32_t) __builtin_neon_vqrdmulhs_s32(__p0, __p1);
56513   return __ret;
56514 }
56515 __ai int32_t __noswap_vqrdmulhs_s32(int32_t __p0, int32_t __p1) {
56516   int32_t __ret;
56517   __ret = (int32_t) __builtin_neon_vqrdmulhs_s32(__p0, __p1);
56518   return __ret;
56519 }
56520 #endif
56521 
56522 #ifdef __LITTLE_ENDIAN__
56523 __ai int16_t vqrdmulhh_s16(int16_t __p0, int16_t __p1) {
56524   int16_t __ret;
56525   __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1);
56526   return __ret;
56527 }
56528 #else
56529 __ai int16_t vqrdmulhh_s16(int16_t __p0, int16_t __p1) {
56530   int16_t __ret;
56531   __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1);
56532   return __ret;
56533 }
56534 __ai int16_t __noswap_vqrdmulhh_s16(int16_t __p0, int16_t __p1) {
56535   int16_t __ret;
56536   __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1);
56537   return __ret;
56538 }
56539 #endif
56540 
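/* The _lane/_laneq forms below are macros rather than functions so the lane index reaches the builtin as a compile-time constant. */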
56541 #ifdef __LITTLE_ENDIAN__
56542 #define vqrdmulhs_lane_s32(__p0_166, __p1_166, __p2_166) __extension__ ({ \
56543   int32_t __s0_166 = __p0_166; \
56544   int32x2_t __s1_166 = __p1_166; \
56545   int32_t __ret_166; \
56546   __ret_166 = vqrdmulhs_s32(__s0_166, vget_lane_s32(__s1_166, __p2_166)); \
56547   __ret_166; \
56548 })
56549 #else
56550 #define vqrdmulhs_lane_s32(__p0_167, __p1_167, __p2_167) __extension__ ({ \
56551   int32_t __s0_167 = __p0_167; \
56552   int32x2_t __s1_167 = __p1_167; \
56553   int32x2_t __rev1_167;  __rev1_167 = __builtin_shufflevector(__s1_167, __s1_167, 1, 0); \
56554   int32_t __ret_167; \
56555   __ret_167 = __noswap_vqrdmulhs_s32(__s0_167, __noswap_vget_lane_s32(__rev1_167, __p2_167)); \
56556   __ret_167; \
56557 })
56558 #endif
56559 
56560 #ifdef __LITTLE_ENDIAN__
56561 #define vqrdmulhh_lane_s16(__p0_168, __p1_168, __p2_168) __extension__ ({ \
56562   int16_t __s0_168 = __p0_168; \
56563   int16x4_t __s1_168 = __p1_168; \
56564   int16_t __ret_168; \
56565   __ret_168 = vqrdmulhh_s16(__s0_168, vget_lane_s16(__s1_168, __p2_168)); \
56566   __ret_168; \
56567 })
56568 #else
56569 #define vqrdmulhh_lane_s16(__p0_169, __p1_169, __p2_169) __extension__ ({ \
56570   int16_t __s0_169 = __p0_169; \
56571   int16x4_t __s1_169 = __p1_169; \
56572   int16x4_t __rev1_169;  __rev1_169 = __builtin_shufflevector(__s1_169, __s1_169, 3, 2, 1, 0); \
56573   int16_t __ret_169; \
56574   __ret_169 = __noswap_vqrdmulhh_s16(__s0_169, __noswap_vget_lane_s16(__rev1_169, __p2_169)); \
56575   __ret_169; \
56576 })
56577 #endif
56578 
56579 #ifdef __LITTLE_ENDIAN__
56580 #define vqrdmulhs_laneq_s32(__p0_170, __p1_170, __p2_170) __extension__ ({ \
56581   int32_t __s0_170 = __p0_170; \
56582   int32x4_t __s1_170 = __p1_170; \
56583   int32_t __ret_170; \
56584   __ret_170 = vqrdmulhs_s32(__s0_170, vgetq_lane_s32(__s1_170, __p2_170)); \
56585   __ret_170; \
56586 })
56587 #else
56588 #define vqrdmulhs_laneq_s32(__p0_171, __p1_171, __p2_171) __extension__ ({ \
56589   int32_t __s0_171 = __p0_171; \
56590   int32x4_t __s1_171 = __p1_171; \
56591   int32x4_t __rev1_171;  __rev1_171 = __builtin_shufflevector(__s1_171, __s1_171, 3, 2, 1, 0); \
56592   int32_t __ret_171; \
56593   __ret_171 = __noswap_vqrdmulhs_s32(__s0_171, __noswap_vgetq_lane_s32(__rev1_171, __p2_171)); \
56594   __ret_171; \
56595 })
56596 #endif
56597 
56598 #ifdef __LITTLE_ENDIAN__
56599 #define vqrdmulhh_laneq_s16(__p0_172, __p1_172, __p2_172) __extension__ ({ \
56600   int16_t __s0_172 = __p0_172; \
56601   int16x8_t __s1_172 = __p1_172; \
56602   int16_t __ret_172; \
56603   __ret_172 = vqrdmulhh_s16(__s0_172, vgetq_lane_s16(__s1_172, __p2_172)); \
56604   __ret_172; \
56605 })
56606 #else
56607 #define vqrdmulhh_laneq_s16(__p0_173, __p1_173, __p2_173) __extension__ ({ \
56608   int16_t __s0_173 = __p0_173; \
56609   int16x8_t __s1_173 = __p1_173; \
56610   int16x8_t __rev1_173;  __rev1_173 = __builtin_shufflevector(__s1_173, __s1_173, 7, 6, 5, 4, 3, 2, 1, 0); \
56611   int16_t __ret_173; \
56612   __ret_173 = __noswap_vqrdmulhh_s16(__s0_173, __noswap_vgetq_lane_s16(__rev1_173, __p2_173)); \
56613   __ret_173; \
56614 })
56615 #endif
56616 
56617 #ifdef __LITTLE_ENDIAN__
56618 #define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
56619   int32x4_t __s0 = __p0; \
56620   int32x4_t __s1 = __p1; \
56621   int32x4_t __ret; \
56622   __ret = vqrdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
56623   __ret; \
56624 })
56625 #else
56626 #define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
56627   int32x4_t __s0 = __p0; \
56628   int32x4_t __s1 = __p1; \
56629   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
56630   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
56631   int32x4_t __ret; \
56632   __ret = __noswap_vqrdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
56633   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
56634   __ret; \
56635 })
56636 #endif
56637 
56638 #ifdef __LITTLE_ENDIAN__
56639 #define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
56640   int16x8_t __s0 = __p0; \
56641   int16x8_t __s1 = __p1; \
56642   int16x8_t __ret; \
56643   __ret = vqrdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
56644   __ret; \
56645 })
56646 #else
56647 #define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
56648   int16x8_t __s0 = __p0; \
56649   int16x8_t __s1 = __p1; \
56650   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
56651   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
56652   int16x8_t __ret; \
56653   __ret = __noswap_vqrdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
56654   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
56655   __ret; \
56656 })
56657 #endif
56658 
56659 #ifdef __LITTLE_ENDIAN__
56660 #define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
56661   int32x2_t __s0 = __p0; \
56662   int32x4_t __s1 = __p1; \
56663   int32x2_t __ret; \
56664   __ret = vqrdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
56665   __ret; \
56666 })
56667 #else
56668 #define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
56669   int32x2_t __s0 = __p0; \
56670   int32x4_t __s1 = __p1; \
56671   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
56672   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
56673   int32x2_t __ret; \
56674   __ret = __noswap_vqrdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
56675   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
56676   __ret; \
56677 })
56678 #endif
56679 
56680 #ifdef __LITTLE_ENDIAN__
56681 #define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
56682   int16x4_t __s0 = __p0; \
56683   int16x8_t __s1 = __p1; \
56684   int16x4_t __ret; \
56685   __ret = vqrdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
56686   __ret; \
56687 })
56688 #else
56689 #define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
56690   int16x4_t __s0 = __p0; \
56691   int16x8_t __s1 = __p1; \
56692   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
56693   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
56694   int16x4_t __ret; \
56695   __ret = __noswap_vqrdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
56696   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
56697   __ret; \
56698 })
56699 #endif
56700 
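/* vqrshl*: saturating rounding shift left by a signed amount in the second operand; a negative count shifts right with rounding. */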
56701 #ifdef __LITTLE_ENDIAN__
56702 __ai uint8_t vqrshlb_u8(uint8_t __p0, uint8_t __p1) {
56703   uint8_t __ret;
56704   __ret = (uint8_t) __builtin_neon_vqrshlb_u8(__p0, __p1);
56705   return __ret;
56706 }
56707 #else
56708 __ai uint8_t vqrshlb_u8(uint8_t __p0, uint8_t __p1) {
56709   uint8_t __ret;
56710   __ret = (uint8_t) __builtin_neon_vqrshlb_u8(__p0, __p1);
56711   return __ret;
56712 }
56713 #endif
56714 
56715 #ifdef __LITTLE_ENDIAN__
56716 __ai uint32_t vqrshls_u32(uint32_t __p0, uint32_t __p1) {
56717   uint32_t __ret;
56718   __ret = (uint32_t) __builtin_neon_vqrshls_u32(__p0, __p1);
56719   return __ret;
56720 }
56721 #else
56722 __ai uint32_t vqrshls_u32(uint32_t __p0, uint32_t __p1) {
56723   uint32_t __ret;
56724   __ret = (uint32_t) __builtin_neon_vqrshls_u32(__p0, __p1);
56725   return __ret;
56726 }
56727 #endif
56728 
56729 #ifdef __LITTLE_ENDIAN__
56730 __ai uint64_t vqrshld_u64(uint64_t __p0, uint64_t __p1) {
56731   uint64_t __ret;
56732   __ret = (uint64_t) __builtin_neon_vqrshld_u64(__p0, __p1);
56733   return __ret;
56734 }
56735 #else
56736 __ai uint64_t vqrshld_u64(uint64_t __p0, uint64_t __p1) {
56737   uint64_t __ret;
56738   __ret = (uint64_t) __builtin_neon_vqrshld_u64(__p0, __p1);
56739   return __ret;
56740 }
56741 #endif
56742 
56743 #ifdef __LITTLE_ENDIAN__
56744 __ai uint16_t vqrshlh_u16(uint16_t __p0, uint16_t __p1) {
56745   uint16_t __ret;
56746   __ret = (uint16_t) __builtin_neon_vqrshlh_u16(__p0, __p1);
56747   return __ret;
56748 }
56749 #else
56750 __ai uint16_t vqrshlh_u16(uint16_t __p0, uint16_t __p1) {
56751   uint16_t __ret;
56752   __ret = (uint16_t) __builtin_neon_vqrshlh_u16(__p0, __p1);
56753   return __ret;
56754 }
56755 #endif
56756 
56757 #ifdef __LITTLE_ENDIAN__
56758 __ai int8_t vqrshlb_s8(int8_t __p0, int8_t __p1) {
56759   int8_t __ret;
56760   __ret = (int8_t) __builtin_neon_vqrshlb_s8(__p0, __p1);
56761   return __ret;
56762 }
56763 #else
56764 __ai int8_t vqrshlb_s8(int8_t __p0, int8_t __p1) {
56765   int8_t __ret;
56766   __ret = (int8_t) __builtin_neon_vqrshlb_s8(__p0, __p1);
56767   return __ret;
56768 }
56769 #endif
56770 
56771 #ifdef __LITTLE_ENDIAN__
56772 __ai int32_t vqrshls_s32(int32_t __p0, int32_t __p1) {
56773   int32_t __ret;
56774   __ret = (int32_t) __builtin_neon_vqrshls_s32(__p0, __p1);
56775   return __ret;
56776 }
56777 #else
56778 __ai int32_t vqrshls_s32(int32_t __p0, int32_t __p1) {
56779   int32_t __ret;
56780   __ret = (int32_t) __builtin_neon_vqrshls_s32(__p0, __p1);
56781   return __ret;
56782 }
56783 #endif
56784 
56785 #ifdef __LITTLE_ENDIAN__
56786 __ai int64_t vqrshld_s64(int64_t __p0, int64_t __p1) {
56787   int64_t __ret;
56788   __ret = (int64_t) __builtin_neon_vqrshld_s64(__p0, __p1);
56789   return __ret;
56790 }
56791 #else
56792 __ai int64_t vqrshld_s64(int64_t __p0, int64_t __p1) {
56793   int64_t __ret;
56794   __ret = (int64_t) __builtin_neon_vqrshld_s64(__p0, __p1);
56795   return __ret;
56796 }
56797 #endif
56798 
56799 #ifdef __LITTLE_ENDIAN__
56800 __ai int16_t vqrshlh_s16(int16_t __p0, int16_t __p1) {
56801   int16_t __ret;
56802   __ret = (int16_t) __builtin_neon_vqrshlh_s16(__p0, __p1);
56803   return __ret;
56804 }
56805 #else
56806 __ai int16_t vqrshlh_s16(int16_t __p0, int16_t __p1) {
56807   int16_t __ret;
56808   __ret = (int16_t) __builtin_neon_vqrshlh_s16(__p0, __p1);
56809   return __ret;
56810 }
56811 #endif
56812 
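/* vqrshrn_high_n_*: saturating rounding shift right narrow by an immediate, with the narrowed __p1 written into the high half above the existing low half __p0. */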
56813 #ifdef __LITTLE_ENDIAN__
56814 #define vqrshrn_high_n_u32(__p0_174, __p1_174, __p2_174) __extension__ ({ \
56815   uint16x4_t __s0_174 = __p0_174; \
56816   uint32x4_t __s1_174 = __p1_174; \
56817   uint16x8_t __ret_174; \
56818   __ret_174 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_174), (uint16x4_t)(vqrshrn_n_u32(__s1_174, __p2_174)))); \
56819   __ret_174; \
56820 })
56821 #else
56822 #define vqrshrn_high_n_u32(__p0_175, __p1_175, __p2_175) __extension__ ({ \
56823   uint16x4_t __s0_175 = __p0_175; \
56824   uint32x4_t __s1_175 = __p1_175; \
56825   uint16x4_t __rev0_175;  __rev0_175 = __builtin_shufflevector(__s0_175, __s0_175, 3, 2, 1, 0); \
56826   uint32x4_t __rev1_175;  __rev1_175 = __builtin_shufflevector(__s1_175, __s1_175, 3, 2, 1, 0); \
56827   uint16x8_t __ret_175; \
56828   __ret_175 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_175), (uint16x4_t)(__noswap_vqrshrn_n_u32(__rev1_175, __p2_175)))); \
56829   __ret_175 = __builtin_shufflevector(__ret_175, __ret_175, 7, 6, 5, 4, 3, 2, 1, 0); \
56830   __ret_175; \
56831 })
56832 #endif
56833 
56834 #ifdef __LITTLE_ENDIAN__
56835 #define vqrshrn_high_n_u64(__p0_176, __p1_176, __p2_176) __extension__ ({ \
56836   uint32x2_t __s0_176 = __p0_176; \
56837   uint64x2_t __s1_176 = __p1_176; \
56838   uint32x4_t __ret_176; \
56839   __ret_176 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_176), (uint32x2_t)(vqrshrn_n_u64(__s1_176, __p2_176)))); \
56840   __ret_176; \
56841 })
56842 #else
56843 #define vqrshrn_high_n_u64(__p0_177, __p1_177, __p2_177) __extension__ ({ \
56844   uint32x2_t __s0_177 = __p0_177; \
56845   uint64x2_t __s1_177 = __p1_177; \
56846   uint32x2_t __rev0_177;  __rev0_177 = __builtin_shufflevector(__s0_177, __s0_177, 1, 0); \
56847   uint64x2_t __rev1_177;  __rev1_177 = __builtin_shufflevector(__s1_177, __s1_177, 1, 0); \
56848   uint32x4_t __ret_177; \
56849   __ret_177 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_177), (uint32x2_t)(__noswap_vqrshrn_n_u64(__rev1_177, __p2_177)))); \
56850   __ret_177 = __builtin_shufflevector(__ret_177, __ret_177, 3, 2, 1, 0); \
56851   __ret_177; \
56852 })
56853 #endif
56854 
56855 #ifdef __LITTLE_ENDIAN__
56856 #define vqrshrn_high_n_u16(__p0_178, __p1_178, __p2_178) __extension__ ({ \
56857   uint8x8_t __s0_178 = __p0_178; \
56858   uint16x8_t __s1_178 = __p1_178; \
56859   uint8x16_t __ret_178; \
56860   __ret_178 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_178), (uint8x8_t)(vqrshrn_n_u16(__s1_178, __p2_178)))); \
56861   __ret_178; \
56862 })
56863 #else
56864 #define vqrshrn_high_n_u16(__p0_179, __p1_179, __p2_179) __extension__ ({ \
56865   uint8x8_t __s0_179 = __p0_179; \
56866   uint16x8_t __s1_179 = __p1_179; \
56867   uint8x8_t __rev0_179;  __rev0_179 = __builtin_shufflevector(__s0_179, __s0_179, 7, 6, 5, 4, 3, 2, 1, 0); \
56868   uint16x8_t __rev1_179;  __rev1_179 = __builtin_shufflevector(__s1_179, __s1_179, 7, 6, 5, 4, 3, 2, 1, 0); \
56869   uint8x16_t __ret_179; \
56870   __ret_179 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_179), (uint8x8_t)(__noswap_vqrshrn_n_u16(__rev1_179, __p2_179)))); \
56871   __ret_179 = __builtin_shufflevector(__ret_179, __ret_179, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56872   __ret_179; \
56873 })
56874 #endif
56875 
56876 #ifdef __LITTLE_ENDIAN__
56877 #define vqrshrn_high_n_s32(__p0_180, __p1_180, __p2_180) __extension__ ({ \
56878   int16x4_t __s0_180 = __p0_180; \
56879   int32x4_t __s1_180 = __p1_180; \
56880   int16x8_t __ret_180; \
56881   __ret_180 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_180), (int16x4_t)(vqrshrn_n_s32(__s1_180, __p2_180)))); \
56882   __ret_180; \
56883 })
56884 #else
56885 #define vqrshrn_high_n_s32(__p0_181, __p1_181, __p2_181) __extension__ ({ \
56886   int16x4_t __s0_181 = __p0_181; \
56887   int32x4_t __s1_181 = __p1_181; \
56888   int16x4_t __rev0_181;  __rev0_181 = __builtin_shufflevector(__s0_181, __s0_181, 3, 2, 1, 0); \
56889   int32x4_t __rev1_181;  __rev1_181 = __builtin_shufflevector(__s1_181, __s1_181, 3, 2, 1, 0); \
56890   int16x8_t __ret_181; \
56891   __ret_181 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_181), (int16x4_t)(__noswap_vqrshrn_n_s32(__rev1_181, __p2_181)))); \
56892   __ret_181 = __builtin_shufflevector(__ret_181, __ret_181, 7, 6, 5, 4, 3, 2, 1, 0); \
56893   __ret_181; \
56894 })
56895 #endif
56896 
56897 #ifdef __LITTLE_ENDIAN__
56898 #define vqrshrn_high_n_s64(__p0_182, __p1_182, __p2_182) __extension__ ({ \
56899   int32x2_t __s0_182 = __p0_182; \
56900   int64x2_t __s1_182 = __p1_182; \
56901   int32x4_t __ret_182; \
56902   __ret_182 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_182), (int32x2_t)(vqrshrn_n_s64(__s1_182, __p2_182)))); \
56903   __ret_182; \
56904 })
56905 #else
56906 #define vqrshrn_high_n_s64(__p0_183, __p1_183, __p2_183) __extension__ ({ \
56907   int32x2_t __s0_183 = __p0_183; \
56908   int64x2_t __s1_183 = __p1_183; \
56909   int32x2_t __rev0_183;  __rev0_183 = __builtin_shufflevector(__s0_183, __s0_183, 1, 0); \
56910   int64x2_t __rev1_183;  __rev1_183 = __builtin_shufflevector(__s1_183, __s1_183, 1, 0); \
56911   int32x4_t __ret_183; \
56912   __ret_183 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_183), (int32x2_t)(__noswap_vqrshrn_n_s64(__rev1_183, __p2_183)))); \
56913   __ret_183 = __builtin_shufflevector(__ret_183, __ret_183, 3, 2, 1, 0); \
56914   __ret_183; \
56915 })
56916 #endif
56917 
56918 #ifdef __LITTLE_ENDIAN__
56919 #define vqrshrn_high_n_s16(__p0_184, __p1_184, __p2_184) __extension__ ({ \
56920   int8x8_t __s0_184 = __p0_184; \
56921   int16x8_t __s1_184 = __p1_184; \
56922   int8x16_t __ret_184; \
56923   __ret_184 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_184), (int8x8_t)(vqrshrn_n_s16(__s1_184, __p2_184)))); \
56924   __ret_184; \
56925 })
56926 #else
56927 #define vqrshrn_high_n_s16(__p0_185, __p1_185, __p2_185) __extension__ ({ \
56928   int8x8_t __s0_185 = __p0_185; \
56929   int16x8_t __s1_185 = __p1_185; \
56930   int8x8_t __rev0_185;  __rev0_185 = __builtin_shufflevector(__s0_185, __s0_185, 7, 6, 5, 4, 3, 2, 1, 0); \
56931   int16x8_t __rev1_185;  __rev1_185 = __builtin_shufflevector(__s1_185, __s1_185, 7, 6, 5, 4, 3, 2, 1, 0); \
56932   int8x16_t __ret_185; \
56933   __ret_185 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_185), (int8x8_t)(__noswap_vqrshrn_n_s16(__rev1_185, __p2_185)))); \
56934   __ret_185 = __builtin_shufflevector(__ret_185, __ret_185, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56935   __ret_185; \
56936 })
56937 #endif
56938 
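/* Scalar saturating rounding shift-right-narrow by immediate (s: 32->16, d: 64->32, h: 16->8). */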
56939 #ifdef __LITTLE_ENDIAN__
56940 #define vqrshrns_n_u32(__p0, __p1) __extension__ ({ \
56941   uint32_t __s0 = __p0; \
56942   uint16_t __ret; \
56943   __ret = (uint16_t) __builtin_neon_vqrshrns_n_u32(__s0, __p1); \
56944   __ret; \
56945 })
56946 #else
56947 #define vqrshrns_n_u32(__p0, __p1) __extension__ ({ \
56948   uint32_t __s0 = __p0; \
56949   uint16_t __ret; \
56950   __ret = (uint16_t) __builtin_neon_vqrshrns_n_u32(__s0, __p1); \
56951   __ret; \
56952 })
56953 #endif
56954 
56955 #ifdef __LITTLE_ENDIAN__
56956 #define vqrshrnd_n_u64(__p0, __p1) __extension__ ({ \
56957   uint64_t __s0 = __p0; \
56958   uint32_t __ret; \
56959   __ret = (uint32_t) __builtin_neon_vqrshrnd_n_u64(__s0, __p1); \
56960   __ret; \
56961 })
56962 #else
56963 #define vqrshrnd_n_u64(__p0, __p1) __extension__ ({ \
56964   uint64_t __s0 = __p0; \
56965   uint32_t __ret; \
56966   __ret = (uint32_t) __builtin_neon_vqrshrnd_n_u64(__s0, __p1); \
56967   __ret; \
56968 })
56969 #endif
56970 
56971 #ifdef __LITTLE_ENDIAN__
56972 #define vqrshrnh_n_u16(__p0, __p1) __extension__ ({ \
56973   uint16_t __s0 = __p0; \
56974   uint8_t __ret; \
56975   __ret = (uint8_t) __builtin_neon_vqrshrnh_n_u16(__s0, __p1); \
56976   __ret; \
56977 })
56978 #else
56979 #define vqrshrnh_n_u16(__p0, __p1) __extension__ ({ \
56980   uint16_t __s0 = __p0; \
56981   uint8_t __ret; \
56982   __ret = (uint8_t) __builtin_neon_vqrshrnh_n_u16(__s0, __p1); \
56983   __ret; \
56984 })
56985 #endif
56986 
56987 #ifdef __LITTLE_ENDIAN__
56988 #define vqrshrns_n_s32(__p0, __p1) __extension__ ({ \
56989   int32_t __s0 = __p0; \
56990   int16_t __ret; \
56991   __ret = (int16_t) __builtin_neon_vqrshrns_n_s32(__s0, __p1); \
56992   __ret; \
56993 })
56994 #else
56995 #define vqrshrns_n_s32(__p0, __p1) __extension__ ({ \
56996   int32_t __s0 = __p0; \
56997   int16_t __ret; \
56998   __ret = (int16_t) __builtin_neon_vqrshrns_n_s32(__s0, __p1); \
56999   __ret; \
57000 })
57001 #endif
57002 
57003 #ifdef __LITTLE_ENDIAN__
57004 #define vqrshrnd_n_s64(__p0, __p1) __extension__ ({ \
57005   int64_t __s0 = __p0; \
57006   int32_t __ret; \
57007   __ret = (int32_t) __builtin_neon_vqrshrnd_n_s64(__s0, __p1); \
57008   __ret; \
57009 })
57010 #else
57011 #define vqrshrnd_n_s64(__p0, __p1) __extension__ ({ \
57012   int64_t __s0 = __p0; \
57013   int32_t __ret; \
57014   __ret = (int32_t) __builtin_neon_vqrshrnd_n_s64(__s0, __p1); \
57015   __ret; \
57016 })
57017 #endif
57018 
57019 #ifdef __LITTLE_ENDIAN__
57020 #define vqrshrnh_n_s16(__p0, __p1) __extension__ ({ \
57021   int16_t __s0 = __p0; \
57022   int8_t __ret; \
57023   __ret = (int8_t) __builtin_neon_vqrshrnh_n_s16(__s0, __p1); \
57024   __ret; \
57025 })
57026 #else
57027 #define vqrshrnh_n_s16(__p0, __p1) __extension__ ({ \
57028   int16_t __s0 = __p0; \
57029   int8_t __ret; \
57030   __ret = (int8_t) __builtin_neon_vqrshrnh_n_s16(__s0, __p1); \
57031   __ret; \
57032 })
57033 #endif
57034 
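/* vqrshrun*: signed input, saturating rounding shift right narrow to an unsigned result (high-half vector forms, then scalar forms). */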
57035 #ifdef __LITTLE_ENDIAN__
57036 #define vqrshrun_high_n_s32(__p0_186, __p1_186, __p2_186) __extension__ ({ \
57037   int16x4_t __s0_186 = __p0_186; \
57038   int32x4_t __s1_186 = __p1_186; \
57039   int16x8_t __ret_186; \
57040   __ret_186 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_186), (int16x4_t)(vqrshrun_n_s32(__s1_186, __p2_186)))); \
57041   __ret_186; \
57042 })
57043 #else
57044 #define vqrshrun_high_n_s32(__p0_187, __p1_187, __p2_187) __extension__ ({ \
57045   int16x4_t __s0_187 = __p0_187; \
57046   int32x4_t __s1_187 = __p1_187; \
57047   int16x4_t __rev0_187;  __rev0_187 = __builtin_shufflevector(__s0_187, __s0_187, 3, 2, 1, 0); \
57048   int32x4_t __rev1_187;  __rev1_187 = __builtin_shufflevector(__s1_187, __s1_187, 3, 2, 1, 0); \
57049   int16x8_t __ret_187; \
57050   __ret_187 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_187), (int16x4_t)(__noswap_vqrshrun_n_s32(__rev1_187, __p2_187)))); \
57051   __ret_187 = __builtin_shufflevector(__ret_187, __ret_187, 7, 6, 5, 4, 3, 2, 1, 0); \
57052   __ret_187; \
57053 })
57054 #endif
57055 
57056 #ifdef __LITTLE_ENDIAN__
57057 #define vqrshrun_high_n_s64(__p0_188, __p1_188, __p2_188) __extension__ ({ \
57058   int32x2_t __s0_188 = __p0_188; \
57059   int64x2_t __s1_188 = __p1_188; \
57060   int32x4_t __ret_188; \
57061   __ret_188 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_188), (int32x2_t)(vqrshrun_n_s64(__s1_188, __p2_188)))); \
57062   __ret_188; \
57063 })
57064 #else
57065 #define vqrshrun_high_n_s64(__p0_189, __p1_189, __p2_189) __extension__ ({ \
57066   int32x2_t __s0_189 = __p0_189; \
57067   int64x2_t __s1_189 = __p1_189; \
57068   int32x2_t __rev0_189;  __rev0_189 = __builtin_shufflevector(__s0_189, __s0_189, 1, 0); \
57069   int64x2_t __rev1_189;  __rev1_189 = __builtin_shufflevector(__s1_189, __s1_189, 1, 0); \
57070   int32x4_t __ret_189; \
57071   __ret_189 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_189), (int32x2_t)(__noswap_vqrshrun_n_s64(__rev1_189, __p2_189)))); \
57072   __ret_189 = __builtin_shufflevector(__ret_189, __ret_189, 3, 2, 1, 0); \
57073   __ret_189; \
57074 })
57075 #endif
57076 
57077 #ifdef __LITTLE_ENDIAN__
57078 #define vqrshrun_high_n_s16(__p0_190, __p1_190, __p2_190) __extension__ ({ \
57079   int8x8_t __s0_190 = __p0_190; \
57080   int16x8_t __s1_190 = __p1_190; \
57081   int8x16_t __ret_190; \
57082   __ret_190 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_190), (int8x8_t)(vqrshrun_n_s16(__s1_190, __p2_190)))); \
57083   __ret_190; \
57084 })
57085 #else
57086 #define vqrshrun_high_n_s16(__p0_191, __p1_191, __p2_191) __extension__ ({ \
57087   int8x8_t __s0_191 = __p0_191; \
57088   int16x8_t __s1_191 = __p1_191; \
57089   int8x8_t __rev0_191;  __rev0_191 = __builtin_shufflevector(__s0_191, __s0_191, 7, 6, 5, 4, 3, 2, 1, 0); \
57090   int16x8_t __rev1_191;  __rev1_191 = __builtin_shufflevector(__s1_191, __s1_191, 7, 6, 5, 4, 3, 2, 1, 0); \
57091   int8x16_t __ret_191; \
57092   __ret_191 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_191), (int8x8_t)(__noswap_vqrshrun_n_s16(__rev1_191, __p2_191)))); \
57093   __ret_191 = __builtin_shufflevector(__ret_191, __ret_191, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
57094   __ret_191; \
57095 })
57096 #endif
57097 
57098 #ifdef __LITTLE_ENDIAN__
57099 #define vqrshruns_n_s32(__p0, __p1) __extension__ ({ \
57100   int32_t __s0 = __p0; \
57101   int16_t __ret; \
57102   __ret = (int16_t) __builtin_neon_vqrshruns_n_s32(__s0, __p1); \
57103   __ret; \
57104 })
57105 #else
57106 #define vqrshruns_n_s32(__p0, __p1) __extension__ ({ \
57107   int32_t __s0 = __p0; \
57108   int16_t __ret; \
57109   __ret = (int16_t) __builtin_neon_vqrshruns_n_s32(__s0, __p1); \
57110   __ret; \
57111 })
57112 #endif
57113 
57114 #ifdef __LITTLE_ENDIAN__
57115 #define vqrshrund_n_s64(__p0, __p1) __extension__ ({ \
57116   int64_t __s0 = __p0; \
57117   int32_t __ret; \
57118   __ret = (int32_t) __builtin_neon_vqrshrund_n_s64(__s0, __p1); \
57119   __ret; \
57120 })
57121 #else
57122 #define vqrshrund_n_s64(__p0, __p1) __extension__ ({ \
57123   int64_t __s0 = __p0; \
57124   int32_t __ret; \
57125   __ret = (int32_t) __builtin_neon_vqrshrund_n_s64(__s0, __p1); \
57126   __ret; \
57127 })
57128 #endif
57129 
57130 #ifdef __LITTLE_ENDIAN__
57131 #define vqrshrunh_n_s16(__p0, __p1) __extension__ ({ \
57132   int16_t __s0 = __p0; \
57133   int8_t __ret; \
57134   __ret = (int8_t) __builtin_neon_vqrshrunh_n_s16(__s0, __p1); \
57135   __ret; \
57136 })
57137 #else
57138 #define vqrshrunh_n_s16(__p0, __p1) __extension__ ({ \
57139   int16_t __s0 = __p0; \
57140   int8_t __ret; \
57141   __ret = (int8_t) __builtin_neon_vqrshrunh_n_s16(__s0, __p1); \
57142   __ret; \
57143 })
57144 #endif
57145 
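/* vqshl*: saturating shift left by a signed, register-supplied amount (scalar forms). */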
57146 #ifdef __LITTLE_ENDIAN__
57147 __ai uint8_t vqshlb_u8(uint8_t __p0, uint8_t __p1) {
57148   uint8_t __ret;
57149   __ret = (uint8_t) __builtin_neon_vqshlb_u8(__p0, __p1);
57150   return __ret;
57151 }
57152 #else
57153 __ai uint8_t vqshlb_u8(uint8_t __p0, uint8_t __p1) {
57154   uint8_t __ret;
57155   __ret = (uint8_t) __builtin_neon_vqshlb_u8(__p0, __p1);
57156   return __ret;
57157 }
57158 #endif
57159 
57160 #ifdef __LITTLE_ENDIAN__
57161 __ai uint32_t vqshls_u32(uint32_t __p0, uint32_t __p1) {
57162   uint32_t __ret;
57163   __ret = (uint32_t) __builtin_neon_vqshls_u32(__p0, __p1);
57164   return __ret;
57165 }
57166 #else
57167 __ai uint32_t vqshls_u32(uint32_t __p0, uint32_t __p1) {
57168   uint32_t __ret;
57169   __ret = (uint32_t) __builtin_neon_vqshls_u32(__p0, __p1);
57170   return __ret;
57171 }
57172 #endif
57173 
57174 #ifdef __LITTLE_ENDIAN__
57175 __ai uint64_t vqshld_u64(uint64_t __p0, uint64_t __p1) {
57176   uint64_t __ret;
57177   __ret = (uint64_t) __builtin_neon_vqshld_u64(__p0, __p1);
57178   return __ret;
57179 }
57180 #else
57181 __ai uint64_t vqshld_u64(uint64_t __p0, uint64_t __p1) {
57182   uint64_t __ret;
57183   __ret = (uint64_t) __builtin_neon_vqshld_u64(__p0, __p1);
57184   return __ret;
57185 }
57186 #endif
57187 
57188 #ifdef __LITTLE_ENDIAN__
57189 __ai uint16_t vqshlh_u16(uint16_t __p0, uint16_t __p1) {
57190   uint16_t __ret;
57191   __ret = (uint16_t) __builtin_neon_vqshlh_u16(__p0, __p1);
57192   return __ret;
57193 }
57194 #else
57195 __ai uint16_t vqshlh_u16(uint16_t __p0, uint16_t __p1) {
57196   uint16_t __ret;
57197   __ret = (uint16_t) __builtin_neon_vqshlh_u16(__p0, __p1);
57198   return __ret;
57199 }
57200 #endif
57201 
57202 #ifdef __LITTLE_ENDIAN__
57203 __ai int8_t vqshlb_s8(int8_t __p0, int8_t __p1) {
57204   int8_t __ret;
57205   __ret = (int8_t) __builtin_neon_vqshlb_s8(__p0, __p1);
57206   return __ret;
57207 }
57208 #else
57209 __ai int8_t vqshlb_s8(int8_t __p0, int8_t __p1) {
57210   int8_t __ret;
57211   __ret = (int8_t) __builtin_neon_vqshlb_s8(__p0, __p1);
57212   return __ret;
57213 }
57214 #endif
57215 
57216 #ifdef __LITTLE_ENDIAN__
57217 __ai int32_t vqshls_s32(int32_t __p0, int32_t __p1) {
57218   int32_t __ret;
57219   __ret = (int32_t) __builtin_neon_vqshls_s32(__p0, __p1);
57220   return __ret;
57221 }
57222 #else
57223 __ai int32_t vqshls_s32(int32_t __p0, int32_t __p1) {
57224   int32_t __ret;
57225   __ret = (int32_t) __builtin_neon_vqshls_s32(__p0, __p1);
57226   return __ret;
57227 }
57228 #endif
57229 
57230 #ifdef __LITTLE_ENDIAN__
57231 __ai int64_t vqshld_s64(int64_t __p0, int64_t __p1) {
57232   int64_t __ret;
57233   __ret = (int64_t) __builtin_neon_vqshld_s64(__p0, __p1);
57234   return __ret;
57235 }
57236 #else
57237 __ai int64_t vqshld_s64(int64_t __p0, int64_t __p1) {
57238   int64_t __ret;
57239   __ret = (int64_t) __builtin_neon_vqshld_s64(__p0, __p1);
57240   return __ret;
57241 }
57242 #endif
57243 
57244 #ifdef __LITTLE_ENDIAN__
57245 __ai int16_t vqshlh_s16(int16_t __p0, int16_t __p1) {
57246   int16_t __ret;
57247   __ret = (int16_t) __builtin_neon_vqshlh_s16(__p0, __p1);
57248   return __ret;
57249 }
57250 #else
57251 __ai int16_t vqshlh_s16(int16_t __p0, int16_t __p1) {
57252   int16_t __ret;
57253   __ret = (int16_t) __builtin_neon_vqshlh_s16(__p0, __p1);
57254   return __ret;
57255 }
57256 #endif
57257 
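/* vqshl*_n: saturating shift left by an immediate (scalar forms). */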
57258 #ifdef __LITTLE_ENDIAN__
57259 #define vqshlb_n_u8(__p0, __p1) __extension__ ({ \
57260   uint8_t __s0 = __p0; \
57261   uint8_t __ret; \
57262   __ret = (uint8_t) __builtin_neon_vqshlb_n_u8(__s0, __p1); \
57263   __ret; \
57264 })
57265 #else
57266 #define vqshlb_n_u8(__p0, __p1) __extension__ ({ \
57267   uint8_t __s0 = __p0; \
57268   uint8_t __ret; \
57269   __ret = (uint8_t) __builtin_neon_vqshlb_n_u8(__s0, __p1); \
57270   __ret; \
57271 })
57272 #endif
57273 
57274 #ifdef __LITTLE_ENDIAN__
57275 #define vqshls_n_u32(__p0, __p1) __extension__ ({ \
57276   uint32_t __s0 = __p0; \
57277   uint32_t __ret; \
57278   __ret = (uint32_t) __builtin_neon_vqshls_n_u32(__s0, __p1); \
57279   __ret; \
57280 })
57281 #else
57282 #define vqshls_n_u32(__p0, __p1) __extension__ ({ \
57283   uint32_t __s0 = __p0; \
57284   uint32_t __ret; \
57285   __ret = (uint32_t) __builtin_neon_vqshls_n_u32(__s0, __p1); \
57286   __ret; \
57287 })
57288 #endif
57289 
57290 #ifdef __LITTLE_ENDIAN__
57291 #define vqshld_n_u64(__p0, __p1) __extension__ ({ \
57292   uint64_t __s0 = __p0; \
57293   uint64_t __ret; \
57294   __ret = (uint64_t) __builtin_neon_vqshld_n_u64(__s0, __p1); \
57295   __ret; \
57296 })
57297 #else
57298 #define vqshld_n_u64(__p0, __p1) __extension__ ({ \
57299   uint64_t __s0 = __p0; \
57300   uint64_t __ret; \
57301   __ret = (uint64_t) __builtin_neon_vqshld_n_u64(__s0, __p1); \
57302   __ret; \
57303 })
57304 #endif
57305 
57306 #ifdef __LITTLE_ENDIAN__
57307 #define vqshlh_n_u16(__p0, __p1) __extension__ ({ \
57308   uint16_t __s0 = __p0; \
57309   uint16_t __ret; \
57310   __ret = (uint16_t) __builtin_neon_vqshlh_n_u16(__s0, __p1); \
57311   __ret; \
57312 })
57313 #else
57314 #define vqshlh_n_u16(__p0, __p1) __extension__ ({ \
57315   uint16_t __s0 = __p0; \
57316   uint16_t __ret; \
57317   __ret = (uint16_t) __builtin_neon_vqshlh_n_u16(__s0, __p1); \
57318   __ret; \
57319 })
57320 #endif
57321 
57322 #ifdef __LITTLE_ENDIAN__
57323 #define vqshlb_n_s8(__p0, __p1) __extension__ ({ \
57324   int8_t __s0 = __p0; \
57325   int8_t __ret; \
57326   __ret = (int8_t) __builtin_neon_vqshlb_n_s8(__s0, __p1); \
57327   __ret; \
57328 })
57329 #else
57330 #define vqshlb_n_s8(__p0, __p1) __extension__ ({ \
57331   int8_t __s0 = __p0; \
57332   int8_t __ret; \
57333   __ret = (int8_t) __builtin_neon_vqshlb_n_s8(__s0, __p1); \
57334   __ret; \
57335 })
57336 #endif
57337 
57338 #ifdef __LITTLE_ENDIAN__
57339 #define vqshls_n_s32(__p0, __p1) __extension__ ({ \
57340   int32_t __s0 = __p0; \
57341   int32_t __ret; \
57342   __ret = (int32_t) __builtin_neon_vqshls_n_s32(__s0, __p1); \
57343   __ret; \
57344 })
57345 #else
57346 #define vqshls_n_s32(__p0, __p1) __extension__ ({ \
57347   int32_t __s0 = __p0; \
57348   int32_t __ret; \
57349   __ret = (int32_t) __builtin_neon_vqshls_n_s32(__s0, __p1); \
57350   __ret; \
57351 })
57352 #endif
57353 
57354 #ifdef __LITTLE_ENDIAN__
57355 #define vqshld_n_s64(__p0, __p1) __extension__ ({ \
57356   int64_t __s0 = __p0; \
57357   int64_t __ret; \
57358   __ret = (int64_t) __builtin_neon_vqshld_n_s64(__s0, __p1); \
57359   __ret; \
57360 })
57361 #else
57362 #define vqshld_n_s64(__p0, __p1) __extension__ ({ \
57363   int64_t __s0 = __p0; \
57364   int64_t __ret; \
57365   __ret = (int64_t) __builtin_neon_vqshld_n_s64(__s0, __p1); \
57366   __ret; \
57367 })
57368 #endif
57369 
57370 #ifdef __LITTLE_ENDIAN__
57371 #define vqshlh_n_s16(__p0, __p1) __extension__ ({ \
57372   int16_t __s0 = __p0; \
57373   int16_t __ret; \
57374   __ret = (int16_t) __builtin_neon_vqshlh_n_s16(__s0, __p1); \
57375   __ret; \
57376 })
57377 #else
57378 #define vqshlh_n_s16(__p0, __p1) __extension__ ({ \
57379   int16_t __s0 = __p0; \
57380   int16_t __ret; \
57381   __ret = (int16_t) __builtin_neon_vqshlh_n_s16(__s0, __p1); \
57382   __ret; \
57383 })
57384 #endif
57385 
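/* vqshlu*_n: signed input shifted left by an immediate with saturation to the unsigned range of the same element width. */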
57386 #ifdef __LITTLE_ENDIAN__
57387 #define vqshlub_n_s8(__p0, __p1) __extension__ ({ \
57388   int8_t __s0 = __p0; \
57389   int8_t __ret; \
57390   __ret = (int8_t) __builtin_neon_vqshlub_n_s8(__s0, __p1); \
57391   __ret; \
57392 })
57393 #else
57394 #define vqshlub_n_s8(__p0, __p1) __extension__ ({ \
57395   int8_t __s0 = __p0; \
57396   int8_t __ret; \
57397   __ret = (int8_t) __builtin_neon_vqshlub_n_s8(__s0, __p1); \
57398   __ret; \
57399 })
57400 #endif
57401 
57402 #ifdef __LITTLE_ENDIAN__
57403 #define vqshlus_n_s32(__p0, __p1) __extension__ ({ \
57404   int32_t __s0 = __p0; \
57405   int32_t __ret; \
57406   __ret = (int32_t) __builtin_neon_vqshlus_n_s32(__s0, __p1); \
57407   __ret; \
57408 })
57409 #else
57410 #define vqshlus_n_s32(__p0, __p1) __extension__ ({ \
57411   int32_t __s0 = __p0; \
57412   int32_t __ret; \
57413   __ret = (int32_t) __builtin_neon_vqshlus_n_s32(__s0, __p1); \
57414   __ret; \
57415 })
57416 #endif
57417 
57418 #ifdef __LITTLE_ENDIAN__
57419 #define vqshlud_n_s64(__p0, __p1) __extension__ ({ \
57420   int64_t __s0 = __p0; \
57421   int64_t __ret; \
57422   __ret = (int64_t) __builtin_neon_vqshlud_n_s64(__s0, __p1); \
57423   __ret; \
57424 })
57425 #else
57426 #define vqshlud_n_s64(__p0, __p1) __extension__ ({ \
57427   int64_t __s0 = __p0; \
57428   int64_t __ret; \
57429   __ret = (int64_t) __builtin_neon_vqshlud_n_s64(__s0, __p1); \
57430   __ret; \
57431 })
57432 #endif
57433 
57434 #ifdef __LITTLE_ENDIAN__
57435 #define vqshluh_n_s16(__p0, __p1) __extension__ ({ \
57436   int16_t __s0 = __p0; \
57437   int16_t __ret; \
57438   __ret = (int16_t) __builtin_neon_vqshluh_n_s16(__s0, __p1); \
57439   __ret; \
57440 })
57441 #else
57442 #define vqshluh_n_s16(__p0, __p1) __extension__ ({ \
57443   int16_t __s0 = __p0; \
57444   int16_t __ret; \
57445   __ret = (int16_t) __builtin_neon_vqshluh_n_s16(__s0, __p1); \
57446   __ret; \
57447 })
57448 #endif
57449 
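/* vqshrn*: saturating (non-rounding) shift right narrow by immediate: high-half vector forms, then scalar forms. */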
57450 #ifdef __LITTLE_ENDIAN__
57451 #define vqshrn_high_n_u32(__p0_192, __p1_192, __p2_192) __extension__ ({ \
57452   uint16x4_t __s0_192 = __p0_192; \
57453   uint32x4_t __s1_192 = __p1_192; \
57454   uint16x8_t __ret_192; \
57455   __ret_192 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_192), (uint16x4_t)(vqshrn_n_u32(__s1_192, __p2_192)))); \
57456   __ret_192; \
57457 })
57458 #else
57459 #define vqshrn_high_n_u32(__p0_193, __p1_193, __p2_193) __extension__ ({ \
57460   uint16x4_t __s0_193 = __p0_193; \
57461   uint32x4_t __s1_193 = __p1_193; \
57462   uint16x4_t __rev0_193;  __rev0_193 = __builtin_shufflevector(__s0_193, __s0_193, 3, 2, 1, 0); \
57463   uint32x4_t __rev1_193;  __rev1_193 = __builtin_shufflevector(__s1_193, __s1_193, 3, 2, 1, 0); \
57464   uint16x8_t __ret_193; \
57465   __ret_193 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_193), (uint16x4_t)(__noswap_vqshrn_n_u32(__rev1_193, __p2_193)))); \
57466   __ret_193 = __builtin_shufflevector(__ret_193, __ret_193, 7, 6, 5, 4, 3, 2, 1, 0); \
57467   __ret_193; \
57468 })
57469 #endif
57470 
57471 #ifdef __LITTLE_ENDIAN__
57472 #define vqshrn_high_n_u64(__p0_194, __p1_194, __p2_194) __extension__ ({ \
57473   uint32x2_t __s0_194 = __p0_194; \
57474   uint64x2_t __s1_194 = __p1_194; \
57475   uint32x4_t __ret_194; \
57476   __ret_194 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_194), (uint32x2_t)(vqshrn_n_u64(__s1_194, __p2_194)))); \
57477   __ret_194; \
57478 })
57479 #else
57480 #define vqshrn_high_n_u64(__p0_195, __p1_195, __p2_195) __extension__ ({ \
57481   uint32x2_t __s0_195 = __p0_195; \
57482   uint64x2_t __s1_195 = __p1_195; \
57483   uint32x2_t __rev0_195;  __rev0_195 = __builtin_shufflevector(__s0_195, __s0_195, 1, 0); \
57484   uint64x2_t __rev1_195;  __rev1_195 = __builtin_shufflevector(__s1_195, __s1_195, 1, 0); \
57485   uint32x4_t __ret_195; \
57486   __ret_195 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_195), (uint32x2_t)(__noswap_vqshrn_n_u64(__rev1_195, __p2_195)))); \
57487   __ret_195 = __builtin_shufflevector(__ret_195, __ret_195, 3, 2, 1, 0); \
57488   __ret_195; \
57489 })
57490 #endif
57491 
57492 #ifdef __LITTLE_ENDIAN__
57493 #define vqshrn_high_n_u16(__p0_196, __p1_196, __p2_196) __extension__ ({ \
57494   uint8x8_t __s0_196 = __p0_196; \
57495   uint16x8_t __s1_196 = __p1_196; \
57496   uint8x16_t __ret_196; \
57497   __ret_196 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_196), (uint8x8_t)(vqshrn_n_u16(__s1_196, __p2_196)))); \
57498   __ret_196; \
57499 })
57500 #else
57501 #define vqshrn_high_n_u16(__p0_197, __p1_197, __p2_197) __extension__ ({ \
57502   uint8x8_t __s0_197 = __p0_197; \
57503   uint16x8_t __s1_197 = __p1_197; \
57504   uint8x8_t __rev0_197;  __rev0_197 = __builtin_shufflevector(__s0_197, __s0_197, 7, 6, 5, 4, 3, 2, 1, 0); \
57505   uint16x8_t __rev1_197;  __rev1_197 = __builtin_shufflevector(__s1_197, __s1_197, 7, 6, 5, 4, 3, 2, 1, 0); \
57506   uint8x16_t __ret_197; \
57507   __ret_197 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_197), (uint8x8_t)(__noswap_vqshrn_n_u16(__rev1_197, __p2_197)))); \
57508   __ret_197 = __builtin_shufflevector(__ret_197, __ret_197, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
57509   __ret_197; \
57510 })
57511 #endif
57512 
57513 #ifdef __LITTLE_ENDIAN__
57514 #define vqshrn_high_n_s32(__p0_198, __p1_198, __p2_198) __extension__ ({ \
57515   int16x4_t __s0_198 = __p0_198; \
57516   int32x4_t __s1_198 = __p1_198; \
57517   int16x8_t __ret_198; \
57518   __ret_198 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_198), (int16x4_t)(vqshrn_n_s32(__s1_198, __p2_198)))); \
57519   __ret_198; \
57520 })
57521 #else
57522 #define vqshrn_high_n_s32(__p0_199, __p1_199, __p2_199) __extension__ ({ \
57523   int16x4_t __s0_199 = __p0_199; \
57524   int32x4_t __s1_199 = __p1_199; \
57525   int16x4_t __rev0_199;  __rev0_199 = __builtin_shufflevector(__s0_199, __s0_199, 3, 2, 1, 0); \
57526   int32x4_t __rev1_199;  __rev1_199 = __builtin_shufflevector(__s1_199, __s1_199, 3, 2, 1, 0); \
57527   int16x8_t __ret_199; \
57528   __ret_199 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_199), (int16x4_t)(__noswap_vqshrn_n_s32(__rev1_199, __p2_199)))); \
57529   __ret_199 = __builtin_shufflevector(__ret_199, __ret_199, 7, 6, 5, 4, 3, 2, 1, 0); \
57530   __ret_199; \
57531 })
57532 #endif
57533 
57534 #ifdef __LITTLE_ENDIAN__
57535 #define vqshrn_high_n_s64(__p0_200, __p1_200, __p2_200) __extension__ ({ \
57536   int32x2_t __s0_200 = __p0_200; \
57537   int64x2_t __s1_200 = __p1_200; \
57538   int32x4_t __ret_200; \
57539   __ret_200 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_200), (int32x2_t)(vqshrn_n_s64(__s1_200, __p2_200)))); \
57540   __ret_200; \
57541 })
57542 #else
57543 #define vqshrn_high_n_s64(__p0_201, __p1_201, __p2_201) __extension__ ({ \
57544   int32x2_t __s0_201 = __p0_201; \
57545   int64x2_t __s1_201 = __p1_201; \
57546   int32x2_t __rev0_201;  __rev0_201 = __builtin_shufflevector(__s0_201, __s0_201, 1, 0); \
57547   int64x2_t __rev1_201;  __rev1_201 = __builtin_shufflevector(__s1_201, __s1_201, 1, 0); \
57548   int32x4_t __ret_201; \
57549   __ret_201 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_201), (int32x2_t)(__noswap_vqshrn_n_s64(__rev1_201, __p2_201)))); \
57550   __ret_201 = __builtin_shufflevector(__ret_201, __ret_201, 3, 2, 1, 0); \
57551   __ret_201; \
57552 })
57553 #endif
57554 
57555 #ifdef __LITTLE_ENDIAN__
57556 #define vqshrn_high_n_s16(__p0_202, __p1_202, __p2_202) __extension__ ({ \
57557   int8x8_t __s0_202 = __p0_202; \
57558   int16x8_t __s1_202 = __p1_202; \
57559   int8x16_t __ret_202; \
57560   __ret_202 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_202), (int8x8_t)(vqshrn_n_s16(__s1_202, __p2_202)))); \
57561   __ret_202; \
57562 })
57563 #else
57564 #define vqshrn_high_n_s16(__p0_203, __p1_203, __p2_203) __extension__ ({ \
57565   int8x8_t __s0_203 = __p0_203; \
57566   int16x8_t __s1_203 = __p1_203; \
57567   int8x8_t __rev0_203;  __rev0_203 = __builtin_shufflevector(__s0_203, __s0_203, 7, 6, 5, 4, 3, 2, 1, 0); \
57568   int16x8_t __rev1_203;  __rev1_203 = __builtin_shufflevector(__s1_203, __s1_203, 7, 6, 5, 4, 3, 2, 1, 0); \
57569   int8x16_t __ret_203; \
57570   __ret_203 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_203), (int8x8_t)(__noswap_vqshrn_n_s16(__rev1_203, __p2_203)))); \
57571   __ret_203 = __builtin_shufflevector(__ret_203, __ret_203, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
57572   __ret_203; \
57573 })
57574 #endif
57575 
57576 #ifdef __LITTLE_ENDIAN__
57577 #define vqshrns_n_u32(__p0, __p1) __extension__ ({ \
57578   uint32_t __s0 = __p0; \
57579   uint16_t __ret; \
57580   __ret = (uint16_t) __builtin_neon_vqshrns_n_u32(__s0, __p1); \
57581   __ret; \
57582 })
57583 #else
57584 #define vqshrns_n_u32(__p0, __p1) __extension__ ({ \
57585   uint32_t __s0 = __p0; \
57586   uint16_t __ret; \
57587   __ret = (uint16_t) __builtin_neon_vqshrns_n_u32(__s0, __p1); \
57588   __ret; \
57589 })
57590 #endif
57591 
57592 #ifdef __LITTLE_ENDIAN__
57593 #define vqshrnd_n_u64(__p0, __p1) __extension__ ({ \
57594   uint64_t __s0 = __p0; \
57595   uint32_t __ret; \
57596   __ret = (uint32_t) __builtin_neon_vqshrnd_n_u64(__s0, __p1); \
57597   __ret; \
57598 })
57599 #else
57600 #define vqshrnd_n_u64(__p0, __p1) __extension__ ({ \
57601   uint64_t __s0 = __p0; \
57602   uint32_t __ret; \
57603   __ret = (uint32_t) __builtin_neon_vqshrnd_n_u64(__s0, __p1); \
57604   __ret; \
57605 })
57606 #endif
57607 
57608 #ifdef __LITTLE_ENDIAN__
57609 #define vqshrnh_n_u16(__p0, __p1) __extension__ ({ \
57610   uint16_t __s0 = __p0; \
57611   uint8_t __ret; \
57612   __ret = (uint8_t) __builtin_neon_vqshrnh_n_u16(__s0, __p1); \
57613   __ret; \
57614 })
57615 #else
57616 #define vqshrnh_n_u16(__p0, __p1) __extension__ ({ \
57617   uint16_t __s0 = __p0; \
57618   uint8_t __ret; \
57619   __ret = (uint8_t) __builtin_neon_vqshrnh_n_u16(__s0, __p1); \
57620   __ret; \
57621 })
57622 #endif
57623 
57624 #ifdef __LITTLE_ENDIAN__
57625 #define vqshrns_n_s32(__p0, __p1) __extension__ ({ \
57626   int32_t __s0 = __p0; \
57627   int16_t __ret; \
57628   __ret = (int16_t) __builtin_neon_vqshrns_n_s32(__s0, __p1); \
57629   __ret; \
57630 })
57631 #else
57632 #define vqshrns_n_s32(__p0, __p1) __extension__ ({ \
57633   int32_t __s0 = __p0; \
57634   int16_t __ret; \
57635   __ret = (int16_t) __builtin_neon_vqshrns_n_s32(__s0, __p1); \
57636   __ret; \
57637 })
57638 #endif
57639 
57640 #ifdef __LITTLE_ENDIAN__
57641 #define vqshrnd_n_s64(__p0, __p1) __extension__ ({ \
57642   int64_t __s0 = __p0; \
57643   int32_t __ret; \
57644   __ret = (int32_t) __builtin_neon_vqshrnd_n_s64(__s0, __p1); \
57645   __ret; \
57646 })
57647 #else
57648 #define vqshrnd_n_s64(__p0, __p1) __extension__ ({ \
57649   int64_t __s0 = __p0; \
57650   int32_t __ret; \
57651   __ret = (int32_t) __builtin_neon_vqshrnd_n_s64(__s0, __p1); \
57652   __ret; \
57653 })
57654 #endif
57655 
57656 #ifdef __LITTLE_ENDIAN__
57657 #define vqshrnh_n_s16(__p0, __p1) __extension__ ({ \
57658   int16_t __s0 = __p0; \
57659   int8_t __ret; \
57660   __ret = (int8_t) __builtin_neon_vqshrnh_n_s16(__s0, __p1); \
57661   __ret; \
57662 })
57663 #else
57664 #define vqshrnh_n_s16(__p0, __p1) __extension__ ({ \
57665   int16_t __s0 = __p0; \
57666   int8_t __ret; \
57667   __ret = (int8_t) __builtin_neon_vqshrnh_n_s16(__s0, __p1); \
57668   __ret; \
57669 })
57670 #endif
57671 
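/* vqshrun*: signed input, saturating shift right narrow to an unsigned result by immediate: high-half vector forms, then scalar forms. */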
57672 #ifdef __LITTLE_ENDIAN__
57673 #define vqshrun_high_n_s32(__p0_204, __p1_204, __p2_204) __extension__ ({ \
57674   int16x4_t __s0_204 = __p0_204; \
57675   int32x4_t __s1_204 = __p1_204; \
57676   int16x8_t __ret_204; \
57677   __ret_204 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_204), (int16x4_t)(vqshrun_n_s32(__s1_204, __p2_204)))); \
57678   __ret_204; \
57679 })
57680 #else
57681 #define vqshrun_high_n_s32(__p0_205, __p1_205, __p2_205) __extension__ ({ \
57682   int16x4_t __s0_205 = __p0_205; \
57683   int32x4_t __s1_205 = __p1_205; \
57684   int16x4_t __rev0_205;  __rev0_205 = __builtin_shufflevector(__s0_205, __s0_205, 3, 2, 1, 0); \
57685   int32x4_t __rev1_205;  __rev1_205 = __builtin_shufflevector(__s1_205, __s1_205, 3, 2, 1, 0); \
57686   int16x8_t __ret_205; \
57687   __ret_205 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_205), (int16x4_t)(__noswap_vqshrun_n_s32(__rev1_205, __p2_205)))); \
57688   __ret_205 = __builtin_shufflevector(__ret_205, __ret_205, 7, 6, 5, 4, 3, 2, 1, 0); \
57689   __ret_205; \
57690 })
57691 #endif
57692 
57693 #ifdef __LITTLE_ENDIAN__
57694 #define vqshrun_high_n_s64(__p0_206, __p1_206, __p2_206) __extension__ ({ \
57695   int32x2_t __s0_206 = __p0_206; \
57696   int64x2_t __s1_206 = __p1_206; \
57697   int32x4_t __ret_206; \
57698   __ret_206 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_206), (int32x2_t)(vqshrun_n_s64(__s1_206, __p2_206)))); \
57699   __ret_206; \
57700 })
57701 #else
57702 #define vqshrun_high_n_s64(__p0_207, __p1_207, __p2_207) __extension__ ({ \
57703   int32x2_t __s0_207 = __p0_207; \
57704   int64x2_t __s1_207 = __p1_207; \
57705   int32x2_t __rev0_207;  __rev0_207 = __builtin_shufflevector(__s0_207, __s0_207, 1, 0); \
57706   int64x2_t __rev1_207;  __rev1_207 = __builtin_shufflevector(__s1_207, __s1_207, 1, 0); \
57707   int32x4_t __ret_207; \
57708   __ret_207 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_207), (int32x2_t)(__noswap_vqshrun_n_s64(__rev1_207, __p2_207)))); \
57709   __ret_207 = __builtin_shufflevector(__ret_207, __ret_207, 3, 2, 1, 0); \
57710   __ret_207; \
57711 })
57712 #endif
57713 
57714 #ifdef __LITTLE_ENDIAN__
57715 #define vqshrun_high_n_s16(__p0_208, __p1_208, __p2_208) __extension__ ({ \
57716   int8x8_t __s0_208 = __p0_208; \
57717   int16x8_t __s1_208 = __p1_208; \
57718   int8x16_t __ret_208; \
57719   __ret_208 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_208), (int8x8_t)(vqshrun_n_s16(__s1_208, __p2_208)))); \
57720   __ret_208; \
57721 })
57722 #else
57723 #define vqshrun_high_n_s16(__p0_209, __p1_209, __p2_209) __extension__ ({ \
57724   int8x8_t __s0_209 = __p0_209; \
57725   int16x8_t __s1_209 = __p1_209; \
57726   int8x8_t __rev0_209;  __rev0_209 = __builtin_shufflevector(__s0_209, __s0_209, 7, 6, 5, 4, 3, 2, 1, 0); \
57727   int16x8_t __rev1_209;  __rev1_209 = __builtin_shufflevector(__s1_209, __s1_209, 7, 6, 5, 4, 3, 2, 1, 0); \
57728   int8x16_t __ret_209; \
57729   __ret_209 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_209), (int8x8_t)(__noswap_vqshrun_n_s16(__rev1_209, __p2_209)))); \
57730   __ret_209 = __builtin_shufflevector(__ret_209, __ret_209, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
57731   __ret_209; \
57732 })
57733 #endif
57734 
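/* Scalar saturating shift right unsigned narrow: vqshrun{h,s,d}_n_s* shift a signed
 * scalar right by the immediate and narrow it to an unsigned value of half the width;
 * negative results saturate to 0. */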
57735 #ifdef __LITTLE_ENDIAN__
57736 #define vqshruns_n_s32(__p0, __p1) __extension__ ({ \
57737   int32_t __s0 = __p0; \
57738   int16_t __ret; \
57739   __ret = (int16_t) __builtin_neon_vqshruns_n_s32(__s0, __p1); \
57740   __ret; \
57741 })
57742 #else
57743 #define vqshruns_n_s32(__p0, __p1) __extension__ ({ \
57744   int32_t __s0 = __p0; \
57745   int16_t __ret; \
57746   __ret = (int16_t) __builtin_neon_vqshruns_n_s32(__s0, __p1); \
57747   __ret; \
57748 })
57749 #endif
57750 
57751 #ifdef __LITTLE_ENDIAN__
57752 #define vqshrund_n_s64(__p0, __p1) __extension__ ({ \
57753   int64_t __s0 = __p0; \
57754   int32_t __ret; \
57755   __ret = (int32_t) __builtin_neon_vqshrund_n_s64(__s0, __p1); \
57756   __ret; \
57757 })
57758 #else
57759 #define vqshrund_n_s64(__p0, __p1) __extension__ ({ \
57760   int64_t __s0 = __p0; \
57761   int32_t __ret; \
57762   __ret = (int32_t) __builtin_neon_vqshrund_n_s64(__s0, __p1); \
57763   __ret; \
57764 })
57765 #endif
57766 
57767 #ifdef __LITTLE_ENDIAN__
57768 #define vqshrunh_n_s16(__p0, __p1) __extension__ ({ \
57769   int16_t __s0 = __p0; \
57770   int8_t __ret; \
57771   __ret = (int8_t) __builtin_neon_vqshrunh_n_s16(__s0, __p1); \
57772   __ret; \
57773 })
57774 #else
57775 #define vqshrunh_n_s16(__p0, __p1) __extension__ ({ \
57776   int16_t __s0 = __p0; \
57777   int8_t __ret; \
57778   __ret = (int8_t) __builtin_neon_vqshrunh_n_s16(__s0, __p1); \
57779   __ret; \
57780 })
57781 #endif
57782 
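/* Scalar saturating subtract: vqsub{b,h,s,d}_{u,s}* clamp the difference to the range
 * of the element type instead of wrapping, e.g. vqsubb_u8(10, 20) yields 0. */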
57783 #ifdef __LITTLE_ENDIAN__
57784 __ai uint8_t vqsubb_u8(uint8_t __p0, uint8_t __p1) {
57785   uint8_t __ret;
57786   __ret = (uint8_t) __builtin_neon_vqsubb_u8(__p0, __p1);
57787   return __ret;
57788 }
57789 #else
57790 __ai uint8_t vqsubb_u8(uint8_t __p0, uint8_t __p1) {
57791   uint8_t __ret;
57792   __ret = (uint8_t) __builtin_neon_vqsubb_u8(__p0, __p1);
57793   return __ret;
57794 }
57795 #endif
57796 
57797 #ifdef __LITTLE_ENDIAN__
57798 __ai uint32_t vqsubs_u32(uint32_t __p0, uint32_t __p1) {
57799   uint32_t __ret;
57800   __ret = (uint32_t) __builtin_neon_vqsubs_u32(__p0, __p1);
57801   return __ret;
57802 }
57803 #else
57804 __ai uint32_t vqsubs_u32(uint32_t __p0, uint32_t __p1) {
57805   uint32_t __ret;
57806   __ret = (uint32_t) __builtin_neon_vqsubs_u32(__p0, __p1);
57807   return __ret;
57808 }
57809 #endif
57810 
57811 #ifdef __LITTLE_ENDIAN__
57812 __ai uint64_t vqsubd_u64(uint64_t __p0, uint64_t __p1) {
57813   uint64_t __ret;
57814   __ret = (uint64_t) __builtin_neon_vqsubd_u64(__p0, __p1);
57815   return __ret;
57816 }
57817 #else
57818 __ai uint64_t vqsubd_u64(uint64_t __p0, uint64_t __p1) {
57819   uint64_t __ret;
57820   __ret = (uint64_t) __builtin_neon_vqsubd_u64(__p0, __p1);
57821   return __ret;
57822 }
57823 #endif
57824 
57825 #ifdef __LITTLE_ENDIAN__
57826 __ai uint16_t vqsubh_u16(uint16_t __p0, uint16_t __p1) {
57827   uint16_t __ret;
57828   __ret = (uint16_t) __builtin_neon_vqsubh_u16(__p0, __p1);
57829   return __ret;
57830 }
57831 #else
57832 __ai uint16_t vqsubh_u16(uint16_t __p0, uint16_t __p1) {
57833   uint16_t __ret;
57834   __ret = (uint16_t) __builtin_neon_vqsubh_u16(__p0, __p1);
57835   return __ret;
57836 }
57837 #endif
57838 
57839 #ifdef __LITTLE_ENDIAN__
57840 __ai int8_t vqsubb_s8(int8_t __p0, int8_t __p1) {
57841   int8_t __ret;
57842   __ret = (int8_t) __builtin_neon_vqsubb_s8(__p0, __p1);
57843   return __ret;
57844 }
57845 #else
57846 __ai int8_t vqsubb_s8(int8_t __p0, int8_t __p1) {
57847   int8_t __ret;
57848   __ret = (int8_t) __builtin_neon_vqsubb_s8(__p0, __p1);
57849   return __ret;
57850 }
57851 #endif
57852 
57853 #ifdef __LITTLE_ENDIAN__
57854 __ai int32_t vqsubs_s32(int32_t __p0, int32_t __p1) {
57855   int32_t __ret;
57856   __ret = (int32_t) __builtin_neon_vqsubs_s32(__p0, __p1);
57857   return __ret;
57858 }
57859 #else
57860 __ai int32_t vqsubs_s32(int32_t __p0, int32_t __p1) {
57861   int32_t __ret;
57862   __ret = (int32_t) __builtin_neon_vqsubs_s32(__p0, __p1);
57863   return __ret;
57864 }
57865 #endif
57866 
57867 #ifdef __LITTLE_ENDIAN__
57868 __ai int64_t vqsubd_s64(int64_t __p0, int64_t __p1) {
57869   int64_t __ret;
57870   __ret = (int64_t) __builtin_neon_vqsubd_s64(__p0, __p1);
57871   return __ret;
57872 }
57873 #else
57874 __ai int64_t vqsubd_s64(int64_t __p0, int64_t __p1) {
57875   int64_t __ret;
57876   __ret = (int64_t) __builtin_neon_vqsubd_s64(__p0, __p1);
57877   return __ret;
57878 }
57879 #endif
57880 
57881 #ifdef __LITTLE_ENDIAN__
57882 __ai int16_t vqsubh_s16(int16_t __p0, int16_t __p1) {
57883   int16_t __ret;
57884   __ret = (int16_t) __builtin_neon_vqsubh_s16(__p0, __p1);
57885   return __ret;
57886 }
57887 #else
57888 __ai int16_t vqsubh_s16(int16_t __p0, int16_t __p1) {
57889   int16_t __ret;
57890   __ret = (int16_t) __builtin_neon_vqsubh_s16(__p0, __p1);
57891   return __ret;
57892 }
57893 #endif
57894 
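/* vqtbl1: table lookup into a single 16-byte table. Each index byte of the second
 * operand selects a byte of the first; out-of-range indices produce 0. A minimal
 * usage sketch (variable names are illustrative) that reverses the bytes of a vector:
 *   static const uint8_t rev_idx[16] = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
 *   uint8x16_t r = vqtbl1q_u8(v, vld1q_u8(rev_idx));
 */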
57895 #ifdef __LITTLE_ENDIAN__
57896 __ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) {
57897   poly8x8_t __ret;
57898   __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 4);
57899   return __ret;
57900 }
57901 #else
57902 __ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) {
57903   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
57904   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
57905   poly8x8_t __ret;
57906   __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 4);
57907   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
57908   return __ret;
57909 }
57910 #endif
57911 
57912 #ifdef __LITTLE_ENDIAN__
57913 __ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) {
57914   poly8x16_t __ret;
57915   __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 36);
57916   return __ret;
57917 }
57918 #else
57919 __ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) {
57920   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
57921   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
57922   poly8x16_t __ret;
57923   __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36);
57924   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
57925   return __ret;
57926 }
57927 #endif
57928 
57929 #ifdef __LITTLE_ENDIAN__
57930 __ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
57931   uint8x16_t __ret;
57932   __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
57933   return __ret;
57934 }
57935 #else
57936 __ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
57937   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
57938   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
57939   uint8x16_t __ret;
57940   __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
57941   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
57942   return __ret;
57943 }
57944 #endif
57945 
57946 #ifdef __LITTLE_ENDIAN__
57947 __ai int8x16_t vqtbl1q_s8(int8x16_t __p0, int8x16_t __p1) {
57948   int8x16_t __ret;
57949   __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
57950   return __ret;
57951 }
57952 #else
57953 __ai int8x16_t vqtbl1q_s8(int8x16_t __p0, int8x16_t __p1) {
57954   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
57955   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
57956   int8x16_t __ret;
57957   __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
57958   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
57959   return __ret;
57960 }
57961 #endif
57962 
57963 #ifdef __LITTLE_ENDIAN__
57964 __ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) {
57965   uint8x8_t __ret;
57966   __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 16);
57967   return __ret;
57968 }
57969 #else
57970 __ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) {
57971   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
57972   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
57973   uint8x8_t __ret;
57974   __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 16);
57975   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
57976   return __ret;
57977 }
57978 #endif
57979 
57980 #ifdef __LITTLE_ENDIAN__
57981 __ai int8x8_t vqtbl1_s8(int8x16_t __p0, int8x8_t __p1) {
57982   int8x8_t __ret;
57983   __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 0);
57984   return __ret;
57985 }
57986 #else
57987 __ai int8x8_t vqtbl1_s8(int8x16_t __p0, int8x8_t __p1) {
57988   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
57989   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
57990   int8x8_t __ret;
57991   __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 0);
57992   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
57993   return __ret;
57994 }
57995 #endif
57996 
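/* vqtbl2: table lookup into the 32-byte table formed by __p0.val[0..1]. */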
57997 #ifdef __LITTLE_ENDIAN__
57998 __ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) {
57999   poly8x8_t __ret;
58000   __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 4);
58001   return __ret;
58002 }
58003 #else
58004 __ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) {
58005   poly8x16x2_t __rev0;
58006   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58007   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58008   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
58009   poly8x8_t __ret;
58010   __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 4);
58011   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
58012   return __ret;
58013 }
58014 #endif
58015 
58016 #ifdef __LITTLE_ENDIAN__
58017 __ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) {
58018   poly8x16_t __ret;
58019   __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 36);
58020   return __ret;
58021 }
58022 #else
58023 __ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) {
58024   poly8x16x2_t __rev0;
58025   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58026   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58027   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58028   poly8x16_t __ret;
58029   __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 36);
58030   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58031   return __ret;
58032 }
58033 #endif
58034 
58035 #ifdef __LITTLE_ENDIAN__
58036 __ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) {
58037   uint8x16_t __ret;
58038   __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 48);
58039   return __ret;
58040 }
58041 #else
58042 __ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) {
58043   uint8x16x2_t __rev0;
58044   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58045   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58046   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58047   uint8x16_t __ret;
58048   __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 48);
58049   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58050   return __ret;
58051 }
58052 #endif
58053 
58054 #ifdef __LITTLE_ENDIAN__
58055 __ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, int8x16_t __p1) {
58056   int8x16_t __ret;
58057   __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 32);
58058   return __ret;
58059 }
58060 #else
58061 __ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, int8x16_t __p1) {
58062   int8x16x2_t __rev0;
58063   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58064   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58065   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58066   int8x16_t __ret;
58067   __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 32);
58068   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58069   return __ret;
58070 }
58071 #endif
58072 
58073 #ifdef __LITTLE_ENDIAN__
58074 __ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) {
58075   uint8x8_t __ret;
58076   __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 16);
58077   return __ret;
58078 }
58079 #else
58080 __ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) {
58081   uint8x16x2_t __rev0;
58082   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58083   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58084   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
58085   uint8x8_t __ret;
58086   __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 16);
58087   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
58088   return __ret;
58089 }
58090 #endif
58091 
58092 #ifdef __LITTLE_ENDIAN__
58093 __ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, int8x8_t __p1) {
58094   int8x8_t __ret;
58095   __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 0);
58096   return __ret;
58097 }
58098 #else
58099 __ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, int8x8_t __p1) {
58100   int8x16x2_t __rev0;
58101   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58102   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58103   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
58104   int8x8_t __ret;
58105   __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 0);
58106   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
58107   return __ret;
58108 }
58109 #endif
58110 
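/* vqtbl3: table lookup into the 48-byte table formed by __p0.val[0..2]. */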
58111 #ifdef __LITTLE_ENDIAN__
58112 __ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) {
58113   poly8x8_t __ret;
58114   __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 4);
58115   return __ret;
58116 }
58117 #else
58118 __ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) {
58119   poly8x16x3_t __rev0;
58120   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58121   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58122   __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58123   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
58124   poly8x8_t __ret;
58125   __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 4);
58126   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
58127   return __ret;
58128 }
58129 #endif
58130 
58131 #ifdef __LITTLE_ENDIAN__
58132 __ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) {
58133   poly8x16_t __ret;
58134   __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 36);
58135   return __ret;
58136 }
58137 #else
58138 __ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) {
58139   poly8x16x3_t __rev0;
58140   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58141   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58142   __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58143   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58144   poly8x16_t __ret;
58145   __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 36);
58146   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58147   return __ret;
58148 }
58149 #endif
58150 
58151 #ifdef __LITTLE_ENDIAN__
58152 __ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) {
58153   uint8x16_t __ret;
58154   __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 48);
58155   return __ret;
58156 }
58157 #else
58158 __ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) {
58159   uint8x16x3_t __rev0;
58160   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58161   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58162   __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58163   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58164   uint8x16_t __ret;
58165   __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 48);
58166   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58167   return __ret;
58168 }
58169 #endif
58170 
58171 #ifdef __LITTLE_ENDIAN__
58172 __ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, int8x16_t __p1) {
58173   int8x16_t __ret;
58174   __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 32);
58175   return __ret;
58176 }
58177 #else
58178 __ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, int8x16_t __p1) {
58179   int8x16x3_t __rev0;
58180   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58181   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58182   __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58183   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58184   int8x16_t __ret;
58185   __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 32);
58186   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58187   return __ret;
58188 }
58189 #endif
58190 
58191 #ifdef __LITTLE_ENDIAN__
58192 __ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) {
58193   uint8x8_t __ret;
58194   __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 16);
58195   return __ret;
58196 }
58197 #else
58198 __ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) {
58199   uint8x16x3_t __rev0;
58200   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58201   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58202   __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58203   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
58204   uint8x8_t __ret;
58205   __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 16);
58206   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
58207   return __ret;
58208 }
58209 #endif
58210 
58211 #ifdef __LITTLE_ENDIAN__
58212 __ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, int8x8_t __p1) {
58213   int8x8_t __ret;
58214   __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 0);
58215   return __ret;
58216 }
58217 #else
58218 __ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, int8x8_t __p1) {
58219   int8x16x3_t __rev0;
58220   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58221   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58222   __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58223   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
58224   int8x8_t __ret;
58225   __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 0);
58226   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
58227   return __ret;
58228 }
58229 #endif
58230 
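/* vqtbl4: table lookup into the 64-byte table formed by __p0.val[0..3]. */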
58231 #ifdef __LITTLE_ENDIAN__
58232 __ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) {
58233   poly8x8_t __ret;
58234   __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 4);
58235   return __ret;
58236 }
58237 #else
58238 __ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) {
58239   poly8x16x4_t __rev0;
58240   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58241   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58242   __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58243   __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58244   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
58245   poly8x8_t __ret;
58246   __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 4);
58247   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
58248   return __ret;
58249 }
58250 #endif
58251 
58252 #ifdef __LITTLE_ENDIAN__
58253 __ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) {
58254   poly8x16_t __ret;
58255   __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 36);
58256   return __ret;
58257 }
58258 #else
58259 __ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) {
58260   poly8x16x4_t __rev0;
58261   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58262   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58263   __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58264   __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58265   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58266   poly8x16_t __ret;
58267   __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 36);
58268   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58269   return __ret;
58270 }
58271 #endif
58272 
58273 #ifdef __LITTLE_ENDIAN__
58274 __ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) {
58275   uint8x16_t __ret;
58276   __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 48);
58277   return __ret;
58278 }
58279 #else
58280 __ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) {
58281   uint8x16x4_t __rev0;
58282   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58283   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58284   __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58285   __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58286   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58287   uint8x16_t __ret;
58288   __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 48);
58289   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58290   return __ret;
58291 }
58292 #endif
58293 
58294 #ifdef __LITTLE_ENDIAN__
58295 __ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, int8x16_t __p1) {
58296   int8x16_t __ret;
58297   __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 32);
58298   return __ret;
58299 }
58300 #else
58301 __ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, int8x16_t __p1) {
58302   int8x16x4_t __rev0;
58303   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58304   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58305   __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58306   __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58307   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58308   int8x16_t __ret;
58309   __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 32);
58310   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58311   return __ret;
58312 }
58313 #endif
58314 
58315 #ifdef __LITTLE_ENDIAN__
58316 __ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) {
58317   uint8x8_t __ret;
58318   __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 16);
58319   return __ret;
58320 }
58321 #else
58322 __ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) {
58323   uint8x16x4_t __rev0;
58324   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58325   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58326   __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58327   __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58328   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
58329   uint8x8_t __ret;
58330   __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 16);
58331   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
58332   return __ret;
58333 }
58334 #endif
58335 
58336 #ifdef __LITTLE_ENDIAN__
58337 __ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, int8x8_t __p1) {
58338   int8x8_t __ret;
58339   __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 0);
58340   return __ret;
58341 }
58342 #else
58343 __ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, int8x8_t __p1) {
58344   int8x16x4_t __rev0;
58345   __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58346   __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58347   __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58348   __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58349   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
58350   int8x8_t __ret;
58351   __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 0);
58352   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
58353   return __ret;
58354 }
58355 #endif
58356 
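/* vqtbx1: table lookup extension into a single 16-byte table __p1. Index bytes in
 * __p2 that are out of range leave the corresponding byte of __p0 unchanged. */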
58357 #ifdef __LITTLE_ENDIAN__
58358 __ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) {
58359   poly8x8_t __ret;
58360   __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 4);
58361   return __ret;
58362 }
58363 #else
58364 __ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) {
58365   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
58366   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58367   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
58368   poly8x8_t __ret;
58369   __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 4);
58370   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
58371   return __ret;
58372 }
58373 #endif
58374 
58375 #ifdef __LITTLE_ENDIAN__
58376 __ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) {
58377   poly8x16_t __ret;
58378   __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36);
58379   return __ret;
58380 }
58381 #else
58382 __ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) {
58383   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58384   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58385   uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58386   poly8x16_t __ret;
58387   __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36);
58388   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58389   return __ret;
58390 }
58391 #endif
58392 
58393 #ifdef __LITTLE_ENDIAN__
58394 __ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
58395   uint8x16_t __ret;
58396   __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48);
58397   return __ret;
58398 }
58399 #else
58400 __ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
58401   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58402   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58403   uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58404   uint8x16_t __ret;
58405   __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
58406   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58407   return __ret;
58408 }
58409 #endif
58410 
58411 #ifdef __LITTLE_ENDIAN__
58412 __ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
58413   int8x16_t __ret;
58414   __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32);
58415   return __ret;
58416 }
58417 #else
58418 __ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
58419   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58420   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58421   int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58422   int8x16_t __ret;
58423   __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
58424   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58425   return __ret;
58426 }
58427 #endif
58428 
58429 #ifdef __LITTLE_ENDIAN__
58430 __ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) {
58431   uint8x8_t __ret;
58432   __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 16);
58433   return __ret;
58434 }
58435 #else
58436 __ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) {
58437   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
58438   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58439   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
58440   uint8x8_t __ret;
58441   __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 16);
58442   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
58443   return __ret;
58444 }
58445 #endif
58446 
58447 #ifdef __LITTLE_ENDIAN__
58448 __ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, int8x8_t __p2) {
58449   int8x8_t __ret;
58450   __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 0);
58451   return __ret;
58452 }
58453 #else
58454 __ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, int8x8_t __p2) {
58455   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
58456   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58457   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
58458   int8x8_t __ret;
58459   __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 0);
58460   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
58461   return __ret;
58462 }
58463 #endif
58464 
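/* vqtbx2: table lookup extension into the 32-byte table __p1.val[0..1]; out-of-range
 * indices keep the byte from __p0. */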
58465 #ifdef __LITTLE_ENDIAN__
58466 __ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) {
58467   poly8x8_t __ret;
58468   __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 4);
58469   return __ret;
58470 }
58471 #else
58472 __ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) {
58473   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
58474   poly8x16x2_t __rev1;
58475   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58476   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58477   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
58478   poly8x8_t __ret;
58479   __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 4);
58480   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
58481   return __ret;
58482 }
58483 #endif
58484 
58485 #ifdef __LITTLE_ENDIAN__
58486 __ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) {
58487   poly8x16_t __ret;
58488   __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 36);
58489   return __ret;
58490 }
58491 #else
58492 __ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) {
58493   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58494   poly8x16x2_t __rev1;
58495   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58496   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58497   uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58498   poly8x16_t __ret;
58499   __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 36);
58500   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58501   return __ret;
58502 }
58503 #endif
58504 
58505 #ifdef __LITTLE_ENDIAN__
58506 __ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) {
58507   uint8x16_t __ret;
58508   __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 48);
58509   return __ret;
58510 }
58511 #else
58512 __ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) {
58513   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58514   uint8x16x2_t __rev1;
58515   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58516   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58517   uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58518   uint8x16_t __ret;
58519   __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 48);
58520   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58521   return __ret;
58522 }
58523 #endif
58524 
58525 #ifdef __LITTLE_ENDIAN__
58526 __ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, int8x16_t __p2) {
58527   int8x16_t __ret;
58528   __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 32);
58529   return __ret;
58530 }
58531 #else
58532 __ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, int8x16_t __p2) {
58533   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58534   int8x16x2_t __rev1;
58535   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58536   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58537   int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58538   int8x16_t __ret;
58539   __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 32);
58540   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58541   return __ret;
58542 }
58543 #endif
58544 
58545 #ifdef __LITTLE_ENDIAN__
58546 __ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) {
58547   uint8x8_t __ret;
58548   __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 16);
58549   return __ret;
58550 }
58551 #else
58552 __ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) {
58553   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
58554   uint8x16x2_t __rev1;
58555   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58556   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58557   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
58558   uint8x8_t __ret;
58559   __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 16);
58560   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
58561   return __ret;
58562 }
58563 #endif
58564 
58565 #ifdef __LITTLE_ENDIAN__
58566 __ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, int8x8_t __p2) {
58567   int8x8_t __ret;
58568   __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 0);
58569   return __ret;
58570 }
58571 #else
58572 __ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, int8x8_t __p2) {
58573   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
58574   int8x16x2_t __rev1;
58575   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58576   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58577   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
58578   int8x8_t __ret;
58579   __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 0);
58580   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
58581   return __ret;
58582 }
58583 #endif
58584 
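/* vqtbx3: table lookup extension into the 48-byte table __p1.val[0..2]; out-of-range
 * indices keep the byte from __p0. */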
58585 #ifdef __LITTLE_ENDIAN__
58586 __ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) {
58587   poly8x8_t __ret;
58588   __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 4);
58589   return __ret;
58590 }
58591 #else
58592 __ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) {
58593   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
58594   poly8x16x3_t __rev1;
58595   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58596   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58597   __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58598   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
58599   poly8x8_t __ret;
58600   __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 4);
58601   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
58602   return __ret;
58603 }
58604 #endif
58605 
58606 #ifdef __LITTLE_ENDIAN__
58607 __ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) {
58608   poly8x16_t __ret;
58609   __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 36);
58610   return __ret;
58611 }
58612 #else
58613 __ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) {
58614   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58615   poly8x16x3_t __rev1;
58616   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58617   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58618   __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58619   uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58620   poly8x16_t __ret;
58621   __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 36);
58622   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58623   return __ret;
58624 }
58625 #endif
58626 
58627 #ifdef __LITTLE_ENDIAN__
58628 __ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) {
58629   uint8x16_t __ret;
58630   __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 48);
58631   return __ret;
58632 }
58633 #else
58634 __ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) {
58635   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58636   uint8x16x3_t __rev1;
58637   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58638   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58639   __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58640   uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58641   uint8x16_t __ret;
58642   __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 48);
58643   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58644   return __ret;
58645 }
58646 #endif
58647 
58648 #ifdef __LITTLE_ENDIAN__
58649 __ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, int8x16_t __p2) {
58650   int8x16_t __ret;
58651   __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 32);
58652   return __ret;
58653 }
58654 #else
58655 __ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, int8x16_t __p2) {
58656   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58657   int8x16x3_t __rev1;
58658   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58659   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58660   __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58661   int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58662   int8x16_t __ret;
58663   __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 32);
58664   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58665   return __ret;
58666 }
58667 #endif
58668 
58669 #ifdef __LITTLE_ENDIAN__
58670 __ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) {
58671   uint8x8_t __ret;
58672   __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 16);
58673   return __ret;
58674 }
58675 #else
58676 __ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) {
58677   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
58678   uint8x16x3_t __rev1;
58679   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58680   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58681   __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58682   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
58683   uint8x8_t __ret;
58684   __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 16);
58685   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
58686   return __ret;
58687 }
58688 #endif
58689 
58690 #ifdef __LITTLE_ENDIAN__
58691 __ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, int8x8_t __p2) {
58692   int8x8_t __ret;
58693   __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 0);
58694   return __ret;
58695 }
58696 #else
58697 __ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, int8x8_t __p2) {
58698   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
58699   int8x16x3_t __rev1;
58700   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58701   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58702   __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58703   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
58704   int8x8_t __ret;
58705   __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 0);
58706   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
58707   return __ret;
58708 }
58709 #endif
58710 
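/* vqtbx3* and vqtbx4* are the extended table-lookup (TBX) intrinsics: each
 * byte of the index operand selects a byte from the 3- or 4-register table in
 * the second operand, and an out-of-range index leaves the corresponding byte
 * of the first operand unchanged (TBL, by contrast, would write zero). */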
58711 #ifdef __LITTLE_ENDIAN__
58712 __ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) {
58713   poly8x8_t __ret;
58714   __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 4);
58715   return __ret;
58716 }
58717 #else
58718 __ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) {
58719   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
58720   poly8x16x4_t __rev1;
58721   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58722   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58723   __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58724   __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58725   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
58726   poly8x8_t __ret;
58727   __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 4);
58728   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
58729   return __ret;
58730 }
58731 #endif
58732 
58733 #ifdef __LITTLE_ENDIAN__
58734 __ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) {
58735   poly8x16_t __ret;
58736   __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 36);
58737   return __ret;
58738 }
58739 #else
58740 __ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) {
58741   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58742   poly8x16x4_t __rev1;
58743   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58744   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58745   __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58746   __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58747   uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58748   poly8x16_t __ret;
58749   __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 36);
58750   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58751   return __ret;
58752 }
58753 #endif
58754 
58755 #ifdef __LITTLE_ENDIAN__
58756 __ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) {
58757   uint8x16_t __ret;
58758   __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 48);
58759   return __ret;
58760 }
58761 #else
58762 __ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) {
58763   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58764   uint8x16x4_t __rev1;
58765   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58766   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58767   __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58768   __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58769   uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58770   uint8x16_t __ret;
58771   __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 48);
58772   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58773   return __ret;
58774 }
58775 #endif
58776 
58777 #ifdef __LITTLE_ENDIAN__
58778 __ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, int8x16_t __p2) {
58779   int8x16_t __ret;
58780   __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 32);
58781   return __ret;
58782 }
58783 #else
58784 __ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, int8x16_t __p2) {
58785   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58786   int8x16x4_t __rev1;
58787   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58788   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58789   __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58790   __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58791   int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58792   int8x16_t __ret;
58793   __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 32);
58794   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58795   return __ret;
58796 }
58797 #endif
58798 
58799 #ifdef __LITTLE_ENDIAN__
58800 __ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) {
58801   uint8x8_t __ret;
58802   __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 16);
58803   return __ret;
58804 }
58805 #else
58806 __ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) {
58807   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
58808   uint8x16x4_t __rev1;
58809   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58810   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58811   __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58812   __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58813   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
58814   uint8x8_t __ret;
58815   __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 16);
58816   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
58817   return __ret;
58818 }
58819 #endif
58820 
58821 #ifdef __LITTLE_ENDIAN__
58822 __ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, int8x8_t __p2) {
58823   int8x8_t __ret;
58824   __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 0);
58825   return __ret;
58826 }
58827 #else
58828 __ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, int8x8_t __p2) {
58829   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
58830   int8x16x4_t __rev1;
58831   __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58832   __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58833   __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58834   __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58835   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
58836   int8x8_t __ret;
58837   __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 0);
58838   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
58839   return __ret;
58840 }
58841 #endif
58842 
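/* vraddhn_high_*: rounding add and narrow, writing the high half.  vraddhn
 * adds the two wide vectors, keeps the rounded most-significant half of each
 * element, and the _high form places that narrowed result in the upper half
 * of a full-width register whose lower half is the first operand (via
 * vcombine). */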
58843 #ifdef __LITTLE_ENDIAN__
58844 __ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
58845   uint16x8_t __ret;
58846   __ret = vcombine_u16(__p0, vraddhn_u32(__p1, __p2));
58847   return __ret;
58848 }
58849 #else
58850 __ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
58851   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
58852   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
58853   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
58854   uint16x8_t __ret;
58855   __ret = __noswap_vcombine_u16(__rev0, __noswap_vraddhn_u32(__rev1, __rev2));
58856   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
58857   return __ret;
58858 }
58859 #endif
58860 
58861 #ifdef __LITTLE_ENDIAN__
58862 __ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
58863   uint32x4_t __ret;
58864   __ret = vcombine_u32(__p0, vraddhn_u64(__p1, __p2));
58865   return __ret;
58866 }
58867 #else
58868 __ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
58869   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
58870   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
58871   uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
58872   uint32x4_t __ret;
58873   __ret = __noswap_vcombine_u32(__rev0, __noswap_vraddhn_u64(__rev1, __rev2));
58874   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
58875   return __ret;
58876 }
58877 #endif
58878 
58879 #ifdef __LITTLE_ENDIAN__
58880 __ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
58881   uint8x16_t __ret;
58882   __ret = vcombine_u8(__p0, vraddhn_u16(__p1, __p2));
58883   return __ret;
58884 }
58885 #else
58886 __ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
58887   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
58888   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
58889   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
58890   uint8x16_t __ret;
58891   __ret = __noswap_vcombine_u8(__rev0, __noswap_vraddhn_u16(__rev1, __rev2));
58892   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58893   return __ret;
58894 }
58895 #endif
58896 
58897 #ifdef __LITTLE_ENDIAN__
58898 __ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
58899   int16x8_t __ret;
58900   __ret = vcombine_s16(__p0, vraddhn_s32(__p1, __p2));
58901   return __ret;
58902 }
58903 #else
58904 __ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
58905   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
58906   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
58907   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
58908   int16x8_t __ret;
58909   __ret = __noswap_vcombine_s16(__rev0, __noswap_vraddhn_s32(__rev1, __rev2));
58910   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
58911   return __ret;
58912 }
58913 #endif
58914 
58915 #ifdef __LITTLE_ENDIAN__
58916 __ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
58917   int32x4_t __ret;
58918   __ret = vcombine_s32(__p0, vraddhn_s64(__p1, __p2));
58919   return __ret;
58920 }
58921 #else
58922 __ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
58923   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
58924   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
58925   int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
58926   int32x4_t __ret;
58927   __ret = __noswap_vcombine_s32(__rev0, __noswap_vraddhn_s64(__rev1, __rev2));
58928   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
58929   return __ret;
58930 }
58931 #endif
58932 
58933 #ifdef __LITTLE_ENDIAN__
58934 __ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
58935   int8x16_t __ret;
58936   __ret = vcombine_s8(__p0, vraddhn_s16(__p1, __p2));
58937   return __ret;
58938 }
58939 #else
58940 __ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
58941   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
58942   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
58943   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
58944   int8x16_t __ret;
58945   __ret = __noswap_vcombine_s8(__rev0, __noswap_vraddhn_s16(__rev1, __rev2));
58946   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58947   return __ret;
58948 }
58949 #endif
58950 
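/* vrbit*: reverse the order of the bits within each byte of the vector
 * (the AArch64 RBIT instruction applied element-wise). */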
58951 #ifdef __LITTLE_ENDIAN__
58952 __ai poly8x8_t vrbit_p8(poly8x8_t __p0) {
58953   poly8x8_t __ret;
58954   __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 4);
58955   return __ret;
58956 }
58957 #else
58958 __ai poly8x8_t vrbit_p8(poly8x8_t __p0) {
58959   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
58960   poly8x8_t __ret;
58961   __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 4);
58962   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
58963   return __ret;
58964 }
58965 #endif
58966 
58967 #ifdef __LITTLE_ENDIAN__
58968 __ai poly8x16_t vrbitq_p8(poly8x16_t __p0) {
58969   poly8x16_t __ret;
58970   __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 36);
58971   return __ret;
58972 }
58973 #else
58974 __ai poly8x16_t vrbitq_p8(poly8x16_t __p0) {
58975   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58976   poly8x16_t __ret;
58977   __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 36);
58978   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58979   return __ret;
58980 }
58981 #endif
58982 
58983 #ifdef __LITTLE_ENDIAN__
58984 __ai uint8x16_t vrbitq_u8(uint8x16_t __p0) {
58985   uint8x16_t __ret;
58986   __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 48);
58987   return __ret;
58988 }
58989 #else
58990 __ai uint8x16_t vrbitq_u8(uint8x16_t __p0) {
58991   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58992   uint8x16_t __ret;
58993   __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 48);
58994   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58995   return __ret;
58996 }
58997 #endif
58998 
58999 #ifdef __LITTLE_ENDIAN__
59000 __ai int8x16_t vrbitq_s8(int8x16_t __p0) {
59001   int8x16_t __ret;
59002   __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 32);
59003   return __ret;
59004 }
59005 #else
59006 __ai int8x16_t vrbitq_s8(int8x16_t __p0) {
59007   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
59008   int8x16_t __ret;
59009   __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 32);
59010   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
59011   return __ret;
59012 }
59013 #endif
59014 
59015 #ifdef __LITTLE_ENDIAN__
59016 __ai uint8x8_t vrbit_u8(uint8x8_t __p0) {
59017   uint8x8_t __ret;
59018   __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 16);
59019   return __ret;
59020 }
59021 #else
59022 __ai uint8x8_t vrbit_u8(uint8x8_t __p0) {
59023   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
59024   uint8x8_t __ret;
59025   __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 16);
59026   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
59027   return __ret;
59028 }
59029 #endif
59030 
59031 #ifdef __LITTLE_ENDIAN__
59032 __ai int8x8_t vrbit_s8(int8x8_t __p0) {
59033   int8x8_t __ret;
59034   __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 0);
59035   return __ret;
59036 }
59037 #else
59038 __ai int8x8_t vrbit_s8(int8x8_t __p0) {
59039   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
59040   int8x8_t __ret;
59041   __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 0);
59042   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
59043   return __ret;
59044 }
59045 #endif
59046 
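/* vrecpe*: reciprocal estimate; vrecps*: reciprocal step, which computes
 * 2.0 - a*b and is normally used to refine the estimate by Newton-Raphson
 * iteration, e.g. (illustrative only):
 *   float64x2_t x = vrecpeq_f64(d);          // coarse estimate of 1/d
 *   x = vmulq_f64(x, vrecpsq_f64(d, x));     // one refinement step
 * vrecpx*: floating-point reciprocal exponent (FRECPX). */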
59047 #ifdef __LITTLE_ENDIAN__
59048 __ai float64x2_t vrecpeq_f64(float64x2_t __p0) {
59049   float64x2_t __ret;
59050   __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 42);
59051   return __ret;
59052 }
59053 #else
59054 __ai float64x2_t vrecpeq_f64(float64x2_t __p0) {
59055   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
59056   float64x2_t __ret;
59057   __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 42);
59058   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
59059   return __ret;
59060 }
59061 #endif
59062 
59063 #ifdef __LITTLE_ENDIAN__
59064 __ai float64x1_t vrecpe_f64(float64x1_t __p0) {
59065   float64x1_t __ret;
59066   __ret = (float64x1_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 10);
59067   return __ret;
59068 }
59069 #else
59070 __ai float64x1_t vrecpe_f64(float64x1_t __p0) {
59071   float64x1_t __ret;
59072   __ret = (float64x1_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 10);
59073   return __ret;
59074 }
59075 #endif
59076 
59077 #ifdef __LITTLE_ENDIAN__
59078 __ai float64_t vrecped_f64(float64_t __p0) {
59079   float64_t __ret;
59080   __ret = (float64_t) __builtin_neon_vrecped_f64(__p0);
59081   return __ret;
59082 }
59083 #else
59084 __ai float64_t vrecped_f64(float64_t __p0) {
59085   float64_t __ret;
59086   __ret = (float64_t) __builtin_neon_vrecped_f64(__p0);
59087   return __ret;
59088 }
59089 #endif
59090 
59091 #ifdef __LITTLE_ENDIAN__
59092 __ai float32_t vrecpes_f32(float32_t __p0) {
59093   float32_t __ret;
59094   __ret = (float32_t) __builtin_neon_vrecpes_f32(__p0);
59095   return __ret;
59096 }
59097 #else
59098 __ai float32_t vrecpes_f32(float32_t __p0) {
59099   float32_t __ret;
59100   __ret = (float32_t) __builtin_neon_vrecpes_f32(__p0);
59101   return __ret;
59102 }
59103 #endif
59104 
59105 #ifdef __LITTLE_ENDIAN__
59106 __ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) {
59107   float64x2_t __ret;
59108   __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
59109   return __ret;
59110 }
59111 #else
59112 __ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) {
59113   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
59114   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
59115   float64x2_t __ret;
59116   __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
59117   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
59118   return __ret;
59119 }
59120 #endif
59121 
59122 #ifdef __LITTLE_ENDIAN__
59123 __ai float64x1_t vrecps_f64(float64x1_t __p0, float64x1_t __p1) {
59124   float64x1_t __ret;
59125   __ret = (float64x1_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
59126   return __ret;
59127 }
59128 #else
59129 __ai float64x1_t vrecps_f64(float64x1_t __p0, float64x1_t __p1) {
59130   float64x1_t __ret;
59131   __ret = (float64x1_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
59132   return __ret;
59133 }
59134 #endif
59135 
59136 #ifdef __LITTLE_ENDIAN__
59137 __ai float64_t vrecpsd_f64(float64_t __p0, float64_t __p1) {
59138   float64_t __ret;
59139   __ret = (float64_t) __builtin_neon_vrecpsd_f64(__p0, __p1);
59140   return __ret;
59141 }
59142 #else
59143 __ai float64_t vrecpsd_f64(float64_t __p0, float64_t __p1) {
59144   float64_t __ret;
59145   __ret = (float64_t) __builtin_neon_vrecpsd_f64(__p0, __p1);
59146   return __ret;
59147 }
59148 #endif
59149 
59150 #ifdef __LITTLE_ENDIAN__
59151 __ai float32_t vrecpss_f32(float32_t __p0, float32_t __p1) {
59152   float32_t __ret;
59153   __ret = (float32_t) __builtin_neon_vrecpss_f32(__p0, __p1);
59154   return __ret;
59155 }
59156 #else
59157 __ai float32_t vrecpss_f32(float32_t __p0, float32_t __p1) {
59158   float32_t __ret;
59159   __ret = (float32_t) __builtin_neon_vrecpss_f32(__p0, __p1);
59160   return __ret;
59161 }
59162 #endif
59163 
59164 #ifdef __LITTLE_ENDIAN__
59165 __ai float64_t vrecpxd_f64(float64_t __p0) {
59166   float64_t __ret;
59167   __ret = (float64_t) __builtin_neon_vrecpxd_f64(__p0);
59168   return __ret;
59169 }
59170 #else
59171 __ai float64_t vrecpxd_f64(float64_t __p0) {
59172   float64_t __ret;
59173   __ret = (float64_t) __builtin_neon_vrecpxd_f64(__p0);
59174   return __ret;
59175 }
59176 #endif
59177 
59178 #ifdef __LITTLE_ENDIAN__
59179 __ai float32_t vrecpxs_f32(float32_t __p0) {
59180   float32_t __ret;
59181   __ret = (float32_t) __builtin_neon_vrecpxs_f32(__p0);
59182   return __ret;
59183 }
59184 #else
59185 __ai float32_t vrecpxs_f32(float32_t __p0) {
59186   float32_t __ret;
59187   __ret = (float32_t) __builtin_neon_vrecpxs_f32(__p0);
59188   return __ret;
59189 }
59190 #endif
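/* Floating-point round-to-integral family (one FRINT* instruction each):
 *   vrnd  - round toward zero                               (FRINTZ)
 *   vrnda - round to nearest, ties away from zero           (FRINTA)
 *   vrndi - round using the current FPCR rounding mode      (FRINTI)
 *   vrndm - round toward minus infinity                     (FRINTM)
 *   vrndn - round to nearest, ties to even                  (FRINTN)
 *   vrndp - round toward plus infinity                      (FRINTP)
 *   vrndx - round using the current mode, signalling Inexact (FRINTX) */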
59191 
59192 #ifdef __LITTLE_ENDIAN__
59193 __ai float64x2_t vrndq_f64(float64x2_t __p0) {
59194   float64x2_t __ret;
59195   __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 42);
59196   return __ret;
59197 }
59198 #else
59199 __ai float64x2_t vrndq_f64(float64x2_t __p0) {
59200   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
59201   float64x2_t __ret;
59202   __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 42);
59203   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
59204   return __ret;
59205 }
59206 #endif
59207 
59208 #ifdef __LITTLE_ENDIAN__
59209 __ai float32x4_t vrndq_f32(float32x4_t __p0) {
59210   float32x4_t __ret;
59211   __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 41);
59212   return __ret;
59213 }
59214 #else
59215 __ai float32x4_t vrndq_f32(float32x4_t __p0) {
59216   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
59217   float32x4_t __ret;
59218   __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 41);
59219   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
59220   return __ret;
59221 }
59222 #endif
59223 
59224 #ifdef __LITTLE_ENDIAN__
59225 __ai float64x1_t vrnd_f64(float64x1_t __p0) {
59226   float64x1_t __ret;
59227   __ret = (float64x1_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 10);
59228   return __ret;
59229 }
59230 #else
59231 __ai float64x1_t vrnd_f64(float64x1_t __p0) {
59232   float64x1_t __ret;
59233   __ret = (float64x1_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 10);
59234   return __ret;
59235 }
59236 #endif
59237 
59238 #ifdef __LITTLE_ENDIAN__
59239 __ai float32x2_t vrnd_f32(float32x2_t __p0) {
59240   float32x2_t __ret;
59241   __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 9);
59242   return __ret;
59243 }
59244 #else
59245 __ai float32x2_t vrnd_f32(float32x2_t __p0) {
59246   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
59247   float32x2_t __ret;
59248   __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__rev0, 9);
59249   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
59250   return __ret;
59251 }
59252 #endif
59253 
59254 #ifdef __LITTLE_ENDIAN__
59255 __ai float64x2_t vrndaq_f64(float64x2_t __p0) {
59256   float64x2_t __ret;
59257   __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 42);
59258   return __ret;
59259 }
59260 #else
59261 __ai float64x2_t vrndaq_f64(float64x2_t __p0) {
59262   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
59263   float64x2_t __ret;
59264   __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 42);
59265   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
59266   return __ret;
59267 }
59268 #endif
59269 
59270 #ifdef __LITTLE_ENDIAN__
59271 __ai float32x4_t vrndaq_f32(float32x4_t __p0) {
59272   float32x4_t __ret;
59273   __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 41);
59274   return __ret;
59275 }
59276 #else
59277 __ai float32x4_t vrndaq_f32(float32x4_t __p0) {
59278   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
59279   float32x4_t __ret;
59280   __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 41);
59281   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
59282   return __ret;
59283 }
59284 #endif
59285 
59286 #ifdef __LITTLE_ENDIAN__
59287 __ai float64x1_t vrnda_f64(float64x1_t __p0) {
59288   float64x1_t __ret;
59289   __ret = (float64x1_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 10);
59290   return __ret;
59291 }
59292 #else
59293 __ai float64x1_t vrnda_f64(float64x1_t __p0) {
59294   float64x1_t __ret;
59295   __ret = (float64x1_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 10);
59296   return __ret;
59297 }
59298 #endif
59299 
59300 #ifdef __LITTLE_ENDIAN__
59301 __ai float32x2_t vrnda_f32(float32x2_t __p0) {
59302   float32x2_t __ret;
59303   __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 9);
59304   return __ret;
59305 }
59306 #else
59307 __ai float32x2_t vrnda_f32(float32x2_t __p0) {
59308   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
59309   float32x2_t __ret;
59310   __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__rev0, 9);
59311   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
59312   return __ret;
59313 }
59314 #endif
59315 
59316 #ifdef __LITTLE_ENDIAN__
59317 __ai float64x2_t vrndiq_f64(float64x2_t __p0) {
59318   float64x2_t __ret;
59319   __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 42);
59320   return __ret;
59321 }
59322 #else
59323 __ai float64x2_t vrndiq_f64(float64x2_t __p0) {
59324   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
59325   float64x2_t __ret;
59326   __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 42);
59327   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
59328   return __ret;
59329 }
59330 #endif
59331 
59332 #ifdef __LITTLE_ENDIAN__
59333 __ai float32x4_t vrndiq_f32(float32x4_t __p0) {
59334   float32x4_t __ret;
59335   __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 41);
59336   return __ret;
59337 }
59338 #else
59339 __ai float32x4_t vrndiq_f32(float32x4_t __p0) {
59340   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
59341   float32x4_t __ret;
59342   __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 41);
59343   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
59344   return __ret;
59345 }
59346 #endif
59347 
59348 #ifdef __LITTLE_ENDIAN__
59349 __ai float64x1_t vrndi_f64(float64x1_t __p0) {
59350   float64x1_t __ret;
59351   __ret = (float64x1_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 10);
59352   return __ret;
59353 }
59354 #else
59355 __ai float64x1_t vrndi_f64(float64x1_t __p0) {
59356   float64x1_t __ret;
59357   __ret = (float64x1_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 10);
59358   return __ret;
59359 }
59360 #endif
59361 
59362 #ifdef __LITTLE_ENDIAN__
59363 __ai float32x2_t vrndi_f32(float32x2_t __p0) {
59364   float32x2_t __ret;
59365   __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 9);
59366   return __ret;
59367 }
59368 #else
59369 __ai float32x2_t vrndi_f32(float32x2_t __p0) {
59370   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
59371   float32x2_t __ret;
59372   __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 9);
59373   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
59374   return __ret;
59375 }
59376 #endif
59377 
59378 #ifdef __LITTLE_ENDIAN__
59379 __ai float64x2_t vrndmq_f64(float64x2_t __p0) {
59380   float64x2_t __ret;
59381   __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 42);
59382   return __ret;
59383 }
59384 #else
59385 __ai float64x2_t vrndmq_f64(float64x2_t __p0) {
59386   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
59387   float64x2_t __ret;
59388   __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 42);
59389   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
59390   return __ret;
59391 }
59392 #endif
59393 
59394 #ifdef __LITTLE_ENDIAN__
59395 __ai float32x4_t vrndmq_f32(float32x4_t __p0) {
59396   float32x4_t __ret;
59397   __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 41);
59398   return __ret;
59399 }
59400 #else
59401 __ai float32x4_t vrndmq_f32(float32x4_t __p0) {
59402   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
59403   float32x4_t __ret;
59404   __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 41);
59405   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
59406   return __ret;
59407 }
59408 #endif
59409 
59410 #ifdef __LITTLE_ENDIAN__
59411 __ai float64x1_t vrndm_f64(float64x1_t __p0) {
59412   float64x1_t __ret;
59413   __ret = (float64x1_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 10);
59414   return __ret;
59415 }
59416 #else
59417 __ai float64x1_t vrndm_f64(float64x1_t __p0) {
59418   float64x1_t __ret;
59419   __ret = (float64x1_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 10);
59420   return __ret;
59421 }
59422 #endif
59423 
59424 #ifdef __LITTLE_ENDIAN__
59425 __ai float32x2_t vrndm_f32(float32x2_t __p0) {
59426   float32x2_t __ret;
59427   __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 9);
59428   return __ret;
59429 }
59430 #else
59431 __ai float32x2_t vrndm_f32(float32x2_t __p0) {
59432   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
59433   float32x2_t __ret;
59434   __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__rev0, 9);
59435   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
59436   return __ret;
59437 }
59438 #endif
59439 
59440 #ifdef __LITTLE_ENDIAN__
59441 __ai float64x2_t vrndnq_f64(float64x2_t __p0) {
59442   float64x2_t __ret;
59443   __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 42);
59444   return __ret;
59445 }
59446 #else
59447 __ai float64x2_t vrndnq_f64(float64x2_t __p0) {
59448   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
59449   float64x2_t __ret;
59450   __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 42);
59451   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
59452   return __ret;
59453 }
59454 #endif
59455 
59456 #ifdef __LITTLE_ENDIAN__
59457 __ai float32x4_t vrndnq_f32(float32x4_t __p0) {
59458   float32x4_t __ret;
59459   __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 41);
59460   return __ret;
59461 }
59462 #else
59463 __ai float32x4_t vrndnq_f32(float32x4_t __p0) {
59464   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
59465   float32x4_t __ret;
59466   __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 41);
59467   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
59468   return __ret;
59469 }
59470 #endif
59471 
59472 #ifdef __LITTLE_ENDIAN__
59473 __ai float64x1_t vrndn_f64(float64x1_t __p0) {
59474   float64x1_t __ret;
59475   __ret = (float64x1_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 10);
59476   return __ret;
59477 }
59478 #else
59479 __ai float64x1_t vrndn_f64(float64x1_t __p0) {
59480   float64x1_t __ret;
59481   __ret = (float64x1_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 10);
59482   return __ret;
59483 }
59484 #endif
59485 
59486 #ifdef __LITTLE_ENDIAN__
59487 __ai float32x2_t vrndn_f32(float32x2_t __p0) {
59488   float32x2_t __ret;
59489   __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 9);
59490   return __ret;
59491 }
59492 #else
59493 __ai float32x2_t vrndn_f32(float32x2_t __p0) {
59494   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
59495   float32x2_t __ret;
59496   __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__rev0, 9);
59497   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
59498   return __ret;
59499 }
59500 #endif
59501 
59502 #ifdef __LITTLE_ENDIAN__
59503 __ai float64x2_t vrndpq_f64(float64x2_t __p0) {
59504   float64x2_t __ret;
59505   __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 42);
59506   return __ret;
59507 }
59508 #else
59509 __ai float64x2_t vrndpq_f64(float64x2_t __p0) {
59510   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
59511   float64x2_t __ret;
59512   __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 42);
59513   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
59514   return __ret;
59515 }
59516 #endif
59517 
59518 #ifdef __LITTLE_ENDIAN__
59519 __ai float32x4_t vrndpq_f32(float32x4_t __p0) {
59520   float32x4_t __ret;
59521   __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 41);
59522   return __ret;
59523 }
59524 #else
59525 __ai float32x4_t vrndpq_f32(float32x4_t __p0) {
59526   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
59527   float32x4_t __ret;
59528   __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 41);
59529   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
59530   return __ret;
59531 }
59532 #endif
59533 
59534 #ifdef __LITTLE_ENDIAN__
59535 __ai float64x1_t vrndp_f64(float64x1_t __p0) {
59536   float64x1_t __ret;
59537   __ret = (float64x1_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 10);
59538   return __ret;
59539 }
59540 #else
59541 __ai float64x1_t vrndp_f64(float64x1_t __p0) {
59542   float64x1_t __ret;
59543   __ret = (float64x1_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 10);
59544   return __ret;
59545 }
59546 #endif
59547 
59548 #ifdef __LITTLE_ENDIAN__
59549 __ai float32x2_t vrndp_f32(float32x2_t __p0) {
59550   float32x2_t __ret;
59551   __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 9);
59552   return __ret;
59553 }
59554 #else
59555 __ai float32x2_t vrndp_f32(float32x2_t __p0) {
59556   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
59557   float32x2_t __ret;
59558   __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__rev0, 9);
59559   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
59560   return __ret;
59561 }
59562 #endif
59563 
59564 #ifdef __LITTLE_ENDIAN__
59565 __ai float64x2_t vrndxq_f64(float64x2_t __p0) {
59566   float64x2_t __ret;
59567   __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 42);
59568   return __ret;
59569 }
59570 #else
59571 __ai float64x2_t vrndxq_f64(float64x2_t __p0) {
59572   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
59573   float64x2_t __ret;
59574   __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 42);
59575   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
59576   return __ret;
59577 }
59578 #endif
59579 
59580 #ifdef __LITTLE_ENDIAN__
59581 __ai float32x4_t vrndxq_f32(float32x4_t __p0) {
59582   float32x4_t __ret;
59583   __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 41);
59584   return __ret;
59585 }
59586 #else
59587 __ai float32x4_t vrndxq_f32(float32x4_t __p0) {
59588   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
59589   float32x4_t __ret;
59590   __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 41);
59591   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
59592   return __ret;
59593 }
59594 #endif
59595 
59596 #ifdef __LITTLE_ENDIAN__
59597 __ai float64x1_t vrndx_f64(float64x1_t __p0) {
59598   float64x1_t __ret;
59599   __ret = (float64x1_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 10);
59600   return __ret;
59601 }
59602 #else
59603 __ai float64x1_t vrndx_f64(float64x1_t __p0) {
59604   float64x1_t __ret;
59605   __ret = (float64x1_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 10);
59606   return __ret;
59607 }
59608 #endif
59609 
59610 #ifdef __LITTLE_ENDIAN__
59611 __ai float32x2_t vrndx_f32(float32x2_t __p0) {
59612   float32x2_t __ret;
59613   __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 9);
59614   return __ret;
59615 }
59616 #else
59617 __ai float32x2_t vrndx_f32(float32x2_t __p0) {
59618   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
59619   float32x2_t __ret;
59620   __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__rev0, 9);
59621   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
59622   return __ret;
59623 }
59624 #endif
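/* Rounding shifts: vrshld_* shift a 64-bit scalar left by a signed, variable
 * amount (a negative count is a rounding shift right); the vrshrd_n_* and
 * vrshrn_high_n_* macros that follow shift right by an immediate with
 * rounding, the _high forms narrowing the result into the upper half of a
 * combined vector. */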
59625 
59626 #ifdef __LITTLE_ENDIAN__
59627 __ai uint64_t vrshld_u64(uint64_t __p0, uint64_t __p1) {
59628   uint64_t __ret;
59629   __ret = (uint64_t) __builtin_neon_vrshld_u64(__p0, __p1);
59630   return __ret;
59631 }
59632 #else
59633 __ai uint64_t vrshld_u64(uint64_t __p0, uint64_t __p1) {
59634   uint64_t __ret;
59635   __ret = (uint64_t) __builtin_neon_vrshld_u64(__p0, __p1);
59636   return __ret;
59637 }
59638 #endif
59639 
59640 #ifdef __LITTLE_ENDIAN__
59641 __ai int64_t vrshld_s64(int64_t __p0, int64_t __p1) {
59642   int64_t __ret;
59643   __ret = (int64_t) __builtin_neon_vrshld_s64(__p0, __p1);
59644   return __ret;
59645 }
59646 #else
59647 __ai int64_t vrshld_s64(int64_t __p0, int64_t __p1) {
59648   int64_t __ret;
59649   __ret = (int64_t) __builtin_neon_vrshld_s64(__p0, __p1);
59650   return __ret;
59651 }
59652 #endif
59653 
59654 #ifdef __LITTLE_ENDIAN__
59655 #define vrshrd_n_u64(__p0, __p1) __extension__ ({ \
59656   uint64_t __s0 = __p0; \
59657   uint64_t __ret; \
59658   __ret = (uint64_t) __builtin_neon_vrshrd_n_u64(__s0, __p1); \
59659   __ret; \
59660 })
59661 #else
59662 #define vrshrd_n_u64(__p0, __p1) __extension__ ({ \
59663   uint64_t __s0 = __p0; \
59664   uint64_t __ret; \
59665   __ret = (uint64_t) __builtin_neon_vrshrd_n_u64(__s0, __p1); \
59666   __ret; \
59667 })
59668 #endif
59669 
59670 #ifdef __LITTLE_ENDIAN__
59671 #define vrshrd_n_s64(__p0, __p1) __extension__ ({ \
59672   int64_t __s0 = __p0; \
59673   int64_t __ret; \
59674   __ret = (int64_t) __builtin_neon_vrshrd_n_s64(__s0, __p1); \
59675   __ret; \
59676 })
59677 #else
59678 #define vrshrd_n_s64(__p0, __p1) __extension__ ({ \
59679   int64_t __s0 = __p0; \
59680   int64_t __ret; \
59681   __ret = (int64_t) __builtin_neon_vrshrd_n_s64(__s0, __p1); \
59682   __ret; \
59683 })
59684 #endif
59685 
59686 #ifdef __LITTLE_ENDIAN__
59687 #define vrshrn_high_n_u32(__p0_210, __p1_210, __p2_210) __extension__ ({ \
59688   uint16x4_t __s0_210 = __p0_210; \
59689   uint32x4_t __s1_210 = __p1_210; \
59690   uint16x8_t __ret_210; \
59691   __ret_210 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_210), (uint16x4_t)(vrshrn_n_u32(__s1_210, __p2_210)))); \
59692   __ret_210; \
59693 })
59694 #else
59695 #define vrshrn_high_n_u32(__p0_211, __p1_211, __p2_211) __extension__ ({ \
59696   uint16x4_t __s0_211 = __p0_211; \
59697   uint32x4_t __s1_211 = __p1_211; \
59698   uint16x4_t __rev0_211;  __rev0_211 = __builtin_shufflevector(__s0_211, __s0_211, 3, 2, 1, 0); \
59699   uint32x4_t __rev1_211;  __rev1_211 = __builtin_shufflevector(__s1_211, __s1_211, 3, 2, 1, 0); \
59700   uint16x8_t __ret_211; \
59701   __ret_211 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_211), (uint16x4_t)(__noswap_vrshrn_n_u32(__rev1_211, __p2_211)))); \
59702   __ret_211 = __builtin_shufflevector(__ret_211, __ret_211, 7, 6, 5, 4, 3, 2, 1, 0); \
59703   __ret_211; \
59704 })
59705 #endif
59706 
59707 #ifdef __LITTLE_ENDIAN__
59708 #define vrshrn_high_n_u64(__p0_212, __p1_212, __p2_212) __extension__ ({ \
59709   uint32x2_t __s0_212 = __p0_212; \
59710   uint64x2_t __s1_212 = __p1_212; \
59711   uint32x4_t __ret_212; \
59712   __ret_212 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_212), (uint32x2_t)(vrshrn_n_u64(__s1_212, __p2_212)))); \
59713   __ret_212; \
59714 })
59715 #else
59716 #define vrshrn_high_n_u64(__p0_213, __p1_213, __p2_213) __extension__ ({ \
59717   uint32x2_t __s0_213 = __p0_213; \
59718   uint64x2_t __s1_213 = __p1_213; \
59719   uint32x2_t __rev0_213;  __rev0_213 = __builtin_shufflevector(__s0_213, __s0_213, 1, 0); \
59720   uint64x2_t __rev1_213;  __rev1_213 = __builtin_shufflevector(__s1_213, __s1_213, 1, 0); \
59721   uint32x4_t __ret_213; \
59722   __ret_213 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_213), (uint32x2_t)(__noswap_vrshrn_n_u64(__rev1_213, __p2_213)))); \
59723   __ret_213 = __builtin_shufflevector(__ret_213, __ret_213, 3, 2, 1, 0); \
59724   __ret_213; \
59725 })
59726 #endif
59727 
59728 #ifdef __LITTLE_ENDIAN__
59729 #define vrshrn_high_n_u16(__p0_214, __p1_214, __p2_214) __extension__ ({ \
59730   uint8x8_t __s0_214 = __p0_214; \
59731   uint16x8_t __s1_214 = __p1_214; \
59732   uint8x16_t __ret_214; \
59733   __ret_214 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_214), (uint8x8_t)(vrshrn_n_u16(__s1_214, __p2_214)))); \
59734   __ret_214; \
59735 })
59736 #else
59737 #define vrshrn_high_n_u16(__p0_215, __p1_215, __p2_215) __extension__ ({ \
59738   uint8x8_t __s0_215 = __p0_215; \
59739   uint16x8_t __s1_215 = __p1_215; \
59740   uint8x8_t __rev0_215;  __rev0_215 = __builtin_shufflevector(__s0_215, __s0_215, 7, 6, 5, 4, 3, 2, 1, 0); \
59741   uint16x8_t __rev1_215;  __rev1_215 = __builtin_shufflevector(__s1_215, __s1_215, 7, 6, 5, 4, 3, 2, 1, 0); \
59742   uint8x16_t __ret_215; \
59743   __ret_215 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_215), (uint8x8_t)(__noswap_vrshrn_n_u16(__rev1_215, __p2_215)))); \
59744   __ret_215 = __builtin_shufflevector(__ret_215, __ret_215, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
59745   __ret_215; \
59746 })
59747 #endif
59748 
59749 #ifdef __LITTLE_ENDIAN__
59750 #define vrshrn_high_n_s32(__p0_216, __p1_216, __p2_216) __extension__ ({ \
59751   int16x4_t __s0_216 = __p0_216; \
59752   int32x4_t __s1_216 = __p1_216; \
59753   int16x8_t __ret_216; \
59754   __ret_216 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_216), (int16x4_t)(vrshrn_n_s32(__s1_216, __p2_216)))); \
59755   __ret_216; \
59756 })
59757 #else
59758 #define vrshrn_high_n_s32(__p0_217, __p1_217, __p2_217) __extension__ ({ \
59759   int16x4_t __s0_217 = __p0_217; \
59760   int32x4_t __s1_217 = __p1_217; \
59761   int16x4_t __rev0_217;  __rev0_217 = __builtin_shufflevector(__s0_217, __s0_217, 3, 2, 1, 0); \
59762   int32x4_t __rev1_217;  __rev1_217 = __builtin_shufflevector(__s1_217, __s1_217, 3, 2, 1, 0); \
59763   int16x8_t __ret_217; \
59764   __ret_217 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_217), (int16x4_t)(__noswap_vrshrn_n_s32(__rev1_217, __p2_217)))); \
59765   __ret_217 = __builtin_shufflevector(__ret_217, __ret_217, 7, 6, 5, 4, 3, 2, 1, 0); \
59766   __ret_217; \
59767 })
59768 #endif
59769 
59770 #ifdef __LITTLE_ENDIAN__
59771 #define vrshrn_high_n_s64(__p0_218, __p1_218, __p2_218) __extension__ ({ \
59772   int32x2_t __s0_218 = __p0_218; \
59773   int64x2_t __s1_218 = __p1_218; \
59774   int32x4_t __ret_218; \
59775   __ret_218 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_218), (int32x2_t)(vrshrn_n_s64(__s1_218, __p2_218)))); \
59776   __ret_218; \
59777 })
59778 #else
59779 #define vrshrn_high_n_s64(__p0_219, __p1_219, __p2_219) __extension__ ({ \
59780   int32x2_t __s0_219 = __p0_219; \
59781   int64x2_t __s1_219 = __p1_219; \
59782   int32x2_t __rev0_219;  __rev0_219 = __builtin_shufflevector(__s0_219, __s0_219, 1, 0); \
59783   int64x2_t __rev1_219;  __rev1_219 = __builtin_shufflevector(__s1_219, __s1_219, 1, 0); \
59784   int32x4_t __ret_219; \
59785   __ret_219 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_219), (int32x2_t)(__noswap_vrshrn_n_s64(__rev1_219, __p2_219)))); \
59786   __ret_219 = __builtin_shufflevector(__ret_219, __ret_219, 3, 2, 1, 0); \
59787   __ret_219; \
59788 })
59789 #endif
59790 
59791 #ifdef __LITTLE_ENDIAN__
59792 #define vrshrn_high_n_s16(__p0_220, __p1_220, __p2_220) __extension__ ({ \
59793   int8x8_t __s0_220 = __p0_220; \
59794   int16x8_t __s1_220 = __p1_220; \
59795   int8x16_t __ret_220; \
59796   __ret_220 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_220), (int8x8_t)(vrshrn_n_s16(__s1_220, __p2_220)))); \
59797   __ret_220; \
59798 })
59799 #else
59800 #define vrshrn_high_n_s16(__p0_221, __p1_221, __p2_221) __extension__ ({ \
59801   int8x8_t __s0_221 = __p0_221; \
59802   int16x8_t __s1_221 = __p1_221; \
59803   int8x8_t __rev0_221;  __rev0_221 = __builtin_shufflevector(__s0_221, __s0_221, 7, 6, 5, 4, 3, 2, 1, 0); \
59804   int16x8_t __rev1_221;  __rev1_221 = __builtin_shufflevector(__s1_221, __s1_221, 7, 6, 5, 4, 3, 2, 1, 0); \
59805   int8x16_t __ret_221; \
59806   __ret_221 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_221), (int8x8_t)(__noswap_vrshrn_n_s16(__rev1_221, __p2_221)))); \
59807   __ret_221 = __builtin_shufflevector(__ret_221, __ret_221, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
59808   __ret_221; \
59809 })
59810 #endif
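/* vrsqrte*: reciprocal square-root estimate; vrsqrts*: the matching step,
 * (3.0 - a*b) / 2.0, used for Newton-Raphson refinement, e.g. (illustrative
 * only):
 *   float64x2_t x = vrsqrteq_f64(d);                     // ~ 1/sqrt(d)
 *   x = vmulq_f64(x, vrsqrtsq_f64(vmulq_f64(d, x), x));  // one refinement */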
59811 
59812 #ifdef __LITTLE_ENDIAN__
59813 __ai float64x2_t vrsqrteq_f64(float64x2_t __p0) {
59814   float64x2_t __ret;
59815   __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 42);
59816   return __ret;
59817 }
59818 #else
59819 __ai float64x2_t vrsqrteq_f64(float64x2_t __p0) {
59820   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
59821   float64x2_t __ret;
59822   __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 42);
59823   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
59824   return __ret;
59825 }
59826 #endif
59827 
59828 #ifdef __LITTLE_ENDIAN__
59829 __ai float64x1_t vrsqrte_f64(float64x1_t __p0) {
59830   float64x1_t __ret;
59831   __ret = (float64x1_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 10);
59832   return __ret;
59833 }
59834 #else
59835 __ai float64x1_t vrsqrte_f64(float64x1_t __p0) {
59836   float64x1_t __ret;
59837   __ret = (float64x1_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 10);
59838   return __ret;
59839 }
59840 #endif
59841 
59842 #ifdef __LITTLE_ENDIAN__
59843 __ai float64_t vrsqrted_f64(float64_t __p0) {
59844   float64_t __ret;
59845   __ret = (float64_t) __builtin_neon_vrsqrted_f64(__p0);
59846   return __ret;
59847 }
59848 #else
59849 __ai float64_t vrsqrted_f64(float64_t __p0) {
59850   float64_t __ret;
59851   __ret = (float64_t) __builtin_neon_vrsqrted_f64(__p0);
59852   return __ret;
59853 }
59854 #endif
59855 
59856 #ifdef __LITTLE_ENDIAN__
59857 __ai float32_t vrsqrtes_f32(float32_t __p0) {
59858   float32_t __ret;
59859   __ret = (float32_t) __builtin_neon_vrsqrtes_f32(__p0);
59860   return __ret;
59861 }
59862 #else
59863 __ai float32_t vrsqrtes_f32(float32_t __p0) {
59864   float32_t __ret;
59865   __ret = (float32_t) __builtin_neon_vrsqrtes_f32(__p0);
59866   return __ret;
59867 }
59868 #endif
59869 
59870 #ifdef __LITTLE_ENDIAN__
59871 __ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) {
59872   float64x2_t __ret;
59873   __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
59874   return __ret;
59875 }
59876 #else
59877 __ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) {
59878   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
59879   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
59880   float64x2_t __ret;
59881   __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
59882   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
59883   return __ret;
59884 }
59885 #endif
59886 
59887 #ifdef __LITTLE_ENDIAN__
59888 __ai float64x1_t vrsqrts_f64(float64x1_t __p0, float64x1_t __p1) {
59889   float64x1_t __ret;
59890   __ret = (float64x1_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
59891   return __ret;
59892 }
59893 #else
59894 __ai float64x1_t vrsqrts_f64(float64x1_t __p0, float64x1_t __p1) {
59895   float64x1_t __ret;
59896   __ret = (float64x1_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
59897   return __ret;
59898 }
59899 #endif
59900 
59901 #ifdef __LITTLE_ENDIAN__
59902 __ai float64_t vrsqrtsd_f64(float64_t __p0, float64_t __p1) {
59903   float64_t __ret;
59904   __ret = (float64_t) __builtin_neon_vrsqrtsd_f64(__p0, __p1);
59905   return __ret;
59906 }
59907 #else
59908 __ai float64_t vrsqrtsd_f64(float64_t __p0, float64_t __p1) {
59909   float64_t __ret;
59910   __ret = (float64_t) __builtin_neon_vrsqrtsd_f64(__p0, __p1);
59911   return __ret;
59912 }
59913 #endif
59914 
59915 #ifdef __LITTLE_ENDIAN__
59916 __ai float32_t vrsqrtss_f32(float32_t __p0, float32_t __p1) {
59917   float32_t __ret;
59918   __ret = (float32_t) __builtin_neon_vrsqrtss_f32(__p0, __p1);
59919   return __ret;
59920 }
59921 #else
59922 __ai float32_t vrsqrtss_f32(float32_t __p0, float32_t __p1) {
59923   float32_t __ret;
59924   __ret = (float32_t) __builtin_neon_vrsqrtss_f32(__p0, __p1);
59925   return __ret;
59926 }
59927 #endif
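/* Editorial usage sketch (assumes the usual estimate/step refinement pattern):
 * vrsqrts*(a, b) computes (3 - a*b)/2, the Newton-Raphson step used to sharpen
 * a vrsqrte* estimate. Names are illustrative.
 *
 *   float64x2_t e = vrsqrteq_f64(x);                       // rough 1/sqrt(x)
 *   e = vmulq_f64(e, vrsqrtsq_f64(vmulq_f64(x, e), e));    // one refinement step
 */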
59928 
59929 #ifdef __LITTLE_ENDIAN__
59930 #define vrsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \
59931   uint64_t __s0 = __p0; \
59932   uint64_t __s1 = __p1; \
59933   uint64_t __ret; \
59934   __ret = (uint64_t) __builtin_neon_vrsrad_n_u64(__s0, __s1, __p2); \
59935   __ret; \
59936 })
59937 #else
59938 #define vrsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \
59939   uint64_t __s0 = __p0; \
59940   uint64_t __s1 = __p1; \
59941   uint64_t __ret; \
59942   __ret = (uint64_t) __builtin_neon_vrsrad_n_u64(__s0, __s1, __p2); \
59943   __ret; \
59944 })
59945 #endif
59946 
59947 #ifdef __LITTLE_ENDIAN__
59948 #define vrsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \
59949   int64_t __s0 = __p0; \
59950   int64_t __s1 = __p1; \
59951   int64_t __ret; \
59952   __ret = (int64_t) __builtin_neon_vrsrad_n_s64(__s0, __s1, __p2); \
59953   __ret; \
59954 })
59955 #else
59956 #define vrsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \
59957   int64_t __s0 = __p0; \
59958   int64_t __s1 = __p1; \
59959   int64_t __ret; \
59960   __ret = (int64_t) __builtin_neon_vrsrad_n_s64(__s0, __s1, __p2); \
59961   __ret; \
59962 })
59963 #endif
59964 
59965 #ifdef __LITTLE_ENDIAN__
59966 __ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
59967   uint16x8_t __ret;
59968   __ret = vcombine_u16(__p0, vrsubhn_u32(__p1, __p2));
59969   return __ret;
59970 }
59971 #else
59972 __ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
59973   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
59974   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
59975   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
59976   uint16x8_t __ret;
59977   __ret = __noswap_vcombine_u16(__rev0, __noswap_vrsubhn_u32(__rev1, __rev2));
59978   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
59979   return __ret;
59980 }
59981 #endif
59982 
59983 #ifdef __LITTLE_ENDIAN__
59984 __ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
59985   uint32x4_t __ret;
59986   __ret = vcombine_u32(__p0, vrsubhn_u64(__p1, __p2));
59987   return __ret;
59988 }
59989 #else
59990 __ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
59991   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
59992   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
59993   uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
59994   uint32x4_t __ret;
59995   __ret = __noswap_vcombine_u32(__rev0, __noswap_vrsubhn_u64(__rev1, __rev2));
59996   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
59997   return __ret;
59998 }
59999 #endif
60000 
60001 #ifdef __LITTLE_ENDIAN__
60002 __ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
60003   uint8x16_t __ret;
60004   __ret = vcombine_u8(__p0, vrsubhn_u16(__p1, __p2));
60005   return __ret;
60006 }
60007 #else
60008 __ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
60009   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
60010   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
60011   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
60012   uint8x16_t __ret;
60013   __ret = __noswap_vcombine_u8(__rev0, __noswap_vrsubhn_u16(__rev1, __rev2));
60014   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
60015   return __ret;
60016 }
60017 #endif
60018 
60019 #ifdef __LITTLE_ENDIAN__
60020 __ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
60021   int16x8_t __ret;
60022   __ret = vcombine_s16(__p0, vrsubhn_s32(__p1, __p2));
60023   return __ret;
60024 }
60025 #else
60026 __ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
60027   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
60028   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
60029   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
60030   int16x8_t __ret;
60031   __ret = __noswap_vcombine_s16(__rev0, __noswap_vrsubhn_s32(__rev1, __rev2));
60032   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
60033   return __ret;
60034 }
60035 #endif
60036 
60037 #ifdef __LITTLE_ENDIAN__
60038 __ai int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
60039   int32x4_t __ret;
60040   __ret = vcombine_s32(__p0, vrsubhn_s64(__p1, __p2));
60041   return __ret;
60042 }
60043 #else
60044 __ai int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
60045   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
60046   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
60047   int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
60048   int32x4_t __ret;
60049   __ret = __noswap_vcombine_s32(__rev0, __noswap_vrsubhn_s64(__rev1, __rev2));
60050   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
60051   return __ret;
60052 }
60053 #endif
60054 
60055 #ifdef __LITTLE_ENDIAN__
60056 __ai int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
60057   int8x16_t __ret;
60058   __ret = vcombine_s8(__p0, vrsubhn_s16(__p1, __p2));
60059   return __ret;
60060 }
60061 #else
60062 __ai int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
60063   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
60064   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
60065   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
60066   int8x16_t __ret;
60067   __ret = __noswap_vcombine_s8(__rev0, __noswap_vrsubhn_s16(__rev1, __rev2));
60068   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
60069   return __ret;
60070 }
60071 #endif
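/* Editorial usage sketch: the vrsubhn_high_* helpers above keep the first argument
 * as the lower half of the result and append the rounded, narrowed difference of
 * the two wide operands as the upper half. Names are illustrative.
 *
 *   int8x8_t  lo = vrsubhn_s16(a0, b0);             // lower 8 lanes
 *   int8x16_t r  = vrsubhn_high_s16(lo, a1, b1);    // upper 8 lanes appended
 */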
60072 
60073 #ifdef __LITTLE_ENDIAN__
60074 #define vset_lane_f16(__p0_222, __p1_222, __p2_222) __extension__ ({ \
60075   float16_t __s0_222 = __p0_222; \
60076   float16x4_t __s1_222 = __p1_222; \
60077   float16x4_t __ret_222; \
60078 float16_t __reint_222 = __s0_222; \
60079 float16x4_t __reint1_222 = __s1_222; \
60080 int16x4_t __reint2_222 = vset_lane_s16(*(int16_t *) &__reint_222, *(int16x4_t *) &__reint1_222, __p2_222); \
60081   __ret_222 = *(float16x4_t *) &__reint2_222; \
60082   __ret_222; \
60083 })
60084 #else
60085 #define vset_lane_f16(__p0_223, __p1_223, __p2_223) __extension__ ({ \
60086   float16_t __s0_223 = __p0_223; \
60087   float16x4_t __s1_223 = __p1_223; \
60088   float16x4_t __rev1_223;  __rev1_223 = __builtin_shufflevector(__s1_223, __s1_223, 3, 2, 1, 0); \
60089   float16x4_t __ret_223; \
60090 float16_t __reint_223 = __s0_223; \
60091 float16x4_t __reint1_223 = __rev1_223; \
60092 int16x4_t __reint2_223 = __noswap_vset_lane_s16(*(int16_t *) &__reint_223, *(int16x4_t *) &__reint1_223, __p2_223); \
60093   __ret_223 = *(float16x4_t *) &__reint2_223; \
60094   __ret_223 = __builtin_shufflevector(__ret_223, __ret_223, 3, 2, 1, 0); \
60095   __ret_223; \
60096 })
60097 #endif
60098 
60099 #ifdef __LITTLE_ENDIAN__
60100 #define vsetq_lane_f16(__p0_224, __p1_224, __p2_224) __extension__ ({ \
60101   float16_t __s0_224 = __p0_224; \
60102   float16x8_t __s1_224 = __p1_224; \
60103   float16x8_t __ret_224; \
60104 float16_t __reint_224 = __s0_224; \
60105 float16x8_t __reint1_224 = __s1_224; \
60106 int16x8_t __reint2_224 = vsetq_lane_s16(*(int16_t *) &__reint_224, *(int16x8_t *) &__reint1_224, __p2_224); \
60107   __ret_224 = *(float16x8_t *) &__reint2_224; \
60108   __ret_224; \
60109 })
60110 #else
60111 #define vsetq_lane_f16(__p0_225, __p1_225, __p2_225) __extension__ ({ \
60112   float16_t __s0_225 = __p0_225; \
60113   float16x8_t __s1_225 = __p1_225; \
60114   float16x8_t __rev1_225;  __rev1_225 = __builtin_shufflevector(__s1_225, __s1_225, 7, 6, 5, 4, 3, 2, 1, 0); \
60115   float16x8_t __ret_225; \
60116 float16_t __reint_225 = __s0_225; \
60117 float16x8_t __reint1_225 = __rev1_225; \
60118 int16x8_t __reint2_225 = __noswap_vsetq_lane_s16(*(int16_t *) &__reint_225, *(int16x8_t *) &__reint1_225, __p2_225); \
60119   __ret_225 = *(float16x8_t *) &__reint2_225; \
60120   __ret_225 = __builtin_shufflevector(__ret_225, __ret_225, 7, 6, 5, 4, 3, 2, 1, 0); \
60121   __ret_225; \
60122 })
60123 #endif
60124 
60125 #ifdef __LITTLE_ENDIAN__
60126 #define vset_lane_p64(__p0, __p1, __p2) __extension__ ({ \
60127   poly64_t __s0 = __p0; \
60128   poly64x1_t __s1 = __p1; \
60129   poly64x1_t __ret; \
60130   __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
60131   __ret; \
60132 })
60133 #else
60134 #define vset_lane_p64(__p0, __p1, __p2) __extension__ ({ \
60135   poly64_t __s0 = __p0; \
60136   poly64x1_t __s1 = __p1; \
60137   poly64x1_t __ret; \
60138   __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
60139   __ret; \
60140 })
60141 #define __noswap_vset_lane_p64(__p0, __p1, __p2) __extension__ ({ \
60142   poly64_t __s0 = __p0; \
60143   poly64x1_t __s1 = __p1; \
60144   poly64x1_t __ret; \
60145   __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
60146   __ret; \
60147 })
60148 #endif
60149 
60150 #ifdef __LITTLE_ENDIAN__
60151 #define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
60152   poly64_t __s0 = __p0; \
60153   poly64x2_t __s1 = __p1; \
60154   poly64x2_t __ret; \
60155   __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
60156   __ret; \
60157 })
60158 #else
60159 #define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
60160   poly64_t __s0 = __p0; \
60161   poly64x2_t __s1 = __p1; \
60162   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
60163   poly64x2_t __ret; \
60164   __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__rev1, __p2); \
60165   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
60166   __ret; \
60167 })
60168 #define __noswap_vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
60169   poly64_t __s0 = __p0; \
60170   poly64x2_t __s1 = __p1; \
60171   poly64x2_t __ret; \
60172   __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
60173   __ret; \
60174 })
60175 #endif
60176 
60177 #ifdef __LITTLE_ENDIAN__
60178 #define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
60179   float64_t __s0 = __p0; \
60180   float64x2_t __s1 = __p1; \
60181   float64x2_t __ret; \
60182   __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (int8x16_t)__s1, __p2); \
60183   __ret; \
60184 })
60185 #else
60186 #define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
60187   float64_t __s0 = __p0; \
60188   float64x2_t __s1 = __p1; \
60189   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
60190   float64x2_t __ret; \
60191   __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (int8x16_t)__rev1, __p2); \
60192   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
60193   __ret; \
60194 })
60195 #define __noswap_vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
60196   float64_t __s0 = __p0; \
60197   float64x2_t __s1 = __p1; \
60198   float64x2_t __ret; \
60199   __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (int8x16_t)__s1, __p2); \
60200   __ret; \
60201 })
60202 #endif
60203 
60204 #ifdef __LITTLE_ENDIAN__
60205 #define vset_lane_f64(__p0, __p1, __p2) __extension__ ({ \
60206   float64_t __s0 = __p0; \
60207   float64x1_t __s1 = __p1; \
60208   float64x1_t __ret; \
60209   __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (int8x8_t)__s1, __p2); \
60210   __ret; \
60211 })
60212 #else
60213 #define vset_lane_f64(__p0, __p1, __p2) __extension__ ({ \
60214   float64_t __s0 = __p0; \
60215   float64x1_t __s1 = __p1; \
60216   float64x1_t __ret; \
60217   __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (int8x8_t)__s1, __p2); \
60218   __ret; \
60219 })
60220 #define __noswap_vset_lane_f64(__p0, __p1, __p2) __extension__ ({ \
60221   float64_t __s0 = __p0; \
60222   float64x1_t __s1 = __p1; \
60223   float64x1_t __ret; \
60224   __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (int8x8_t)__s1, __p2); \
60225   __ret; \
60226 })
60227 #endif
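/* Editorial usage sketch: vsetq_lane_f64 returns a copy of the vector with one
 * lane replaced; the lane index must be a compile-time constant. Names are
 * illustrative.
 *
 *   float64x2_t v = vdupq_n_f64(0.0);
 *   v = vsetq_lane_f64(3.5, v, 1);    // lane 0 = 0.0, lane 1 = 3.5
 */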
60228 
60229 #ifdef __LITTLE_ENDIAN__
60230 __ai uint64_t vshld_u64(uint64_t __p0, uint64_t __p1) {
60231   uint64_t __ret;
60232   __ret = (uint64_t) __builtin_neon_vshld_u64(__p0, __p1);
60233   return __ret;
60234 }
60235 #else
60236 __ai uint64_t vshld_u64(uint64_t __p0, uint64_t __p1) {
60237   uint64_t __ret;
60238   __ret = (uint64_t) __builtin_neon_vshld_u64(__p0, __p1);
60239   return __ret;
60240 }
60241 #endif
60242 
60243 #ifdef __LITTLE_ENDIAN__
60244 __ai int64_t vshld_s64(int64_t __p0, int64_t __p1) {
60245   int64_t __ret;
60246   __ret = (int64_t) __builtin_neon_vshld_s64(__p0, __p1);
60247   return __ret;
60248 }
60249 #else
60250 __ai int64_t vshld_s64(int64_t __p0, int64_t __p1) {
60251   int64_t __ret;
60252   __ret = (int64_t) __builtin_neon_vshld_s64(__p0, __p1);
60253   return __ret;
60254 }
60255 #endif
60256 
60257 #ifdef __LITTLE_ENDIAN__
60258 #define vshld_n_u64(__p0, __p1) __extension__ ({ \
60259   uint64_t __s0 = __p0; \
60260   uint64_t __ret; \
60261   __ret = (uint64_t) __builtin_neon_vshld_n_u64(__s0, __p1); \
60262   __ret; \
60263 })
60264 #else
60265 #define vshld_n_u64(__p0, __p1) __extension__ ({ \
60266   uint64_t __s0 = __p0; \
60267   uint64_t __ret; \
60268   __ret = (uint64_t) __builtin_neon_vshld_n_u64(__s0, __p1); \
60269   __ret; \
60270 })
60271 #endif
60272 
60273 #ifdef __LITTLE_ENDIAN__
60274 #define vshld_n_s64(__p0, __p1) __extension__ ({ \
60275   int64_t __s0 = __p0; \
60276   int64_t __ret; \
60277   __ret = (int64_t) __builtin_neon_vshld_n_s64(__s0, __p1); \
60278   __ret; \
60279 })
60280 #else
60281 #define vshld_n_s64(__p0, __p1) __extension__ ({ \
60282   int64_t __s0 = __p0; \
60283   int64_t __ret; \
60284   __ret = (int64_t) __builtin_neon_vshld_n_s64(__s0, __p1); \
60285   __ret; \
60286 })
60287 #endif
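/* Editorial usage sketch: the scalar vshld_* forms shift a single 64-bit value;
 * vshld_u64/vshld_s64 take the shift count in a register, while vshld_n_* take a
 * compile-time immediate. Names are illustrative.
 *
 *   uint64_t a = 0x1ULL;
 *   uint64_t b = vshld_n_u64(a, 4);   // 0x10
 */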
60288 
60289 #ifdef __LITTLE_ENDIAN__
60290 #define vshll_high_n_u8(__p0_226, __p1_226) __extension__ ({ \
60291   uint8x16_t __s0_226 = __p0_226; \
60292   uint16x8_t __ret_226; \
60293   __ret_226 = (uint16x8_t)(vshll_n_u8(vget_high_u8(__s0_226), __p1_226)); \
60294   __ret_226; \
60295 })
60296 #else
60297 #define vshll_high_n_u8(__p0_227, __p1_227) __extension__ ({ \
60298   uint8x16_t __s0_227 = __p0_227; \
60299   uint8x16_t __rev0_227;  __rev0_227 = __builtin_shufflevector(__s0_227, __s0_227, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
60300   uint16x8_t __ret_227; \
60301   __ret_227 = (uint16x8_t)(__noswap_vshll_n_u8(__noswap_vget_high_u8(__rev0_227), __p1_227)); \
60302   __ret_227 = __builtin_shufflevector(__ret_227, __ret_227, 7, 6, 5, 4, 3, 2, 1, 0); \
60303   __ret_227; \
60304 })
60305 #endif
60306 
60307 #ifdef __LITTLE_ENDIAN__
60308 #define vshll_high_n_u32(__p0_228, __p1_228) __extension__ ({ \
60309   uint32x4_t __s0_228 = __p0_228; \
60310   uint64x2_t __ret_228; \
60311   __ret_228 = (uint64x2_t)(vshll_n_u32(vget_high_u32(__s0_228), __p1_228)); \
60312   __ret_228; \
60313 })
60314 #else
60315 #define vshll_high_n_u32(__p0_229, __p1_229) __extension__ ({ \
60316   uint32x4_t __s0_229 = __p0_229; \
60317   uint32x4_t __rev0_229;  __rev0_229 = __builtin_shufflevector(__s0_229, __s0_229, 3, 2, 1, 0); \
60318   uint64x2_t __ret_229; \
60319   __ret_229 = (uint64x2_t)(__noswap_vshll_n_u32(__noswap_vget_high_u32(__rev0_229), __p1_229)); \
60320   __ret_229 = __builtin_shufflevector(__ret_229, __ret_229, 1, 0); \
60321   __ret_229; \
60322 })
60323 #endif
60324 
60325 #ifdef __LITTLE_ENDIAN__
60326 #define vshll_high_n_u16(__p0_230, __p1_230) __extension__ ({ \
60327   uint16x8_t __s0_230 = __p0_230; \
60328   uint32x4_t __ret_230; \
60329   __ret_230 = (uint32x4_t)(vshll_n_u16(vget_high_u16(__s0_230), __p1_230)); \
60330   __ret_230; \
60331 })
60332 #else
60333 #define vshll_high_n_u16(__p0_231, __p1_231) __extension__ ({ \
60334   uint16x8_t __s0_231 = __p0_231; \
60335   uint16x8_t __rev0_231;  __rev0_231 = __builtin_shufflevector(__s0_231, __s0_231, 7, 6, 5, 4, 3, 2, 1, 0); \
60336   uint32x4_t __ret_231; \
60337   __ret_231 = (uint32x4_t)(__noswap_vshll_n_u16(__noswap_vget_high_u16(__rev0_231), __p1_231)); \
60338   __ret_231 = __builtin_shufflevector(__ret_231, __ret_231, 3, 2, 1, 0); \
60339   __ret_231; \
60340 })
60341 #endif
60342 
60343 #ifdef __LITTLE_ENDIAN__
60344 #define vshll_high_n_s8(__p0_232, __p1_232) __extension__ ({ \
60345   int8x16_t __s0_232 = __p0_232; \
60346   int16x8_t __ret_232; \
60347   __ret_232 = (int16x8_t)(vshll_n_s8(vget_high_s8(__s0_232), __p1_232)); \
60348   __ret_232; \
60349 })
60350 #else
60351 #define vshll_high_n_s8(__p0_233, __p1_233) __extension__ ({ \
60352   int8x16_t __s0_233 = __p0_233; \
60353   int8x16_t __rev0_233;  __rev0_233 = __builtin_shufflevector(__s0_233, __s0_233, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
60354   int16x8_t __ret_233; \
60355   __ret_233 = (int16x8_t)(__noswap_vshll_n_s8(__noswap_vget_high_s8(__rev0_233), __p1_233)); \
60356   __ret_233 = __builtin_shufflevector(__ret_233, __ret_233, 7, 6, 5, 4, 3, 2, 1, 0); \
60357   __ret_233; \
60358 })
60359 #endif
60360 
60361 #ifdef __LITTLE_ENDIAN__
60362 #define vshll_high_n_s32(__p0_234, __p1_234) __extension__ ({ \
60363   int32x4_t __s0_234 = __p0_234; \
60364   int64x2_t __ret_234; \
60365   __ret_234 = (int64x2_t)(vshll_n_s32(vget_high_s32(__s0_234), __p1_234)); \
60366   __ret_234; \
60367 })
60368 #else
60369 #define vshll_high_n_s32(__p0_235, __p1_235) __extension__ ({ \
60370   int32x4_t __s0_235 = __p0_235; \
60371   int32x4_t __rev0_235;  __rev0_235 = __builtin_shufflevector(__s0_235, __s0_235, 3, 2, 1, 0); \
60372   int64x2_t __ret_235; \
60373   __ret_235 = (int64x2_t)(__noswap_vshll_n_s32(__noswap_vget_high_s32(__rev0_235), __p1_235)); \
60374   __ret_235 = __builtin_shufflevector(__ret_235, __ret_235, 1, 0); \
60375   __ret_235; \
60376 })
60377 #endif
60378 
60379 #ifdef __LITTLE_ENDIAN__
60380 #define vshll_high_n_s16(__p0_236, __p1_236) __extension__ ({ \
60381   int16x8_t __s0_236 = __p0_236; \
60382   int32x4_t __ret_236; \
60383   __ret_236 = (int32x4_t)(vshll_n_s16(vget_high_s16(__s0_236), __p1_236)); \
60384   __ret_236; \
60385 })
60386 #else
60387 #define vshll_high_n_s16(__p0_237, __p1_237) __extension__ ({ \
60388   int16x8_t __s0_237 = __p0_237; \
60389   int16x8_t __rev0_237;  __rev0_237 = __builtin_shufflevector(__s0_237, __s0_237, 7, 6, 5, 4, 3, 2, 1, 0); \
60390   int32x4_t __ret_237; \
60391   __ret_237 = (int32x4_t)(__noswap_vshll_n_s16(__noswap_vget_high_s16(__rev0_237), __p1_237)); \
60392   __ret_237 = __builtin_shufflevector(__ret_237, __ret_237, 3, 2, 1, 0); \
60393   __ret_237; \
60394 })
60395 #endif
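/* Editorial usage sketch: vshll_high_n_* widens the upper half of the input vector
 * and shifts each element left by the immediate. Names are illustrative.
 *
 *   int16x8_t v = vdupq_n_s16(3);
 *   int32x4_t w = vshll_high_n_s16(v, 4);   // upper four lanes widened, each 3 << 4 = 48
 */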
60396 
60397 #ifdef __LITTLE_ENDIAN__
60398 #define vshrd_n_u64(__p0, __p1) __extension__ ({ \
60399   uint64_t __s0 = __p0; \
60400   uint64_t __ret; \
60401   __ret = (uint64_t) __builtin_neon_vshrd_n_u64(__s0, __p1); \
60402   __ret; \
60403 })
60404 #else
60405 #define vshrd_n_u64(__p0, __p1) __extension__ ({ \
60406   uint64_t __s0 = __p0; \
60407   uint64_t __ret; \
60408   __ret = (uint64_t) __builtin_neon_vshrd_n_u64(__s0, __p1); \
60409   __ret; \
60410 })
60411 #endif
60412 
60413 #ifdef __LITTLE_ENDIAN__
60414 #define vshrd_n_s64(__p0, __p1) __extension__ ({ \
60415   int64_t __s0 = __p0; \
60416   int64_t __ret; \
60417   __ret = (int64_t) __builtin_neon_vshrd_n_s64(__s0, __p1); \
60418   __ret; \
60419 })
60420 #else
60421 #define vshrd_n_s64(__p0, __p1) __extension__ ({ \
60422   int64_t __s0 = __p0; \
60423   int64_t __ret; \
60424   __ret = (int64_t) __builtin_neon_vshrd_n_s64(__s0, __p1); \
60425   __ret; \
60426 })
60427 #endif
60428 
60429 #ifdef __LITTLE_ENDIAN__
60430 #define vshrn_high_n_u32(__p0_238, __p1_238, __p2_238) __extension__ ({ \
60431   uint16x4_t __s0_238 = __p0_238; \
60432   uint32x4_t __s1_238 = __p1_238; \
60433   uint16x8_t __ret_238; \
60434   __ret_238 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_238), (uint16x4_t)(vshrn_n_u32(__s1_238, __p2_238)))); \
60435   __ret_238; \
60436 })
60437 #else
60438 #define vshrn_high_n_u32(__p0_239, __p1_239, __p2_239) __extension__ ({ \
60439   uint16x4_t __s0_239 = __p0_239; \
60440   uint32x4_t __s1_239 = __p1_239; \
60441   uint16x4_t __rev0_239;  __rev0_239 = __builtin_shufflevector(__s0_239, __s0_239, 3, 2, 1, 0); \
60442   uint32x4_t __rev1_239;  __rev1_239 = __builtin_shufflevector(__s1_239, __s1_239, 3, 2, 1, 0); \
60443   uint16x8_t __ret_239; \
60444   __ret_239 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_239), (uint16x4_t)(__noswap_vshrn_n_u32(__rev1_239, __p2_239)))); \
60445   __ret_239 = __builtin_shufflevector(__ret_239, __ret_239, 7, 6, 5, 4, 3, 2, 1, 0); \
60446   __ret_239; \
60447 })
60448 #endif
60449 
60450 #ifdef __LITTLE_ENDIAN__
60451 #define vshrn_high_n_u64(__p0_240, __p1_240, __p2_240) __extension__ ({ \
60452   uint32x2_t __s0_240 = __p0_240; \
60453   uint64x2_t __s1_240 = __p1_240; \
60454   uint32x4_t __ret_240; \
60455   __ret_240 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_240), (uint32x2_t)(vshrn_n_u64(__s1_240, __p2_240)))); \
60456   __ret_240; \
60457 })
60458 #else
60459 #define vshrn_high_n_u64(__p0_241, __p1_241, __p2_241) __extension__ ({ \
60460   uint32x2_t __s0_241 = __p0_241; \
60461   uint64x2_t __s1_241 = __p1_241; \
60462   uint32x2_t __rev0_241;  __rev0_241 = __builtin_shufflevector(__s0_241, __s0_241, 1, 0); \
60463   uint64x2_t __rev1_241;  __rev1_241 = __builtin_shufflevector(__s1_241, __s1_241, 1, 0); \
60464   uint32x4_t __ret_241; \
60465   __ret_241 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_241), (uint32x2_t)(__noswap_vshrn_n_u64(__rev1_241, __p2_241)))); \
60466   __ret_241 = __builtin_shufflevector(__ret_241, __ret_241, 3, 2, 1, 0); \
60467   __ret_241; \
60468 })
60469 #endif
60470 
60471 #ifdef __LITTLE_ENDIAN__
60472 #define vshrn_high_n_u16(__p0_242, __p1_242, __p2_242) __extension__ ({ \
60473   uint8x8_t __s0_242 = __p0_242; \
60474   uint16x8_t __s1_242 = __p1_242; \
60475   uint8x16_t __ret_242; \
60476   __ret_242 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_242), (uint8x8_t)(vshrn_n_u16(__s1_242, __p2_242)))); \
60477   __ret_242; \
60478 })
60479 #else
60480 #define vshrn_high_n_u16(__p0_243, __p1_243, __p2_243) __extension__ ({ \
60481   uint8x8_t __s0_243 = __p0_243; \
60482   uint16x8_t __s1_243 = __p1_243; \
60483   uint8x8_t __rev0_243;  __rev0_243 = __builtin_shufflevector(__s0_243, __s0_243, 7, 6, 5, 4, 3, 2, 1, 0); \
60484   uint16x8_t __rev1_243;  __rev1_243 = __builtin_shufflevector(__s1_243, __s1_243, 7, 6, 5, 4, 3, 2, 1, 0); \
60485   uint8x16_t __ret_243; \
60486   __ret_243 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_243), (uint8x8_t)(__noswap_vshrn_n_u16(__rev1_243, __p2_243)))); \
60487   __ret_243 = __builtin_shufflevector(__ret_243, __ret_243, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
60488   __ret_243; \
60489 })
60490 #endif
60491 
60492 #ifdef __LITTLE_ENDIAN__
60493 #define vshrn_high_n_s32(__p0_244, __p1_244, __p2_244) __extension__ ({ \
60494   int16x4_t __s0_244 = __p0_244; \
60495   int32x4_t __s1_244 = __p1_244; \
60496   int16x8_t __ret_244; \
60497   __ret_244 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_244), (int16x4_t)(vshrn_n_s32(__s1_244, __p2_244)))); \
60498   __ret_244; \
60499 })
60500 #else
60501 #define vshrn_high_n_s32(__p0_245, __p1_245, __p2_245) __extension__ ({ \
60502   int16x4_t __s0_245 = __p0_245; \
60503   int32x4_t __s1_245 = __p1_245; \
60504   int16x4_t __rev0_245;  __rev0_245 = __builtin_shufflevector(__s0_245, __s0_245, 3, 2, 1, 0); \
60505   int32x4_t __rev1_245;  __rev1_245 = __builtin_shufflevector(__s1_245, __s1_245, 3, 2, 1, 0); \
60506   int16x8_t __ret_245; \
60507   __ret_245 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_245), (int16x4_t)(__noswap_vshrn_n_s32(__rev1_245, __p2_245)))); \
60508   __ret_245 = __builtin_shufflevector(__ret_245, __ret_245, 7, 6, 5, 4, 3, 2, 1, 0); \
60509   __ret_245; \
60510 })
60511 #endif
60512 
60513 #ifdef __LITTLE_ENDIAN__
60514 #define vshrn_high_n_s64(__p0_246, __p1_246, __p2_246) __extension__ ({ \
60515   int32x2_t __s0_246 = __p0_246; \
60516   int64x2_t __s1_246 = __p1_246; \
60517   int32x4_t __ret_246; \
60518   __ret_246 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_246), (int32x2_t)(vshrn_n_s64(__s1_246, __p2_246)))); \
60519   __ret_246; \
60520 })
60521 #else
60522 #define vshrn_high_n_s64(__p0_247, __p1_247, __p2_247) __extension__ ({ \
60523   int32x2_t __s0_247 = __p0_247; \
60524   int64x2_t __s1_247 = __p1_247; \
60525   int32x2_t __rev0_247;  __rev0_247 = __builtin_shufflevector(__s0_247, __s0_247, 1, 0); \
60526   int64x2_t __rev1_247;  __rev1_247 = __builtin_shufflevector(__s1_247, __s1_247, 1, 0); \
60527   int32x4_t __ret_247; \
60528   __ret_247 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_247), (int32x2_t)(__noswap_vshrn_n_s64(__rev1_247, __p2_247)))); \
60529   __ret_247 = __builtin_shufflevector(__ret_247, __ret_247, 3, 2, 1, 0); \
60530   __ret_247; \
60531 })
60532 #endif
60533 
60534 #ifdef __LITTLE_ENDIAN__
60535 #define vshrn_high_n_s16(__p0_248, __p1_248, __p2_248) __extension__ ({ \
60536   int8x8_t __s0_248 = __p0_248; \
60537   int16x8_t __s1_248 = __p1_248; \
60538   int8x16_t __ret_248; \
60539   __ret_248 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_248), (int8x8_t)(vshrn_n_s16(__s1_248, __p2_248)))); \
60540   __ret_248; \
60541 })
60542 #else
60543 #define vshrn_high_n_s16(__p0_249, __p1_249, __p2_249) __extension__ ({ \
60544   int8x8_t __s0_249 = __p0_249; \
60545   int16x8_t __s1_249 = __p1_249; \
60546   int8x8_t __rev0_249;  __rev0_249 = __builtin_shufflevector(__s0_249, __s0_249, 7, 6, 5, 4, 3, 2, 1, 0); \
60547   int16x8_t __rev1_249;  __rev1_249 = __builtin_shufflevector(__s1_249, __s1_249, 7, 6, 5, 4, 3, 2, 1, 0); \
60548   int8x16_t __ret_249; \
60549   __ret_249 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_249), (int8x8_t)(__noswap_vshrn_n_s16(__rev1_249, __p2_249)))); \
60550   __ret_249 = __builtin_shufflevector(__ret_249, __ret_249, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
60551   __ret_249; \
60552 })
60553 #endif
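/* Editorial usage sketch: vshrn_high_n_* shifts the wide operand right by the
 * immediate, narrows it, and appends it to the first argument as the upper half.
 * Names are illustrative.
 *
 *   uint8x8_t  lo = vshrn_n_u16(a0, 8);            // lower half
 *   uint8x16_t r  = vshrn_high_n_u16(lo, a1, 8);   // full 16-byte result
 */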
60554 
60555 #ifdef __LITTLE_ENDIAN__
60556 #define vslid_n_u64(__p0, __p1, __p2) __extension__ ({ \
60557   uint64_t __s0 = __p0; \
60558   uint64_t __s1 = __p1; \
60559   uint64_t __ret; \
60560   __ret = (uint64_t) __builtin_neon_vslid_n_u64(__s0, __s1, __p2); \
60561   __ret; \
60562 })
60563 #else
60564 #define vslid_n_u64(__p0, __p1, __p2) __extension__ ({ \
60565   uint64_t __s0 = __p0; \
60566   uint64_t __s1 = __p1; \
60567   uint64_t __ret; \
60568   __ret = (uint64_t) __builtin_neon_vslid_n_u64(__s0, __s1, __p2); \
60569   __ret; \
60570 })
60571 #endif
60572 
60573 #ifdef __LITTLE_ENDIAN__
60574 #define vslid_n_s64(__p0, __p1, __p2) __extension__ ({ \
60575   int64_t __s0 = __p0; \
60576   int64_t __s1 = __p1; \
60577   int64_t __ret; \
60578   __ret = (int64_t) __builtin_neon_vslid_n_s64(__s0, __s1, __p2); \
60579   __ret; \
60580 })
60581 #else
60582 #define vslid_n_s64(__p0, __p1, __p2) __extension__ ({ \
60583   int64_t __s0 = __p0; \
60584   int64_t __s1 = __p1; \
60585   int64_t __ret; \
60586   __ret = (int64_t) __builtin_neon_vslid_n_s64(__s0, __s1, __p2); \
60587   __ret; \
60588 })
60589 #endif
60590 
60591 #ifdef __LITTLE_ENDIAN__
60592 #define vsli_n_p64(__p0, __p1, __p2) __extension__ ({ \
60593   poly64x1_t __s0 = __p0; \
60594   poly64x1_t __s1 = __p1; \
60595   poly64x1_t __ret; \
60596   __ret = (poly64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
60597   __ret; \
60598 })
60599 #else
60600 #define vsli_n_p64(__p0, __p1, __p2) __extension__ ({ \
60601   poly64x1_t __s0 = __p0; \
60602   poly64x1_t __s1 = __p1; \
60603   poly64x1_t __ret; \
60604   __ret = (poly64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
60605   __ret; \
60606 })
60607 #endif
60608 
60609 #ifdef __LITTLE_ENDIAN__
60610 #define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \
60611   poly64x2_t __s0 = __p0; \
60612   poly64x2_t __s1 = __p1; \
60613   poly64x2_t __ret; \
60614   __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
60615   __ret; \
60616 })
60617 #else
60618 #define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \
60619   poly64x2_t __s0 = __p0; \
60620   poly64x2_t __s1 = __p1; \
60621   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
60622   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
60623   poly64x2_t __ret; \
60624   __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
60625   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
60626   __ret; \
60627 })
60628 #endif
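/* Editorial usage sketch: the vsli*_n_* forms shift the second operand left by the
 * immediate and insert it into the first operand, preserving the first operand's
 * bits below the shift point. Names are illustrative.
 *
 *   uint64_t r = vslid_n_u64(a, b, 8);   // r = (b << 8) | (a & 0xffULL)
 */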
60629 
60630 #ifdef __LITTLE_ENDIAN__
60631 __ai uint8_t vsqaddb_u8(uint8_t __p0, uint8_t __p1) {
60632   uint8_t __ret;
60633   __ret = (uint8_t) __builtin_neon_vsqaddb_u8(__p0, __p1);
60634   return __ret;
60635 }
60636 #else
60637 __ai uint8_t vsqaddb_u8(uint8_t __p0, uint8_t __p1) {
60638   uint8_t __ret;
60639   __ret = (uint8_t) __builtin_neon_vsqaddb_u8(__p0, __p1);
60640   return __ret;
60641 }
60642 #endif
60643 
60644 #ifdef __LITTLE_ENDIAN__
60645 __ai uint32_t vsqadds_u32(uint32_t __p0, uint32_t __p1) {
60646   uint32_t __ret;
60647   __ret = (uint32_t) __builtin_neon_vsqadds_u32(__p0, __p1);
60648   return __ret;
60649 }
60650 #else
60651 __ai uint32_t vsqadds_u32(uint32_t __p0, uint32_t __p1) {
60652   uint32_t __ret;
60653   __ret = (uint32_t) __builtin_neon_vsqadds_u32(__p0, __p1);
60654   return __ret;
60655 }
60656 #endif
60657 
60658 #ifdef __LITTLE_ENDIAN__
60659 __ai uint64_t vsqaddd_u64(uint64_t __p0, uint64_t __p1) {
60660   uint64_t __ret;
60661   __ret = (uint64_t) __builtin_neon_vsqaddd_u64(__p0, __p1);
60662   return __ret;
60663 }
60664 #else
60665 __ai uint64_t vsqaddd_u64(uint64_t __p0, uint64_t __p1) {
60666   uint64_t __ret;
60667   __ret = (uint64_t) __builtin_neon_vsqaddd_u64(__p0, __p1);
60668   return __ret;
60669 }
60670 #endif
60671 
60672 #ifdef __LITTLE_ENDIAN__
60673 __ai uint16_t vsqaddh_u16(uint16_t __p0, uint16_t __p1) {
60674   uint16_t __ret;
60675   __ret = (uint16_t) __builtin_neon_vsqaddh_u16(__p0, __p1);
60676   return __ret;
60677 }
60678 #else
60679 __ai uint16_t vsqaddh_u16(uint16_t __p0, uint16_t __p1) {
60680   uint16_t __ret;
60681   __ret = (uint16_t) __builtin_neon_vsqaddh_u16(__p0, __p1);
60682   return __ret;
60683 }
60684 #endif
60685 
60686 #ifdef __LITTLE_ENDIAN__
60687 __ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
60688   uint8x16_t __ret;
60689   __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
60690   return __ret;
60691 }
60692 #else
60693 __ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
60694   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
60695   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
60696   uint8x16_t __ret;
60697   __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
60698   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
60699   return __ret;
60700 }
60701 #endif
60702 
60703 #ifdef __LITTLE_ENDIAN__
60704 __ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
60705   uint32x4_t __ret;
60706   __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
60707   return __ret;
60708 }
60709 #else
60710 __ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
60711   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
60712   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
60713   uint32x4_t __ret;
60714   __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
60715   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
60716   return __ret;
60717 }
60718 #endif
60719 
60720 #ifdef __LITTLE_ENDIAN__
60721 __ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
60722   uint64x2_t __ret;
60723   __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
60724   return __ret;
60725 }
60726 #else
60727 __ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
60728   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
60729   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
60730   uint64x2_t __ret;
60731   __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
60732   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
60733   return __ret;
60734 }
60735 #endif
60736 
60737 #ifdef __LITTLE_ENDIAN__
60738 __ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
60739   uint16x8_t __ret;
60740   __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
60741   return __ret;
60742 }
60743 #else
60744 __ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
60745   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
60746   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
60747   uint16x8_t __ret;
60748   __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
60749   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
60750   return __ret;
60751 }
60752 #endif
60753 
60754 #ifdef __LITTLE_ENDIAN__
60755 __ai uint8x8_t vsqadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
60756   uint8x8_t __ret;
60757   __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
60758   return __ret;
60759 }
60760 #else
60761 __ai uint8x8_t vsqadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
60762   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
60763   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
60764   uint8x8_t __ret;
60765   __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
60766   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
60767   return __ret;
60768 }
60769 #endif
60770 
60771 #ifdef __LITTLE_ENDIAN__
60772 __ai uint32x2_t vsqadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
60773   uint32x2_t __ret;
60774   __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
60775   return __ret;
60776 }
60777 #else
60778 __ai uint32x2_t vsqadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
60779   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
60780   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
60781   uint32x2_t __ret;
60782   __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
60783   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
60784   return __ret;
60785 }
60786 #endif
60787 
60788 #ifdef __LITTLE_ENDIAN__
60789 __ai uint64x1_t vsqadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
60790   uint64x1_t __ret;
60791   __ret = (uint64x1_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
60792   return __ret;
60793 }
60794 #else
60795 __ai uint64x1_t vsqadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
60796   uint64x1_t __ret;
60797   __ret = (uint64x1_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
60798   return __ret;
60799 }
60800 #endif
60801 
60802 #ifdef __LITTLE_ENDIAN__
60803 __ai uint16x4_t vsqadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
60804   uint16x4_t __ret;
60805   __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
60806   return __ret;
60807 }
60808 #else
60809 __ai uint16x4_t vsqadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
60810   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
60811   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
60812   uint16x4_t __ret;
60813   __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
60814   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
60815   return __ret;
60816 }
60817 #endif
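/* Editorial usage sketch: the vsqadd* family above performs an unsigned saturating
 * accumulate (in ACLE terms this generally corresponds to USQADD, which treats the
 * addend's bits as signed); note that this header spells both parameters with the
 * unsigned vector/scalar types. Names are illustrative.
 *
 *   uint8x16_t acc = vdupq_n_u8(250);
 *   uint8x16_t r   = vsqaddq_u8(acc, vdupq_n_u8(10));   // 250 + 10 saturates to 255
 */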
60818 
60819 #ifdef __LITTLE_ENDIAN__
60820 __ai float64x2_t vsqrtq_f64(float64x2_t __p0) {
60821   float64x2_t __ret;
60822   __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 42);
60823   return __ret;
60824 }
60825 #else
60826 __ai float64x2_t vsqrtq_f64(float64x2_t __p0) {
60827   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
60828   float64x2_t __ret;
60829   __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 42);
60830   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
60831   return __ret;
60832 }
60833 #endif
60834 
60835 #ifdef __LITTLE_ENDIAN__
60836 __ai float32x4_t vsqrtq_f32(float32x4_t __p0) {
60837   float32x4_t __ret;
60838   __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 41);
60839   return __ret;
60840 }
60841 #else
60842 __ai float32x4_t vsqrtq_f32(float32x4_t __p0) {
60843   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
60844   float32x4_t __ret;
60845   __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 41);
60846   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
60847   return __ret;
60848 }
60849 #endif
60850 
60851 #ifdef __LITTLE_ENDIAN__
60852 __ai float64x1_t vsqrt_f64(float64x1_t __p0) {
60853   float64x1_t __ret;
60854   __ret = (float64x1_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 10);
60855   return __ret;
60856 }
60857 #else
60858 __ai float64x1_t vsqrt_f64(float64x1_t __p0) {
60859   float64x1_t __ret;
60860   __ret = (float64x1_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 10);
60861   return __ret;
60862 }
60863 #endif
60864 
60865 #ifdef __LITTLE_ENDIAN__
60866 __ai float32x2_t vsqrt_f32(float32x2_t __p0) {
60867   float32x2_t __ret;
60868   __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 9);
60869   return __ret;
60870 }
60871 #else
60872 __ai float32x2_t vsqrt_f32(float32x2_t __p0) {
60873   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
60874   float32x2_t __ret;
60875   __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 9);
60876   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
60877   return __ret;
60878 }
60879 #endif
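/* Editorial usage sketch: vsqrtq_f32/vsqrtq_f64 and the 64-bit forms compute a
 * full-precision per-lane square root, unlike the vrsqrte* estimates earlier.
 * Names are illustrative.
 *
 *   float32x4_t x = vdupq_n_f32(9.0f);
 *   float32x4_t r = vsqrtq_f32(x);   // every lane becomes 3.0f
 */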
60880 
60881 #ifdef __LITTLE_ENDIAN__
60882 #define vsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \
60883   uint64_t __s0 = __p0; \
60884   uint64_t __s1 = __p1; \
60885   uint64_t __ret; \
60886   __ret = (uint64_t) __builtin_neon_vsrad_n_u64(__s0, __s1, __p2); \
60887   __ret; \
60888 })
60889 #else
60890 #define vsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \
60891   uint64_t __s0 = __p0; \
60892   uint64_t __s1 = __p1; \
60893   uint64_t __ret; \
60894   __ret = (uint64_t) __builtin_neon_vsrad_n_u64(__s0, __s1, __p2); \
60895   __ret; \
60896 })
60897 #endif
60898 
60899 #ifdef __LITTLE_ENDIAN__
60900 #define vsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \
60901   int64_t __s0 = __p0; \
60902   int64_t __s1 = __p1; \
60903   int64_t __ret; \
60904   __ret = (int64_t) __builtin_neon_vsrad_n_s64(__s0, __s1, __p2); \
60905   __ret; \
60906 })
60907 #else
60908 #define vsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \
60909   int64_t __s0 = __p0; \
60910   int64_t __s1 = __p1; \
60911   int64_t __ret; \
60912   __ret = (int64_t) __builtin_neon_vsrad_n_s64(__s0, __s1, __p2); \
60913   __ret; \
60914 })
60915 #endif
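/* Editorial usage sketch: vsrad_n_u64/vsrad_n_s64 shift the second operand right
 * by the immediate and accumulate it into the first. Names are illustrative.
 *
 *   int64_t acc = 100;
 *   int64_t r   = vsrad_n_s64(acc, 64, 3);   // 100 + (64 >> 3) = 108
 */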
60916 
60917 #ifdef __LITTLE_ENDIAN__
60918 #define vsrid_n_u64(__p0, __p1, __p2) __extension__ ({ \
60919   uint64_t __s0 = __p0; \
60920   uint64_t __s1 = __p1; \
60921   uint64_t __ret; \
60922   __ret = (uint64_t) __builtin_neon_vsrid_n_u64(__s0, __s1, __p2); \
60923   __ret; \
60924 })
60925 #else
60926 #define vsrid_n_u64(__p0, __p1, __p2) __extension__ ({ \
60927   uint64_t __s0 = __p0; \
60928   uint64_t __s1 = __p1; \
60929   uint64_t __ret; \
60930   __ret = (uint64_t) __builtin_neon_vsrid_n_u64(__s0, __s1, __p2); \
60931   __ret; \
60932 })
60933 #endif
60934 
60935 #ifdef __LITTLE_ENDIAN__
60936 #define vsrid_n_s64(__p0, __p1, __p2) __extension__ ({ \
60937   int64_t __s0 = __p0; \
60938   int64_t __s1 = __p1; \
60939   int64_t __ret; \
60940   __ret = (int64_t) __builtin_neon_vsrid_n_s64(__s0, __s1, __p2); \
60941   __ret; \
60942 })
60943 #else
60944 #define vsrid_n_s64(__p0, __p1, __p2) __extension__ ({ \
60945   int64_t __s0 = __p0; \
60946   int64_t __s1 = __p1; \
60947   int64_t __ret; \
60948   __ret = (int64_t) __builtin_neon_vsrid_n_s64(__s0, __s1, __p2); \
60949   __ret; \
60950 })
60951 #endif
60952 
60953 #ifdef __LITTLE_ENDIAN__
60954 #define vsri_n_p64(__p0, __p1, __p2) __extension__ ({ \
60955   poly64x1_t __s0 = __p0; \
60956   poly64x1_t __s1 = __p1; \
60957   poly64x1_t __ret; \
60958   __ret = (poly64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
60959   __ret; \
60960 })
60961 #else
60962 #define vsri_n_p64(__p0, __p1, __p2) __extension__ ({ \
60963   poly64x1_t __s0 = __p0; \
60964   poly64x1_t __s1 = __p1; \
60965   poly64x1_t __ret; \
60966   __ret = (poly64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
60967   __ret; \
60968 })
60969 #endif
60970 
60971 #ifdef __LITTLE_ENDIAN__
60972 #define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \
60973   poly64x2_t __s0 = __p0; \
60974   poly64x2_t __s1 = __p1; \
60975   poly64x2_t __ret; \
60976   __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
60977   __ret; \
60978 })
60979 #else
60980 #define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \
60981   poly64x2_t __s0 = __p0; \
60982   poly64x2_t __s1 = __p1; \
60983   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
60984   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
60985   poly64x2_t __ret; \
60986   __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
60987   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
60988   __ret; \
60989 })
60990 #endif
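/* Editorial usage sketch: the vsri*_n_* forms shift the second operand right by
 * the immediate and insert it into the first, keeping the first operand's bits
 * above the shift point. Names are illustrative.
 *
 *   uint64_t r = vsrid_n_u64(a, b, 8);   // r = (a & 0xff00000000000000ULL) | (b >> 8)
 */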
60991 
60992 #ifdef __LITTLE_ENDIAN__
60993 #define vst1_p64(__p0, __p1) __extension__ ({ \
60994   poly64x1_t __s1 = __p1; \
60995   __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 6); \
60996 })
60997 #else
60998 #define vst1_p64(__p0, __p1) __extension__ ({ \
60999   poly64x1_t __s1 = __p1; \
61000   __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 6); \
61001 })
61002 #endif
61003 
61004 #ifdef __LITTLE_ENDIAN__
61005 #define vst1q_p64(__p0, __p1) __extension__ ({ \
61006   poly64x2_t __s1 = __p1; \
61007   __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 38); \
61008 })
61009 #else
61010 #define vst1q_p64(__p0, __p1) __extension__ ({ \
61011   poly64x2_t __s1 = __p1; \
61012   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
61013   __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 38); \
61014 })
61015 #endif
61016 
61017 #ifdef __LITTLE_ENDIAN__
61018 #define vst1q_f64(__p0, __p1) __extension__ ({ \
61019   float64x2_t __s1 = __p1; \
61020   __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 42); \
61021 })
61022 #else
61023 #define vst1q_f64(__p0, __p1) __extension__ ({ \
61024   float64x2_t __s1 = __p1; \
61025   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
61026   __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 42); \
61027 })
61028 #endif
61029 
61030 #ifdef __LITTLE_ENDIAN__
61031 #define vst1_f64(__p0, __p1) __extension__ ({ \
61032   float64x1_t __s1 = __p1; \
61033   __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 10); \
61034 })
61035 #else
61036 #define vst1_f64(__p0, __p1) __extension__ ({ \
61037   float64x1_t __s1 = __p1; \
61038   __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 10); \
61039 })
61040 #endif
61041 
61042 #ifdef __LITTLE_ENDIAN__
61043 #define vst1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
61044   poly64x1_t __s1 = __p1; \
61045   __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \
61046 })
61047 #else
61048 #define vst1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
61049   poly64x1_t __s1 = __p1; \
61050   __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \
61051 })
61052 #endif
61053 
61054 #ifdef __LITTLE_ENDIAN__
61055 #define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
61056   poly64x2_t __s1 = __p1; \
61057   __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \
61058 })
61059 #else
61060 #define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
61061   poly64x2_t __s1 = __p1; \
61062   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
61063   __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \
61064 })
61065 #endif
61066 
61067 #ifdef __LITTLE_ENDIAN__
61068 #define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
61069   float64x2_t __s1 = __p1; \
61070   __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \
61071 })
61072 #else
61073 #define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
61074   float64x2_t __s1 = __p1; \
61075   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
61076   __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \
61077 })
61078 #endif
61079 
61080 #ifdef __LITTLE_ENDIAN__
61081 #define vst1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
61082   float64x1_t __s1 = __p1; \
61083   __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \
61084 })
61085 #else
61086 #define vst1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
61087   float64x1_t __s1 = __p1; \
61088   __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \
61089 })
61090 #endif
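/* Editorial usage sketch: vst1q_f64 stores a whole vector to memory, while
 * vst1q_lane_f64 stores a single selected lane. Names are illustrative.
 *
 *   float64_t buf[2];
 *   float64x2_t v = vdupq_n_f64(1.5);
 *   vst1q_f64(buf, v);            // buf[0] = buf[1] = 1.5
 *   vst1q_lane_f64(buf, v, 1);    // stores only lane 1 to buf[0]
 */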
61091 
61092 #ifdef __LITTLE_ENDIAN__
61093 #define vst1_p8_x2(__p0, __p1) __extension__ ({ \
61094   poly8x8x2_t __s1 = __p1; \
61095   __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \
61096 })
61097 #else
61098 #define vst1_p8_x2(__p0, __p1) __extension__ ({ \
61099   poly8x8x2_t __s1 = __p1; \
61100   poly8x8x2_t __rev1; \
61101   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
61102   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
61103   __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \
61104 })
61105 #endif
61106 
61107 #ifdef __LITTLE_ENDIAN__
61108 #define vst1_p64_x2(__p0, __p1) __extension__ ({ \
61109   poly64x1x2_t __s1 = __p1; \
61110   __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \
61111 })
61112 #else
61113 #define vst1_p64_x2(__p0, __p1) __extension__ ({ \
61114   poly64x1x2_t __s1 = __p1; \
61115   __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \
61116 })
61117 #endif
61118 
61119 #ifdef __LITTLE_ENDIAN__
61120 #define vst1_p16_x2(__p0, __p1) __extension__ ({ \
61121   poly16x4x2_t __s1 = __p1; \
61122   __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \
61123 })
61124 #else
61125 #define vst1_p16_x2(__p0, __p1) __extension__ ({ \
61126   poly16x4x2_t __s1 = __p1; \
61127   poly16x4x2_t __rev1; \
61128   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
61129   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
61130   __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \
61131 })
61132 #endif
61133 
61134 #ifdef __LITTLE_ENDIAN__
61135 #define vst1q_p8_x2(__p0, __p1) __extension__ ({ \
61136   poly8x16x2_t __s1 = __p1; \
61137   __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \
61138 })
61139 #else
61140 #define vst1q_p8_x2(__p0, __p1) __extension__ ({ \
61141   poly8x16x2_t __s1 = __p1; \
61142   poly8x16x2_t __rev1; \
61143   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
61144   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
61145   __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \
61146 })
61147 #endif
61148 
61149 #ifdef __LITTLE_ENDIAN__
61150 #define vst1q_p64_x2(__p0, __p1) __extension__ ({ \
61151   poly64x2x2_t __s1 = __p1; \
61152   __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 38); \
61153 })
61154 #else
61155 #define vst1q_p64_x2(__p0, __p1) __extension__ ({ \
61156   poly64x2x2_t __s1 = __p1; \
61157   poly64x2x2_t __rev1; \
61158   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
61159   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
61160   __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 38); \
61161 })
61162 #endif
61163 
61164 #ifdef __LITTLE_ENDIAN__
61165 #define vst1q_p16_x2(__p0, __p1) __extension__ ({ \
61166   poly16x8x2_t __s1 = __p1; \
61167   __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \
61168 })
61169 #else
61170 #define vst1q_p16_x2(__p0, __p1) __extension__ ({ \
61171   poly16x8x2_t __s1 = __p1; \
61172   poly16x8x2_t __rev1; \
61173   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
61174   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
61175   __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \
61176 })
61177 #endif
61178 
61179 #ifdef __LITTLE_ENDIAN__
61180 #define vst1q_u8_x2(__p0, __p1) __extension__ ({ \
61181   uint8x16x2_t __s1 = __p1; \
61182   __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \
61183 })
61184 #else
61185 #define vst1q_u8_x2(__p0, __p1) __extension__ ({ \
61186   uint8x16x2_t __s1 = __p1; \
61187   uint8x16x2_t __rev1; \
61188   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
61189   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
61190   __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \
61191 })
61192 #endif
61193 
61194 #ifdef __LITTLE_ENDIAN__
61195 #define vst1q_u32_x2(__p0, __p1) __extension__ ({ \
61196   uint32x4x2_t __s1 = __p1; \
61197   __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \
61198 })
61199 #else
61200 #define vst1q_u32_x2(__p0, __p1) __extension__ ({ \
61201   uint32x4x2_t __s1 = __p1; \
61202   uint32x4x2_t __rev1; \
61203   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
61204   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
61205   __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \
61206 })
61207 #endif
61208 
61209 #ifdef __LITTLE_ENDIAN__
61210 #define vst1q_u64_x2(__p0, __p1) __extension__ ({ \
61211   uint64x2x2_t __s1 = __p1; \
61212   __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 51); \
61213 })
61214 #else
61215 #define vst1q_u64_x2(__p0, __p1) __extension__ ({ \
61216   uint64x2x2_t __s1 = __p1; \
61217   uint64x2x2_t __rev1; \
61218   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
61219   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
61220   __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 51); \
61221 })
61222 #endif
61223 
61224 #ifdef __LITTLE_ENDIAN__
61225 #define vst1q_u16_x2(__p0, __p1) __extension__ ({ \
61226   uint16x8x2_t __s1 = __p1; \
61227   __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \
61228 })
61229 #else
61230 #define vst1q_u16_x2(__p0, __p1) __extension__ ({ \
61231   uint16x8x2_t __s1 = __p1; \
61232   uint16x8x2_t __rev1; \
61233   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
61234   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
61235   __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \
61236 })
61237 #endif
61238 
61239 #ifdef __LITTLE_ENDIAN__
61240 #define vst1q_s8_x2(__p0, __p1) __extension__ ({ \
61241   int8x16x2_t __s1 = __p1; \
61242   __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \
61243 })
61244 #else
61245 #define vst1q_s8_x2(__p0, __p1) __extension__ ({ \
61246   int8x16x2_t __s1 = __p1; \
61247   int8x16x2_t __rev1; \
61248   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
61249   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
61250   __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \
61251 })
61252 #endif
61253 
61254 #ifdef __LITTLE_ENDIAN__
61255 #define vst1q_f64_x2(__p0, __p1) __extension__ ({ \
61256   float64x2x2_t __s1 = __p1; \
61257   __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 42); \
61258 })
61259 #else
61260 #define vst1q_f64_x2(__p0, __p1) __extension__ ({ \
61261   float64x2x2_t __s1 = __p1; \
61262   float64x2x2_t __rev1; \
61263   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
61264   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
61265   __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 42); \
61266 })
61267 #endif
61268 
61269 #ifdef __LITTLE_ENDIAN__
61270 #define vst1q_f32_x2(__p0, __p1) __extension__ ({ \
61271   float32x4x2_t __s1 = __p1; \
61272   __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 41); \
61273 })
61274 #else
61275 #define vst1q_f32_x2(__p0, __p1) __extension__ ({ \
61276   float32x4x2_t __s1 = __p1; \
61277   float32x4x2_t __rev1; \
61278   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
61279   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
61280   __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 41); \
61281 })
61282 #endif
61283 
61284 #ifdef __LITTLE_ENDIAN__
61285 #define vst1q_f16_x2(__p0, __p1) __extension__ ({ \
61286   float16x8x2_t __s1 = __p1; \
61287   __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 40); \
61288 })
61289 #else
61290 #define vst1q_f16_x2(__p0, __p1) __extension__ ({ \
61291   float16x8x2_t __s1 = __p1; \
61292   float16x8x2_t __rev1; \
61293   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
61294   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
61295   __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 40); \
61296 })
61297 #endif
61298 
61299 #ifdef __LITTLE_ENDIAN__
61300 #define vst1q_s32_x2(__p0, __p1) __extension__ ({ \
61301   int32x4x2_t __s1 = __p1; \
61302   __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 34); \
61303 })
61304 #else
61305 #define vst1q_s32_x2(__p0, __p1) __extension__ ({ \
61306   int32x4x2_t __s1 = __p1; \
61307   int32x4x2_t __rev1; \
61308   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
61309   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
61310   __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 34); \
61311 })
61312 #endif
61313 
61314 #ifdef __LITTLE_ENDIAN__
61315 #define vst1q_s64_x2(__p0, __p1) __extension__ ({ \
61316   int64x2x2_t __s1 = __p1; \
61317   __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 35); \
61318 })
61319 #else
61320 #define vst1q_s64_x2(__p0, __p1) __extension__ ({ \
61321   int64x2x2_t __s1 = __p1; \
61322   int64x2x2_t __rev1; \
61323   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
61324   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
61325   __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 35); \
61326 })
61327 #endif
61328 
61329 #ifdef __LITTLE_ENDIAN__
61330 #define vst1q_s16_x2(__p0, __p1) __extension__ ({ \
61331   int16x8x2_t __s1 = __p1; \
61332   __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 33); \
61333 })
61334 #else
61335 #define vst1q_s16_x2(__p0, __p1) __extension__ ({ \
61336   int16x8x2_t __s1 = __p1; \
61337   int16x8x2_t __rev1; \
61338   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
61339   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
61340   __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 33); \
61341 })
61342 #endif
61343 
61344 #ifdef __LITTLE_ENDIAN__
61345 #define vst1_u8_x2(__p0, __p1) __extension__ ({ \
61346   uint8x8x2_t __s1 = __p1; \
61347   __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \
61348 })
61349 #else
61350 #define vst1_u8_x2(__p0, __p1) __extension__ ({ \
61351   uint8x8x2_t __s1 = __p1; \
61352   uint8x8x2_t __rev1; \
61353   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
61354   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
61355   __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \
61356 })
61357 #endif
61358 
61359 #ifdef __LITTLE_ENDIAN__
61360 #define vst1_u32_x2(__p0, __p1) __extension__ ({ \
61361   uint32x2x2_t __s1 = __p1; \
61362   __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \
61363 })
61364 #else
61365 #define vst1_u32_x2(__p0, __p1) __extension__ ({ \
61366   uint32x2x2_t __s1 = __p1; \
61367   uint32x2x2_t __rev1; \
61368   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
61369   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
61370   __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \
61371 })
61372 #endif
61373 
61374 #ifdef __LITTLE_ENDIAN__
61375 #define vst1_u64_x2(__p0, __p1) __extension__ ({ \
61376   uint64x1x2_t __s1 = __p1; \
61377   __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \
61378 })
61379 #else
61380 #define vst1_u64_x2(__p0, __p1) __extension__ ({ \
61381   uint64x1x2_t __s1 = __p1; \
61382   __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \
61383 })
61384 #endif
61385 
61386 #ifdef __LITTLE_ENDIAN__
61387 #define vst1_u16_x2(__p0, __p1) __extension__ ({ \
61388   uint16x4x2_t __s1 = __p1; \
61389   __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 17); \
61390 })
61391 #else
61392 #define vst1_u16_x2(__p0, __p1) __extension__ ({ \
61393   uint16x4x2_t __s1 = __p1; \
61394   uint16x4x2_t __rev1; \
61395   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
61396   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
61397   __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \
61398 })
61399 #endif
61400 
61401 #ifdef __LITTLE_ENDIAN__
61402 #define vst1_s8_x2(__p0, __p1) __extension__ ({ \
61403   int8x8x2_t __s1 = __p1; \
61404   __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \
61405 })
61406 #else
61407 #define vst1_s8_x2(__p0, __p1) __extension__ ({ \
61408   int8x8x2_t __s1 = __p1; \
61409   int8x8x2_t __rev1; \
61410   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
61411   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
61412   __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \
61413 })
61414 #endif
61415 
61416 #ifdef __LITTLE_ENDIAN__
61417 #define vst1_f64_x2(__p0, __p1) __extension__ ({ \
61418   float64x1x2_t __s1 = __p1; \
61419   __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 10); \
61420 })
61421 #else
61422 #define vst1_f64_x2(__p0, __p1) __extension__ ({ \
61423   float64x1x2_t __s1 = __p1; \
61424   __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 10); \
61425 })
61426 #endif
61427 
61428 #ifdef __LITTLE_ENDIAN__
61429 #define vst1_f32_x2(__p0, __p1) __extension__ ({ \
61430   float32x2x2_t __s1 = __p1; \
61431   __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 9); \
61432 })
61433 #else
61434 #define vst1_f32_x2(__p0, __p1) __extension__ ({ \
61435   float32x2x2_t __s1 = __p1; \
61436   float32x2x2_t __rev1; \
61437   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
61438   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
61439   __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 9); \
61440 })
61441 #endif
61442 
61443 #ifdef __LITTLE_ENDIAN__
61444 #define vst1_f16_x2(__p0, __p1) __extension__ ({ \
61445   float16x4x2_t __s1 = __p1; \
61446   __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 8); \
61447 })
61448 #else
61449 #define vst1_f16_x2(__p0, __p1) __extension__ ({ \
61450   float16x4x2_t __s1 = __p1; \
61451   float16x4x2_t __rev1; \
61452   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
61453   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
61454   __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 8); \
61455 })
61456 #endif
61457 
61458 #ifdef __LITTLE_ENDIAN__
61459 #define vst1_s32_x2(__p0, __p1) __extension__ ({ \
61460   int32x2x2_t __s1 = __p1; \
61461   __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 2); \
61462 })
61463 #else
61464 #define vst1_s32_x2(__p0, __p1) __extension__ ({ \
61465   int32x2x2_t __s1 = __p1; \
61466   int32x2x2_t __rev1; \
61467   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
61468   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
61469   __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 2); \
61470 })
61471 #endif
61472 
61473 #ifdef __LITTLE_ENDIAN__
61474 #define vst1_s64_x2(__p0, __p1) __extension__ ({ \
61475   int64x1x2_t __s1 = __p1; \
61476   __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 3); \
61477 })
61478 #else
61479 #define vst1_s64_x2(__p0, __p1) __extension__ ({ \
61480   int64x1x2_t __s1 = __p1; \
61481   __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 3); \
61482 })
61483 #endif
61484 
61485 #ifdef __LITTLE_ENDIAN__
61486 #define vst1_s16_x2(__p0, __p1) __extension__ ({ \
61487   int16x4x2_t __s1 = __p1; \
61488   __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 1); \
61489 })
61490 #else
61491 #define vst1_s16_x2(__p0, __p1) __extension__ ({ \
61492   int16x4x2_t __s1 = __p1; \
61493   int16x4x2_t __rev1; \
61494   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
61495   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
61496   __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 1); \
61497 })
61498 #endif
61499 
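/* The vst1_*_x3 / vst1q_*_x3 macros that follow store three vectors to
 * consecutive, non-interleaved memory (a single multi-register ST1); the
 * big-endian branches reverse each lane vector with __builtin_shufflevector
 * first so the memory layout matches the little-endian one. A minimal usage
 * sketch, assuming an AArch64 target with NEON enabled (buffer and variable
 * names are illustrative only, not part of this header):
 *
 *   uint8_t out[24];
 *   uint8x8x3_t trip;
 *   trip.val[0] = vdup_n_u8(0);      // first 8 bytes
 *   trip.val[1] = vdup_n_u8(1);      // next 8 bytes
 *   trip.val[2] = vdup_n_u8(2);      // last 8 bytes
 *   vst1_u8_x3(out, trip);           // writes 24 contiguous bytes to out
 */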
61500 #ifdef __LITTLE_ENDIAN__
61501 #define vst1_p8_x3(__p0, __p1) __extension__ ({ \
61502   poly8x8x3_t __s1 = __p1; \
61503   __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \
61504 })
61505 #else
61506 #define vst1_p8_x3(__p0, __p1) __extension__ ({ \
61507   poly8x8x3_t __s1 = __p1; \
61508   poly8x8x3_t __rev1; \
61509   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
61510   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
61511   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
61512   __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \
61513 })
61514 #endif
61515 
61516 #ifdef __LITTLE_ENDIAN__
61517 #define vst1_p64_x3(__p0, __p1) __extension__ ({ \
61518   poly64x1x3_t __s1 = __p1; \
61519   __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \
61520 })
61521 #else
61522 #define vst1_p64_x3(__p0, __p1) __extension__ ({ \
61523   poly64x1x3_t __s1 = __p1; \
61524   __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \
61525 })
61526 #endif
61527 
61528 #ifdef __LITTLE_ENDIAN__
61529 #define vst1_p16_x3(__p0, __p1) __extension__ ({ \
61530   poly16x4x3_t __s1 = __p1; \
61531   __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \
61532 })
61533 #else
61534 #define vst1_p16_x3(__p0, __p1) __extension__ ({ \
61535   poly16x4x3_t __s1 = __p1; \
61536   poly16x4x3_t __rev1; \
61537   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
61538   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
61539   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
61540   __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \
61541 })
61542 #endif
61543 
61544 #ifdef __LITTLE_ENDIAN__
61545 #define vst1q_p8_x3(__p0, __p1) __extension__ ({ \
61546   poly8x16x3_t __s1 = __p1; \
61547   __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \
61548 })
61549 #else
61550 #define vst1q_p8_x3(__p0, __p1) __extension__ ({ \
61551   poly8x16x3_t __s1 = __p1; \
61552   poly8x16x3_t __rev1; \
61553   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
61554   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
61555   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
61556   __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \
61557 })
61558 #endif
61559 
61560 #ifdef __LITTLE_ENDIAN__
61561 #define vst1q_p64_x3(__p0, __p1) __extension__ ({ \
61562   poly64x2x3_t __s1 = __p1; \
61563   __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 38); \
61564 })
61565 #else
61566 #define vst1q_p64_x3(__p0, __p1) __extension__ ({ \
61567   poly64x2x3_t __s1 = __p1; \
61568   poly64x2x3_t __rev1; \
61569   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
61570   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
61571   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
61572   __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 38); \
61573 })
61574 #endif
61575 
61576 #ifdef __LITTLE_ENDIAN__
61577 #define vst1q_p16_x3(__p0, __p1) __extension__ ({ \
61578   poly16x8x3_t __s1 = __p1; \
61579   __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 37); \
61580 })
61581 #else
61582 #define vst1q_p16_x3(__p0, __p1) __extension__ ({ \
61583   poly16x8x3_t __s1 = __p1; \
61584   poly16x8x3_t __rev1; \
61585   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
61586   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
61587   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
61588   __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \
61589 })
61590 #endif
61591 
61592 #ifdef __LITTLE_ENDIAN__
61593 #define vst1q_u8_x3(__p0, __p1) __extension__ ({ \
61594   uint8x16x3_t __s1 = __p1; \
61595   __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \
61596 })
61597 #else
61598 #define vst1q_u8_x3(__p0, __p1) __extension__ ({ \
61599   uint8x16x3_t __s1 = __p1; \
61600   uint8x16x3_t __rev1; \
61601   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
61602   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
61603   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
61604   __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \
61605 })
61606 #endif
61607 
61608 #ifdef __LITTLE_ENDIAN__
61609 #define vst1q_u32_x3(__p0, __p1) __extension__ ({ \
61610   uint32x4x3_t __s1 = __p1; \
61611   __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \
61612 })
61613 #else
61614 #define vst1q_u32_x3(__p0, __p1) __extension__ ({ \
61615   uint32x4x3_t __s1 = __p1; \
61616   uint32x4x3_t __rev1; \
61617   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
61618   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
61619   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
61620   __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \
61621 })
61622 #endif
61623 
61624 #ifdef __LITTLE_ENDIAN__
61625 #define vst1q_u64_x3(__p0, __p1) __extension__ ({ \
61626   uint64x2x3_t __s1 = __p1; \
61627   __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 51); \
61628 })
61629 #else
61630 #define vst1q_u64_x3(__p0, __p1) __extension__ ({ \
61631   uint64x2x3_t __s1 = __p1; \
61632   uint64x2x3_t __rev1; \
61633   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
61634   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
61635   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
61636   __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 51); \
61637 })
61638 #endif
61639 
61640 #ifdef __LITTLE_ENDIAN__
61641 #define vst1q_u16_x3(__p0, __p1) __extension__ ({ \
61642   uint16x8x3_t __s1 = __p1; \
61643   __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 49); \
61644 })
61645 #else
61646 #define vst1q_u16_x3(__p0, __p1) __extension__ ({ \
61647   uint16x8x3_t __s1 = __p1; \
61648   uint16x8x3_t __rev1; \
61649   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
61650   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
61651   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
61652   __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \
61653 })
61654 #endif
61655 
61656 #ifdef __LITTLE_ENDIAN__
61657 #define vst1q_s8_x3(__p0, __p1) __extension__ ({ \
61658   int8x16x3_t __s1 = __p1; \
61659   __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 32); \
61660 })
61661 #else
61662 #define vst1q_s8_x3(__p0, __p1) __extension__ ({ \
61663   int8x16x3_t __s1 = __p1; \
61664   int8x16x3_t __rev1; \
61665   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
61666   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
61667   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
61668   __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \
61669 })
61670 #endif
61671 
61672 #ifdef __LITTLE_ENDIAN__
61673 #define vst1q_f64_x3(__p0, __p1) __extension__ ({ \
61674   float64x2x3_t __s1 = __p1; \
61675   __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 42); \
61676 })
61677 #else
61678 #define vst1q_f64_x3(__p0, __p1) __extension__ ({ \
61679   float64x2x3_t __s1 = __p1; \
61680   float64x2x3_t __rev1; \
61681   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
61682   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
61683   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
61684   __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 42); \
61685 })
61686 #endif
61687 
61688 #ifdef __LITTLE_ENDIAN__
61689 #define vst1q_f32_x3(__p0, __p1) __extension__ ({ \
61690   float32x4x3_t __s1 = __p1; \
61691   __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 41); \
61692 })
61693 #else
61694 #define vst1q_f32_x3(__p0, __p1) __extension__ ({ \
61695   float32x4x3_t __s1 = __p1; \
61696   float32x4x3_t __rev1; \
61697   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
61698   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
61699   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
61700   __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 41); \
61701 })
61702 #endif
61703 
61704 #ifdef __LITTLE_ENDIAN__
61705 #define vst1q_f16_x3(__p0, __p1) __extension__ ({ \
61706   float16x8x3_t __s1 = __p1; \
61707   __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 40); \
61708 })
61709 #else
61710 #define vst1q_f16_x3(__p0, __p1) __extension__ ({ \
61711   float16x8x3_t __s1 = __p1; \
61712   float16x8x3_t __rev1; \
61713   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
61714   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
61715   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
61716   __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 40); \
61717 })
61718 #endif
61719 
61720 #ifdef __LITTLE_ENDIAN__
61721 #define vst1q_s32_x3(__p0, __p1) __extension__ ({ \
61722   int32x4x3_t __s1 = __p1; \
61723   __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 34); \
61724 })
61725 #else
61726 #define vst1q_s32_x3(__p0, __p1) __extension__ ({ \
61727   int32x4x3_t __s1 = __p1; \
61728   int32x4x3_t __rev1; \
61729   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
61730   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
61731   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
61732   __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 34); \
61733 })
61734 #endif
61735 
61736 #ifdef __LITTLE_ENDIAN__
61737 #define vst1q_s64_x3(__p0, __p1) __extension__ ({ \
61738   int64x2x3_t __s1 = __p1; \
61739   __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 35); \
61740 })
61741 #else
61742 #define vst1q_s64_x3(__p0, __p1) __extension__ ({ \
61743   int64x2x3_t __s1 = __p1; \
61744   int64x2x3_t __rev1; \
61745   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
61746   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
61747   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
61748   __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 35); \
61749 })
61750 #endif
61751 
61752 #ifdef __LITTLE_ENDIAN__
61753 #define vst1q_s16_x3(__p0, __p1) __extension__ ({ \
61754   int16x8x3_t __s1 = __p1; \
61755   __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 33); \
61756 })
61757 #else
61758 #define vst1q_s16_x3(__p0, __p1) __extension__ ({ \
61759   int16x8x3_t __s1 = __p1; \
61760   int16x8x3_t __rev1; \
61761   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
61762   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
61763   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
61764   __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 33); \
61765 })
61766 #endif
61767 
61768 #ifdef __LITTLE_ENDIAN__
61769 #define vst1_u8_x3(__p0, __p1) __extension__ ({ \
61770   uint8x8x3_t __s1 = __p1; \
61771   __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \
61772 })
61773 #else
61774 #define vst1_u8_x3(__p0, __p1) __extension__ ({ \
61775   uint8x8x3_t __s1 = __p1; \
61776   uint8x8x3_t __rev1; \
61777   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
61778   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
61779   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
61780   __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \
61781 })
61782 #endif
61783 
61784 #ifdef __LITTLE_ENDIAN__
61785 #define vst1_u32_x3(__p0, __p1) __extension__ ({ \
61786   uint32x2x3_t __s1 = __p1; \
61787   __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \
61788 })
61789 #else
61790 #define vst1_u32_x3(__p0, __p1) __extension__ ({ \
61791   uint32x2x3_t __s1 = __p1; \
61792   uint32x2x3_t __rev1; \
61793   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
61794   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
61795   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
61796   __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \
61797 })
61798 #endif
61799 
61800 #ifdef __LITTLE_ENDIAN__
61801 #define vst1_u64_x3(__p0, __p1) __extension__ ({ \
61802   uint64x1x3_t __s1 = __p1; \
61803   __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \
61804 })
61805 #else
61806 #define vst1_u64_x3(__p0, __p1) __extension__ ({ \
61807   uint64x1x3_t __s1 = __p1; \
61808   __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \
61809 })
61810 #endif
61811 
61812 #ifdef __LITTLE_ENDIAN__
61813 #define vst1_u16_x3(__p0, __p1) __extension__ ({ \
61814   uint16x4x3_t __s1 = __p1; \
61815   __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \
61816 })
61817 #else
61818 #define vst1_u16_x3(__p0, __p1) __extension__ ({ \
61819   uint16x4x3_t __s1 = __p1; \
61820   uint16x4x3_t __rev1; \
61821   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
61822   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
61823   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
61824   __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \
61825 })
61826 #endif
61827 
61828 #ifdef __LITTLE_ENDIAN__
61829 #define vst1_s8_x3(__p0, __p1) __extension__ ({ \
61830   int8x8x3_t __s1 = __p1; \
61831   __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \
61832 })
61833 #else
61834 #define vst1_s8_x3(__p0, __p1) __extension__ ({ \
61835   int8x8x3_t __s1 = __p1; \
61836   int8x8x3_t __rev1; \
61837   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
61838   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
61839   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
61840   __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \
61841 })
61842 #endif
61843 
61844 #ifdef __LITTLE_ENDIAN__
61845 #define vst1_f64_x3(__p0, __p1) __extension__ ({ \
61846   float64x1x3_t __s1 = __p1; \
61847   __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 10); \
61848 })
61849 #else
61850 #define vst1_f64_x3(__p0, __p1) __extension__ ({ \
61851   float64x1x3_t __s1 = __p1; \
61852   __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 10); \
61853 })
61854 #endif
61855 
61856 #ifdef __LITTLE_ENDIAN__
61857 #define vst1_f32_x3(__p0, __p1) __extension__ ({ \
61858   float32x2x3_t __s1 = __p1; \
61859   __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 9); \
61860 })
61861 #else
61862 #define vst1_f32_x3(__p0, __p1) __extension__ ({ \
61863   float32x2x3_t __s1 = __p1; \
61864   float32x2x3_t __rev1; \
61865   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
61866   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
61867   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
61868   __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 9); \
61869 })
61870 #endif
61871 
61872 #ifdef __LITTLE_ENDIAN__
61873 #define vst1_f16_x3(__p0, __p1) __extension__ ({ \
61874   float16x4x3_t __s1 = __p1; \
61875   __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 8); \
61876 })
61877 #else
61878 #define vst1_f16_x3(__p0, __p1) __extension__ ({ \
61879   float16x4x3_t __s1 = __p1; \
61880   float16x4x3_t __rev1; \
61881   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
61882   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
61883   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
61884   __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 8); \
61885 })
61886 #endif
61887 
61888 #ifdef __LITTLE_ENDIAN__
61889 #define vst1_s32_x3(__p0, __p1) __extension__ ({ \
61890   int32x2x3_t __s1 = __p1; \
61891   __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 2); \
61892 })
61893 #else
61894 #define vst1_s32_x3(__p0, __p1) __extension__ ({ \
61895   int32x2x3_t __s1 = __p1; \
61896   int32x2x3_t __rev1; \
61897   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
61898   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
61899   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
61900   __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 2); \
61901 })
61902 #endif
61903 
61904 #ifdef __LITTLE_ENDIAN__
61905 #define vst1_s64_x3(__p0, __p1) __extension__ ({ \
61906   int64x1x3_t __s1 = __p1; \
61907   __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \
61908 })
61909 #else
61910 #define vst1_s64_x3(__p0, __p1) __extension__ ({ \
61911   int64x1x3_t __s1 = __p1; \
61912   __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \
61913 })
61914 #endif
61915 
61916 #ifdef __LITTLE_ENDIAN__
61917 #define vst1_s16_x3(__p0, __p1) __extension__ ({ \
61918   int16x4x3_t __s1 = __p1; \
61919   __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 1); \
61920 })
61921 #else
61922 #define vst1_s16_x3(__p0, __p1) __extension__ ({ \
61923   int16x4x3_t __s1 = __p1; \
61924   int16x4x3_t __rev1; \
61925   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
61926   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
61927   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
61928   __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 1); \
61929 })
61930 #endif
61931 
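/* The vst1_*_x4 / vst1q_*_x4 macros below follow the same pattern for four
 * vectors: each val[n] is stored back to back, with a per-vector element
 * reversal on big-endian targets before the builtin is called. A minimal
 * usage sketch, assuming an AArch64 target with NEON enabled (names are
 * illustrative only):
 *
 *   float buf[16];
 *   float32x4x4_t quad;
 *   quad.val[0] = vdupq_n_f32(1.0f);
 *   quad.val[1] = vdupq_n_f32(2.0f);
 *   quad.val[2] = vdupq_n_f32(3.0f);
 *   quad.val[3] = vdupq_n_f32(4.0f);
 *   vst1q_f32_x4(buf, quad);         // writes 16 contiguous floats to buf
 */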
61932 #ifdef __LITTLE_ENDIAN__
61933 #define vst1_p8_x4(__p0, __p1) __extension__ ({ \
61934   poly8x8x4_t __s1 = __p1; \
61935   __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \
61936 })
61937 #else
61938 #define vst1_p8_x4(__p0, __p1) __extension__ ({ \
61939   poly8x8x4_t __s1 = __p1; \
61940   poly8x8x4_t __rev1; \
61941   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
61942   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
61943   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
61944   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
61945   __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \
61946 })
61947 #endif
61948 
61949 #ifdef __LITTLE_ENDIAN__
61950 #define vst1_p64_x4(__p0, __p1) __extension__ ({ \
61951   poly64x1x4_t __s1 = __p1; \
61952   __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \
61953 })
61954 #else
61955 #define vst1_p64_x4(__p0, __p1) __extension__ ({ \
61956   poly64x1x4_t __s1 = __p1; \
61957   __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \
61958 })
61959 #endif
61960 
61961 #ifdef __LITTLE_ENDIAN__
61962 #define vst1_p16_x4(__p0, __p1) __extension__ ({ \
61963   poly16x4x4_t __s1 = __p1; \
61964   __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \
61965 })
61966 #else
61967 #define vst1_p16_x4(__p0, __p1) __extension__ ({ \
61968   poly16x4x4_t __s1 = __p1; \
61969   poly16x4x4_t __rev1; \
61970   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
61971   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
61972   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
61973   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
61974   __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \
61975 })
61976 #endif
61977 
61978 #ifdef __LITTLE_ENDIAN__
61979 #define vst1q_p8_x4(__p0, __p1) __extension__ ({ \
61980   poly8x16x4_t __s1 = __p1; \
61981   __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 36); \
61982 })
61983 #else
61984 #define vst1q_p8_x4(__p0, __p1) __extension__ ({ \
61985   poly8x16x4_t __s1 = __p1; \
61986   poly8x16x4_t __rev1; \
61987   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
61988   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
61989   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
61990   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
61991   __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \
61992 })
61993 #endif
61994 
61995 #ifdef __LITTLE_ENDIAN__
61996 #define vst1q_p64_x4(__p0, __p1) __extension__ ({ \
61997   poly64x2x4_t __s1 = __p1; \
61998   __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 38); \
61999 })
62000 #else
62001 #define vst1q_p64_x4(__p0, __p1) __extension__ ({ \
62002   poly64x2x4_t __s1 = __p1; \
62003   poly64x2x4_t __rev1; \
62004   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
62005   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
62006   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
62007   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
62008   __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 38); \
62009 })
62010 #endif
62011 
62012 #ifdef __LITTLE_ENDIAN__
62013 #define vst1q_p16_x4(__p0, __p1) __extension__ ({ \
62014   poly16x8x4_t __s1 = __p1; \
62015   __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 37); \
62016 })
62017 #else
62018 #define vst1q_p16_x4(__p0, __p1) __extension__ ({ \
62019   poly16x8x4_t __s1 = __p1; \
62020   poly16x8x4_t __rev1; \
62021   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
62022   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
62023   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
62024   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
62025   __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \
62026 })
62027 #endif
62028 
62029 #ifdef __LITTLE_ENDIAN__
62030 #define vst1q_u8_x4(__p0, __p1) __extension__ ({ \
62031   uint8x16x4_t __s1 = __p1; \
62032   __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \
62033 })
62034 #else
62035 #define vst1q_u8_x4(__p0, __p1) __extension__ ({ \
62036   uint8x16x4_t __s1 = __p1; \
62037   uint8x16x4_t __rev1; \
62038   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
62039   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
62040   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
62041   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
62042   __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \
62043 })
62044 #endif
62045 
62046 #ifdef __LITTLE_ENDIAN__
62047 #define vst1q_u32_x4(__p0, __p1) __extension__ ({ \
62048   uint32x4x4_t __s1 = __p1; \
62049   __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 50); \
62050 })
62051 #else
62052 #define vst1q_u32_x4(__p0, __p1) __extension__ ({ \
62053   uint32x4x4_t __s1 = __p1; \
62054   uint32x4x4_t __rev1; \
62055   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
62056   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
62057   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
62058   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
62059   __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \
62060 })
62061 #endif
62062 
62063 #ifdef __LITTLE_ENDIAN__
62064 #define vst1q_u64_x4(__p0, __p1) __extension__ ({ \
62065   uint64x2x4_t __s1 = __p1; \
62066   __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 51); \
62067 })
62068 #else
62069 #define vst1q_u64_x4(__p0, __p1) __extension__ ({ \
62070   uint64x2x4_t __s1 = __p1; \
62071   uint64x2x4_t __rev1; \
62072   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
62073   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
62074   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
62075   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
62076   __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 51); \
62077 })
62078 #endif
62079 
62080 #ifdef __LITTLE_ENDIAN__
62081 #define vst1q_u16_x4(__p0, __p1) __extension__ ({ \
62082   uint16x8x4_t __s1 = __p1; \
62083   __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \
62084 })
62085 #else
62086 #define vst1q_u16_x4(__p0, __p1) __extension__ ({ \
62087   uint16x8x4_t __s1 = __p1; \
62088   uint16x8x4_t __rev1; \
62089   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
62090   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
62091   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
62092   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
62093   __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \
62094 })
62095 #endif
62096 
62097 #ifdef __LITTLE_ENDIAN__
62098 #define vst1q_s8_x4(__p0, __p1) __extension__ ({ \
62099   int8x16x4_t __s1 = __p1; \
62100   __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \
62101 })
62102 #else
62103 #define vst1q_s8_x4(__p0, __p1) __extension__ ({ \
62104   int8x16x4_t __s1 = __p1; \
62105   int8x16x4_t __rev1; \
62106   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
62107   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
62108   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
62109   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
62110   __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \
62111 })
62112 #endif
62113 
62114 #ifdef __LITTLE_ENDIAN__
62115 #define vst1q_f64_x4(__p0, __p1) __extension__ ({ \
62116   float64x2x4_t __s1 = __p1; \
62117   __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 42); \
62118 })
62119 #else
62120 #define vst1q_f64_x4(__p0, __p1) __extension__ ({ \
62121   float64x2x4_t __s1 = __p1; \
62122   float64x2x4_t __rev1; \
62123   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
62124   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
62125   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
62126   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
62127   __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 42); \
62128 })
62129 #endif
62130 
62131 #ifdef __LITTLE_ENDIAN__
62132 #define vst1q_f32_x4(__p0, __p1) __extension__ ({ \
62133   float32x4x4_t __s1 = __p1; \
62134   __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 41); \
62135 })
62136 #else
62137 #define vst1q_f32_x4(__p0, __p1) __extension__ ({ \
62138   float32x4x4_t __s1 = __p1; \
62139   float32x4x4_t __rev1; \
62140   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
62141   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
62142   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
62143   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
62144   __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 41); \
62145 })
62146 #endif
62147 
62148 #ifdef __LITTLE_ENDIAN__
62149 #define vst1q_f16_x4(__p0, __p1) __extension__ ({ \
62150   float16x8x4_t __s1 = __p1; \
62151   __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 40); \
62152 })
62153 #else
62154 #define vst1q_f16_x4(__p0, __p1) __extension__ ({ \
62155   float16x8x4_t __s1 = __p1; \
62156   float16x8x4_t __rev1; \
62157   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
62158   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
62159   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
62160   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
62161   __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 40); \
62162 })
62163 #endif
62164 
62165 #ifdef __LITTLE_ENDIAN__
62166 #define vst1q_s32_x4(__p0, __p1) __extension__ ({ \
62167   int32x4x4_t __s1 = __p1; \
62168   __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 34); \
62169 })
62170 #else
62171 #define vst1q_s32_x4(__p0, __p1) __extension__ ({ \
62172   int32x4x4_t __s1 = __p1; \
62173   int32x4x4_t __rev1; \
62174   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
62175   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
62176   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
62177   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
62178   __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 34); \
62179 })
62180 #endif
62181 
62182 #ifdef __LITTLE_ENDIAN__
62183 #define vst1q_s64_x4(__p0, __p1) __extension__ ({ \
62184   int64x2x4_t __s1 = __p1; \
62185   __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 35); \
62186 })
62187 #else
62188 #define vst1q_s64_x4(__p0, __p1) __extension__ ({ \
62189   int64x2x4_t __s1 = __p1; \
62190   int64x2x4_t __rev1; \
62191   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
62192   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
62193   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
62194   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
62195   __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 35); \
62196 })
62197 #endif
62198 
62199 #ifdef __LITTLE_ENDIAN__
62200 #define vst1q_s16_x4(__p0, __p1) __extension__ ({ \
62201   int16x8x4_t __s1 = __p1; \
62202   __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 33); \
62203 })
62204 #else
62205 #define vst1q_s16_x4(__p0, __p1) __extension__ ({ \
62206   int16x8x4_t __s1 = __p1; \
62207   int16x8x4_t __rev1; \
62208   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
62209   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
62210   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
62211   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
62212   __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 33); \
62213 })
62214 #endif
62215 
62216 #ifdef __LITTLE_ENDIAN__
62217 #define vst1_u8_x4(__p0, __p1) __extension__ ({ \
62218   uint8x8x4_t __s1 = __p1; \
62219   __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \
62220 })
62221 #else
62222 #define vst1_u8_x4(__p0, __p1) __extension__ ({ \
62223   uint8x8x4_t __s1 = __p1; \
62224   uint8x8x4_t __rev1; \
62225   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
62226   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
62227   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
62228   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
62229   __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \
62230 })
62231 #endif
62232 
62233 #ifdef __LITTLE_ENDIAN__
62234 #define vst1_u32_x4(__p0, __p1) __extension__ ({ \
62235   uint32x2x4_t __s1 = __p1; \
62236   __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \
62237 })
62238 #else
62239 #define vst1_u32_x4(__p0, __p1) __extension__ ({ \
62240   uint32x2x4_t __s1 = __p1; \
62241   uint32x2x4_t __rev1; \
62242   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
62243   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
62244   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
62245   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
62246   __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \
62247 })
62248 #endif
62249 
62250 #ifdef __LITTLE_ENDIAN__
62251 #define vst1_u64_x4(__p0, __p1) __extension__ ({ \
62252   uint64x1x4_t __s1 = __p1; \
62253   __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \
62254 })
62255 #else
62256 #define vst1_u64_x4(__p0, __p1) __extension__ ({ \
62257   uint64x1x4_t __s1 = __p1; \
62258   __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \
62259 })
62260 #endif
62261 
62262 #ifdef __LITTLE_ENDIAN__
62263 #define vst1_u16_x4(__p0, __p1) __extension__ ({ \
62264   uint16x4x4_t __s1 = __p1; \
62265   __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \
62266 })
62267 #else
62268 #define vst1_u16_x4(__p0, __p1) __extension__ ({ \
62269   uint16x4x4_t __s1 = __p1; \
62270   uint16x4x4_t __rev1; \
62271   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
62272   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
62273   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
62274   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
62275   __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \
62276 })
62277 #endif
62278 
62279 #ifdef __LITTLE_ENDIAN__
62280 #define vst1_s8_x4(__p0, __p1) __extension__ ({ \
62281   int8x8x4_t __s1 = __p1; \
62282   __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \
62283 })
62284 #else
62285 #define vst1_s8_x4(__p0, __p1) __extension__ ({ \
62286   int8x8x4_t __s1 = __p1; \
62287   int8x8x4_t __rev1; \
62288   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
62289   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
62290   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
62291   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
62292   __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \
62293 })
62294 #endif
62295 
62296 #ifdef __LITTLE_ENDIAN__
62297 #define vst1_f64_x4(__p0, __p1) __extension__ ({ \
62298   float64x1x4_t __s1 = __p1; \
62299   __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 10); \
62300 })
62301 #else
62302 #define vst1_f64_x4(__p0, __p1) __extension__ ({ \
62303   float64x1x4_t __s1 = __p1; \
62304   __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 10); \
62305 })
62306 #endif
62307 
62308 #ifdef __LITTLE_ENDIAN__
62309 #define vst1_f32_x4(__p0, __p1) __extension__ ({ \
62310   float32x2x4_t __s1 = __p1; \
62311   __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 9); \
62312 })
62313 #else
62314 #define vst1_f32_x4(__p0, __p1) __extension__ ({ \
62315   float32x2x4_t __s1 = __p1; \
62316   float32x2x4_t __rev1; \
62317   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
62318   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
62319   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
62320   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
62321   __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 9); \
62322 })
62323 #endif
62324 
62325 #ifdef __LITTLE_ENDIAN__
62326 #define vst1_f16_x4(__p0, __p1) __extension__ ({ \
62327   float16x4x4_t __s1 = __p1; \
62328   __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 8); \
62329 })
62330 #else
62331 #define vst1_f16_x4(__p0, __p1) __extension__ ({ \
62332   float16x4x4_t __s1 = __p1; \
62333   float16x4x4_t __rev1; \
62334   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
62335   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
62336   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
62337   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
62338   __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 8); \
62339 })
62340 #endif
62341 
62342 #ifdef __LITTLE_ENDIAN__
62343 #define vst1_s32_x4(__p0, __p1) __extension__ ({ \
62344   int32x2x4_t __s1 = __p1; \
62345   __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 2); \
62346 })
62347 #else
62348 #define vst1_s32_x4(__p0, __p1) __extension__ ({ \
62349   int32x2x4_t __s1 = __p1; \
62350   int32x2x4_t __rev1; \
62351   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
62352   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
62353   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
62354   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
62355   __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 2); \
62356 })
62357 #endif
62358 
62359 #ifdef __LITTLE_ENDIAN__
62360 #define vst1_s64_x4(__p0, __p1) __extension__ ({ \
62361   int64x1x4_t __s1 = __p1; \
62362   __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 3); \
62363 })
62364 #else
62365 #define vst1_s64_x4(__p0, __p1) __extension__ ({ \
62366   int64x1x4_t __s1 = __p1; \
62367   __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 3); \
62368 })
62369 #endif
62370 
62371 #ifdef __LITTLE_ENDIAN__
62372 #define vst1_s16_x4(__p0, __p1) __extension__ ({ \
62373   int16x4x4_t __s1 = __p1; \
62374   __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 1); \
62375 })
62376 #else
62377 #define vst1_s16_x4(__p0, __p1) __extension__ ({ \
62378   int16x4x4_t __s1 = __p1; \
62379   int16x4x4_t __rev1; \
62380   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
62381   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
62382   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
62383   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
62384   __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 1); \
62385 })
62386 #endif
62387 
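/* The vst2/vst3/vst4 interleaved-store macros below follow the same pattern as
 * the preceding stores: on big-endian targets each val[i] register is reversed
 * into little-endian lane order with __builtin_shufflevector before the
 * __builtin_neon_* store builtin is invoked; single-lane (x1) vectors need no
 * reversal, so their little- and big-endian branches are identical. */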
62388 #ifdef __LITTLE_ENDIAN__
62389 #define vst2_p64(__p0, __p1) __extension__ ({ \
62390   poly64x1x2_t __s1 = __p1; \
62391   __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \
62392 })
62393 #else
62394 #define vst2_p64(__p0, __p1) __extension__ ({ \
62395   poly64x1x2_t __s1 = __p1; \
62396   __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \
62397 })
62398 #endif
62399 
62400 #ifdef __LITTLE_ENDIAN__
62401 #define vst2q_p64(__p0, __p1) __extension__ ({ \
62402   poly64x2x2_t __s1 = __p1; \
62403   __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 38); \
62404 })
62405 #else
62406 #define vst2q_p64(__p0, __p1) __extension__ ({ \
62407   poly64x2x2_t __s1 = __p1; \
62408   poly64x2x2_t __rev1; \
62409   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
62410   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
62411   __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 38); \
62412 })
62413 #endif
62414 
62415 #ifdef __LITTLE_ENDIAN__
62416 #define vst2q_u64(__p0, __p1) __extension__ ({ \
62417   uint64x2x2_t __s1 = __p1; \
62418   __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 51); \
62419 })
62420 #else
62421 #define vst2q_u64(__p0, __p1) __extension__ ({ \
62422   uint64x2x2_t __s1 = __p1; \
62423   uint64x2x2_t __rev1; \
62424   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
62425   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
62426   __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 51); \
62427 })
62428 #endif
62429 
62430 #ifdef __LITTLE_ENDIAN__
62431 #define vst2q_f64(__p0, __p1) __extension__ ({ \
62432   float64x2x2_t __s1 = __p1; \
62433   __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 42); \
62434 })
62435 #else
62436 #define vst2q_f64(__p0, __p1) __extension__ ({ \
62437   float64x2x2_t __s1 = __p1; \
62438   float64x2x2_t __rev1; \
62439   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
62440   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
62441   __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 42); \
62442 })
62443 #endif
62444 
62445 #ifdef __LITTLE_ENDIAN__
62446 #define vst2q_s64(__p0, __p1) __extension__ ({ \
62447   int64x2x2_t __s1 = __p1; \
62448   __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 35); \
62449 })
62450 #else
62451 #define vst2q_s64(__p0, __p1) __extension__ ({ \
62452   int64x2x2_t __s1 = __p1; \
62453   int64x2x2_t __rev1; \
62454   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
62455   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
62456   __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 35); \
62457 })
62458 #endif
62459 
62460 #ifdef __LITTLE_ENDIAN__
62461 #define vst2_f64(__p0, __p1) __extension__ ({ \
62462   float64x1x2_t __s1 = __p1; \
62463   __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 10); \
62464 })
62465 #else
62466 #define vst2_f64(__p0, __p1) __extension__ ({ \
62467   float64x1x2_t __s1 = __p1; \
62468   __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 10); \
62469 })
62470 #endif
62471 
62472 #ifdef __LITTLE_ENDIAN__
62473 #define vst2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
62474   poly64x1x2_t __s1 = __p1; \
62475   __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \
62476 })
62477 #else
62478 #define vst2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
62479   poly64x1x2_t __s1 = __p1; \
62480   __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \
62481 })
62482 #endif
62483 
62484 #ifdef __LITTLE_ENDIAN__
62485 #define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
62486   poly8x16x2_t __s1 = __p1; \
62487   __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \
62488 })
62489 #else
62490 #define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
62491   poly8x16x2_t __s1 = __p1; \
62492   poly8x16x2_t __rev1; \
62493   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
62494   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
62495   __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \
62496 })
62497 #endif
62498 
62499 #ifdef __LITTLE_ENDIAN__
62500 #define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
62501   poly64x2x2_t __s1 = __p1; \
62502   __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \
62503 })
62504 #else
62505 #define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
62506   poly64x2x2_t __s1 = __p1; \
62507   poly64x2x2_t __rev1; \
62508   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
62509   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
62510   __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \
62511 })
62512 #endif
62513 
62514 #ifdef __LITTLE_ENDIAN__
62515 #define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
62516   uint8x16x2_t __s1 = __p1; \
62517   __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \
62518 })
62519 #else
62520 #define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
62521   uint8x16x2_t __s1 = __p1; \
62522   uint8x16x2_t __rev1; \
62523   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
62524   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
62525   __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \
62526 })
62527 #endif
62528 
62529 #ifdef __LITTLE_ENDIAN__
62530 #define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
62531   uint64x2x2_t __s1 = __p1; \
62532   __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \
62533 })
62534 #else
62535 #define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
62536   uint64x2x2_t __s1 = __p1; \
62537   uint64x2x2_t __rev1; \
62538   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
62539   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
62540   __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \
62541 })
62542 #endif
62543 
62544 #ifdef __LITTLE_ENDIAN__
62545 #define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
62546   int8x16x2_t __s1 = __p1; \
62547   __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \
62548 })
62549 #else
62550 #define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
62551   int8x16x2_t __s1 = __p1; \
62552   int8x16x2_t __rev1; \
62553   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
62554   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
62555   __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \
62556 })
62557 #endif
62558 
62559 #ifdef __LITTLE_ENDIAN__
62560 #define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
62561   float64x2x2_t __s1 = __p1; \
62562   __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 42); \
62563 })
62564 #else
62565 #define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
62566   float64x2x2_t __s1 = __p1; \
62567   float64x2x2_t __rev1; \
62568   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
62569   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
62570   __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 42); \
62571 })
62572 #endif
62573 
62574 #ifdef __LITTLE_ENDIAN__
62575 #define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
62576   int64x2x2_t __s1 = __p1; \
62577   __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 35); \
62578 })
62579 #else
62580 #define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
62581   int64x2x2_t __s1 = __p1; \
62582   int64x2x2_t __rev1; \
62583   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
62584   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
62585   __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 35); \
62586 })
62587 #endif
62588 
62589 #ifdef __LITTLE_ENDIAN__
62590 #define vst2_lane_u64(__p0, __p1, __p2) __extension__ ({ \
62591   uint64x1x2_t __s1 = __p1; \
62592   __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \
62593 })
62594 #else
62595 #define vst2_lane_u64(__p0, __p1, __p2) __extension__ ({ \
62596   uint64x1x2_t __s1 = __p1; \
62597   __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \
62598 })
62599 #endif
62600 
62601 #ifdef __LITTLE_ENDIAN__
62602 #define vst2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
62603   float64x1x2_t __s1 = __p1; \
62604   __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 10); \
62605 })
62606 #else
62607 #define vst2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
62608   float64x1x2_t __s1 = __p1; \
62609   __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 10); \
62610 })
62611 #endif
62612 
62613 #ifdef __LITTLE_ENDIAN__
62614 #define vst2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
62615   int64x1x2_t __s1 = __p1; \
62616   __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 3); \
62617 })
62618 #else
62619 #define vst2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
62620   int64x1x2_t __s1 = __p1; \
62621   __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 3); \
62622 })
62623 #endif
62624 
62625 #ifdef __LITTLE_ENDIAN__
62626 #define vst3_p64(__p0, __p1) __extension__ ({ \
62627   poly64x1x3_t __s1 = __p1; \
62628   __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \
62629 })
62630 #else
62631 #define vst3_p64(__p0, __p1) __extension__ ({ \
62632   poly64x1x3_t __s1 = __p1; \
62633   __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \
62634 })
62635 #endif
62636 
62637 #ifdef __LITTLE_ENDIAN__
62638 #define vst3q_p64(__p0, __p1) __extension__ ({ \
62639   poly64x2x3_t __s1 = __p1; \
62640   __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 38); \
62641 })
62642 #else
62643 #define vst3q_p64(__p0, __p1) __extension__ ({ \
62644   poly64x2x3_t __s1 = __p1; \
62645   poly64x2x3_t __rev1; \
62646   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
62647   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
62648   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
62649   __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 38); \
62650 })
62651 #endif
62652 
62653 #ifdef __LITTLE_ENDIAN__
62654 #define vst3q_u64(__p0, __p1) __extension__ ({ \
62655   uint64x2x3_t __s1 = __p1; \
62656   __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 51); \
62657 })
62658 #else
62659 #define vst3q_u64(__p0, __p1) __extension__ ({ \
62660   uint64x2x3_t __s1 = __p1; \
62661   uint64x2x3_t __rev1; \
62662   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
62663   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
62664   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
62665   __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 51); \
62666 })
62667 #endif
62668 
62669 #ifdef __LITTLE_ENDIAN__
62670 #define vst3q_f64(__p0, __p1) __extension__ ({ \
62671   float64x2x3_t __s1 = __p1; \
62672   __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 42); \
62673 })
62674 #else
62675 #define vst3q_f64(__p0, __p1) __extension__ ({ \
62676   float64x2x3_t __s1 = __p1; \
62677   float64x2x3_t __rev1; \
62678   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
62679   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
62680   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
62681   __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 42); \
62682 })
62683 #endif
62684 
62685 #ifdef __LITTLE_ENDIAN__
62686 #define vst3q_s64(__p0, __p1) __extension__ ({ \
62687   int64x2x3_t __s1 = __p1; \
62688   __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 35); \
62689 })
62690 #else
62691 #define vst3q_s64(__p0, __p1) __extension__ ({ \
62692   int64x2x3_t __s1 = __p1; \
62693   int64x2x3_t __rev1; \
62694   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
62695   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
62696   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
62697   __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 35); \
62698 })
62699 #endif
62700 
62701 #ifdef __LITTLE_ENDIAN__
62702 #define vst3_f64(__p0, __p1) __extension__ ({ \
62703   float64x1x3_t __s1 = __p1; \
62704   __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 10); \
62705 })
62706 #else
62707 #define vst3_f64(__p0, __p1) __extension__ ({ \
62708   float64x1x3_t __s1 = __p1; \
62709   __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 10); \
62710 })
62711 #endif
62712 
62713 #ifdef __LITTLE_ENDIAN__
62714 #define vst3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
62715   poly64x1x3_t __s1 = __p1; \
62716   __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \
62717 })
62718 #else
62719 #define vst3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
62720   poly64x1x3_t __s1 = __p1; \
62721   __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \
62722 })
62723 #endif
62724 
62725 #ifdef __LITTLE_ENDIAN__
62726 #define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
62727   poly8x16x3_t __s1 = __p1; \
62728   __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \
62729 })
62730 #else
62731 #define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
62732   poly8x16x3_t __s1 = __p1; \
62733   poly8x16x3_t __rev1; \
62734   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
62735   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
62736   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
62737   __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \
62738 })
62739 #endif
62740 
62741 #ifdef __LITTLE_ENDIAN__
62742 #define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
62743   poly64x2x3_t __s1 = __p1; \
62744   __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \
62745 })
62746 #else
62747 #define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
62748   poly64x2x3_t __s1 = __p1; \
62749   poly64x2x3_t __rev1; \
62750   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
62751   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
62752   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
62753   __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \
62754 })
62755 #endif
62756 
62757 #ifdef __LITTLE_ENDIAN__
62758 #define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
62759   uint8x16x3_t __s1 = __p1; \
62760   __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \
62761 })
62762 #else
62763 #define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
62764   uint8x16x3_t __s1 = __p1; \
62765   uint8x16x3_t __rev1; \
62766   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
62767   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
62768   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
62769   __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \
62770 })
62771 #endif
62772 
62773 #ifdef __LITTLE_ENDIAN__
62774 #define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
62775   uint64x2x3_t __s1 = __p1; \
62776   __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \
62777 })
62778 #else
62779 #define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
62780   uint64x2x3_t __s1 = __p1; \
62781   uint64x2x3_t __rev1; \
62782   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
62783   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
62784   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
62785   __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \
62786 })
62787 #endif
62788 
62789 #ifdef __LITTLE_ENDIAN__
62790 #define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
62791   int8x16x3_t __s1 = __p1; \
62792   __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \
62793 })
62794 #else
62795 #define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
62796   int8x16x3_t __s1 = __p1; \
62797   int8x16x3_t __rev1; \
62798   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
62799   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
62800   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
62801   __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \
62802 })
62803 #endif
62804 
62805 #ifdef __LITTLE_ENDIAN__
62806 #define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
62807   float64x2x3_t __s1 = __p1; \
62808   __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 42); \
62809 })
62810 #else
62811 #define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
62812   float64x2x3_t __s1 = __p1; \
62813   float64x2x3_t __rev1; \
62814   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
62815   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
62816   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
62817   __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 42); \
62818 })
62819 #endif
62820 
62821 #ifdef __LITTLE_ENDIAN__
62822 #define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
62823   int64x2x3_t __s1 = __p1; \
62824   __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 35); \
62825 })
62826 #else
62827 #define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
62828   int64x2x3_t __s1 = __p1; \
62829   int64x2x3_t __rev1; \
62830   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
62831   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
62832   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
62833   __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 35); \
62834 })
62835 #endif
62836 
62837 #ifdef __LITTLE_ENDIAN__
62838 #define vst3_lane_u64(__p0, __p1, __p2) __extension__ ({ \
62839   uint64x1x3_t __s1 = __p1; \
62840   __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \
62841 })
62842 #else
62843 #define vst3_lane_u64(__p0, __p1, __p2) __extension__ ({ \
62844   uint64x1x3_t __s1 = __p1; \
62845   __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \
62846 })
62847 #endif
62848 
62849 #ifdef __LITTLE_ENDIAN__
62850 #define vst3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
62851   float64x1x3_t __s1 = __p1; \
62852   __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 10); \
62853 })
62854 #else
62855 #define vst3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
62856   float64x1x3_t __s1 = __p1; \
62857   __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 10); \
62858 })
62859 #endif
62860 
62861 #ifdef __LITTLE_ENDIAN__
62862 #define vst3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
62863   int64x1x3_t __s1 = __p1; \
62864   __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 3); \
62865 })
62866 #else
62867 #define vst3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
62868   int64x1x3_t __s1 = __p1; \
62869   __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 3); \
62870 })
62871 #endif
62872 
62873 #ifdef __LITTLE_ENDIAN__
62874 #define vst4_p64(__p0, __p1) __extension__ ({ \
62875   poly64x1x4_t __s1 = __p1; \
62876   __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \
62877 })
62878 #else
62879 #define vst4_p64(__p0, __p1) __extension__ ({ \
62880   poly64x1x4_t __s1 = __p1; \
62881   __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \
62882 })
62883 #endif
62884 
62885 #ifdef __LITTLE_ENDIAN__
62886 #define vst4q_p64(__p0, __p1) __extension__ ({ \
62887   poly64x2x4_t __s1 = __p1; \
62888   __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 38); \
62889 })
62890 #else
62891 #define vst4q_p64(__p0, __p1) __extension__ ({ \
62892   poly64x2x4_t __s1 = __p1; \
62893   poly64x2x4_t __rev1; \
62894   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
62895   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
62896   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
62897   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
62898   __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 38); \
62899 })
62900 #endif
62901 
62902 #ifdef __LITTLE_ENDIAN__
62903 #define vst4q_u64(__p0, __p1) __extension__ ({ \
62904   uint64x2x4_t __s1 = __p1; \
62905   __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 51); \
62906 })
62907 #else
62908 #define vst4q_u64(__p0, __p1) __extension__ ({ \
62909   uint64x2x4_t __s1 = __p1; \
62910   uint64x2x4_t __rev1; \
62911   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
62912   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
62913   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
62914   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
62915   __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 51); \
62916 })
62917 #endif
62918 
62919 #ifdef __LITTLE_ENDIAN__
62920 #define vst4q_f64(__p0, __p1) __extension__ ({ \
62921   float64x2x4_t __s1 = __p1; \
62922   __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 42); \
62923 })
62924 #else
62925 #define vst4q_f64(__p0, __p1) __extension__ ({ \
62926   float64x2x4_t __s1 = __p1; \
62927   float64x2x4_t __rev1; \
62928   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
62929   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
62930   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
62931   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
62932   __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 42); \
62933 })
62934 #endif
62935 
62936 #ifdef __LITTLE_ENDIAN__
62937 #define vst4q_s64(__p0, __p1) __extension__ ({ \
62938   int64x2x4_t __s1 = __p1; \
62939   __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 35); \
62940 })
62941 #else
62942 #define vst4q_s64(__p0, __p1) __extension__ ({ \
62943   int64x2x4_t __s1 = __p1; \
62944   int64x2x4_t __rev1; \
62945   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
62946   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
62947   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
62948   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
62949   __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 35); \
62950 })
62951 #endif
62952 
62953 #ifdef __LITTLE_ENDIAN__
62954 #define vst4_f64(__p0, __p1) __extension__ ({ \
62955   float64x1x4_t __s1 = __p1; \
62956   __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 10); \
62957 })
62958 #else
62959 #define vst4_f64(__p0, __p1) __extension__ ({ \
62960   float64x1x4_t __s1 = __p1; \
62961   __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 10); \
62962 })
62963 #endif
62964 
62965 #ifdef __LITTLE_ENDIAN__
62966 #define vst4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
62967   poly64x1x4_t __s1 = __p1; \
62968   __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \
62969 })
62970 #else
62971 #define vst4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
62972   poly64x1x4_t __s1 = __p1; \
62973   __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \
62974 })
62975 #endif
62976 
62977 #ifdef __LITTLE_ENDIAN__
62978 #define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
62979   poly8x16x4_t __s1 = __p1; \
62980   __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \
62981 })
62982 #else
62983 #define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
62984   poly8x16x4_t __s1 = __p1; \
62985   poly8x16x4_t __rev1; \
62986   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
62987   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
62988   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
62989   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
62990   __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \
62991 })
62992 #endif
62993 
62994 #ifdef __LITTLE_ENDIAN__
62995 #define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
62996   poly64x2x4_t __s1 = __p1; \
62997   __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \
62998 })
62999 #else
63000 #define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
63001   poly64x2x4_t __s1 = __p1; \
63002   poly64x2x4_t __rev1; \
63003   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
63004   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
63005   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
63006   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
63007   __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \
63008 })
63009 #endif
63010 
63011 #ifdef __LITTLE_ENDIAN__
63012 #define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
63013   uint8x16x4_t __s1 = __p1; \
63014   __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \
63015 })
63016 #else
63017 #define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
63018   uint8x16x4_t __s1 = __p1; \
63019   uint8x16x4_t __rev1; \
63020   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
63021   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
63022   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
63023   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
63024   __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \
63025 })
63026 #endif
63027 
63028 #ifdef __LITTLE_ENDIAN__
63029 #define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
63030   uint64x2x4_t __s1 = __p1; \
63031   __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \
63032 })
63033 #else
63034 #define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
63035   uint64x2x4_t __s1 = __p1; \
63036   uint64x2x4_t __rev1; \
63037   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
63038   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
63039   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
63040   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
63041   __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \
63042 })
63043 #endif
63044 
63045 #ifdef __LITTLE_ENDIAN__
63046 #define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
63047   int8x16x4_t __s1 = __p1; \
63048   __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \
63049 })
63050 #else
63051 #define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
63052   int8x16x4_t __s1 = __p1; \
63053   int8x16x4_t __rev1; \
63054   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
63055   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
63056   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
63057   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
63058   __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \
63059 })
63060 #endif
63061 
63062 #ifdef __LITTLE_ENDIAN__
63063 #define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
63064   float64x2x4_t __s1 = __p1; \
63065   __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 42); \
63066 })
63067 #else
63068 #define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
63069   float64x2x4_t __s1 = __p1; \
63070   float64x2x4_t __rev1; \
63071   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
63072   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
63073   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
63074   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
63075   __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 42); \
63076 })
63077 #endif
63078 
63079 #ifdef __LITTLE_ENDIAN__
63080 #define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
63081   int64x2x4_t __s1 = __p1; \
63082   __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 35); \
63083 })
63084 #else
63085 #define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
63086   int64x2x4_t __s1 = __p1; \
63087   int64x2x4_t __rev1; \
63088   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
63089   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
63090   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
63091   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
63092   __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 35); \
63093 })
63094 #endif
63095 
63096 #ifdef __LITTLE_ENDIAN__
63097 #define vst4_lane_u64(__p0, __p1, __p2) __extension__ ({ \
63098   uint64x1x4_t __s1 = __p1; \
63099   __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \
63100 })
63101 #else
63102 #define vst4_lane_u64(__p0, __p1, __p2) __extension__ ({ \
63103   uint64x1x4_t __s1 = __p1; \
63104   __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \
63105 })
63106 #endif
63107 
63108 #ifdef __LITTLE_ENDIAN__
63109 #define vst4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
63110   float64x1x4_t __s1 = __p1; \
63111   __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 10); \
63112 })
63113 #else
63114 #define vst4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
63115   float64x1x4_t __s1 = __p1; \
63116   __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 10); \
63117 })
63118 #endif
63119 
63120 #ifdef __LITTLE_ENDIAN__
63121 #define vst4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
63122   int64x1x4_t __s1 = __p1; \
63123   __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 3); \
63124 })
63125 #else
63126 #define vst4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
63127   int64x1x4_t __s1 = __p1; \
63128   __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 3); \
63129 })
63130 #endif
63131 
63132 #ifdef __LITTLE_ENDIAN__
63133 #define vstrq_p128(__p0, __p1) __extension__ ({ \
63134   poly128_t __s1 = __p1; \
63135   __builtin_neon_vstrq_p128(__p0, __s1); \
63136 })
63137 #else
63138 #define vstrq_p128(__p0, __p1) __extension__ ({ \
63139   poly128_t __s1 = __p1; \
63140   __builtin_neon_vstrq_p128(__p0, __s1); \
63141 })
63142 #endif
63143 
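/* Scalar and vector subtract helpers follow. The *_high narrowing/widening
 * variants operate on the upper halves via vsubhn_*/vmovl_high_*; in the
 * big-endian branches the __noswap_ helpers are used so that lane reversal
 * is applied only once, at the entry and exit of each function. */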
63144 #ifdef __LITTLE_ENDIAN__
63145 __ai uint64_t vsubd_u64(uint64_t __p0, uint64_t __p1) {
63146   uint64_t __ret;
63147   __ret = (uint64_t) __builtin_neon_vsubd_u64(__p0, __p1);
63148   return __ret;
63149 }
63150 #else
63151 __ai uint64_t vsubd_u64(uint64_t __p0, uint64_t __p1) {
63152   uint64_t __ret;
63153   __ret = (uint64_t) __builtin_neon_vsubd_u64(__p0, __p1);
63154   return __ret;
63155 }
63156 #endif
63157 
63158 #ifdef __LITTLE_ENDIAN__
63159 __ai int64_t vsubd_s64(int64_t __p0, int64_t __p1) {
63160   int64_t __ret;
63161   __ret = (int64_t) __builtin_neon_vsubd_s64(__p0, __p1);
63162   return __ret;
63163 }
63164 #else
63165 __ai int64_t vsubd_s64(int64_t __p0, int64_t __p1) {
63166   int64_t __ret;
63167   __ret = (int64_t) __builtin_neon_vsubd_s64(__p0, __p1);
63168   return __ret;
63169 }
63170 #endif
63171 
63172 #ifdef __LITTLE_ENDIAN__
63173 __ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) {
63174   float64x2_t __ret;
63175   __ret = __p0 - __p1;
63176   return __ret;
63177 }
63178 #else
63179 __ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) {
63180   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
63181   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
63182   float64x2_t __ret;
63183   __ret = __rev0 - __rev1;
63184   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
63185   return __ret;
63186 }
63187 #endif
63188 
63189 #ifdef __LITTLE_ENDIAN__
63190 __ai float64x1_t vsub_f64(float64x1_t __p0, float64x1_t __p1) {
63191   float64x1_t __ret;
63192   __ret = __p0 - __p1;
63193   return __ret;
63194 }
63195 #else
63196 __ai float64x1_t vsub_f64(float64x1_t __p0, float64x1_t __p1) {
63197   float64x1_t __ret;
63198   __ret = __p0 - __p1;
63199   return __ret;
63200 }
63201 #endif
63202 
63203 #ifdef __LITTLE_ENDIAN__
63204 __ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
63205   uint16x8_t __ret;
63206   __ret = vcombine_u16(__p0, vsubhn_u32(__p1, __p2));
63207   return __ret;
63208 }
63209 #else
63210 __ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
63211   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
63212   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
63213   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
63214   uint16x8_t __ret;
63215   __ret = __noswap_vcombine_u16(__rev0, __noswap_vsubhn_u32(__rev1, __rev2));
63216   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
63217   return __ret;
63218 }
63219 #endif
63220 
63221 #ifdef __LITTLE_ENDIAN__
63222 __ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
63223   uint32x4_t __ret;
63224   __ret = vcombine_u32(__p0, vsubhn_u64(__p1, __p2));
63225   return __ret;
63226 }
63227 #else
63228 __ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
63229   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
63230   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
63231   uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
63232   uint32x4_t __ret;
63233   __ret = __noswap_vcombine_u32(__rev0, __noswap_vsubhn_u64(__rev1, __rev2));
63234   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
63235   return __ret;
63236 }
63237 #endif
63238 
63239 #ifdef __LITTLE_ENDIAN__
63240 __ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
63241   uint8x16_t __ret;
63242   __ret = vcombine_u8(__p0, vsubhn_u16(__p1, __p2));
63243   return __ret;
63244 }
63245 #else
63246 __ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
63247   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
63248   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
63249   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
63250   uint8x16_t __ret;
63251   __ret = __noswap_vcombine_u8(__rev0, __noswap_vsubhn_u16(__rev1, __rev2));
63252   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
63253   return __ret;
63254 }
63255 #endif
63256 
63257 #ifdef __LITTLE_ENDIAN__
63258 __ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
63259   int16x8_t __ret;
63260   __ret = vcombine_s16(__p0, vsubhn_s32(__p1, __p2));
63261   return __ret;
63262 }
63263 #else
63264 __ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
63265   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
63266   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
63267   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
63268   int16x8_t __ret;
63269   __ret = __noswap_vcombine_s16(__rev0, __noswap_vsubhn_s32(__rev1, __rev2));
63270   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
63271   return __ret;
63272 }
63273 #endif
63274 
63275 #ifdef __LITTLE_ENDIAN__
63276 __ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
63277   int32x4_t __ret;
63278   __ret = vcombine_s32(__p0, vsubhn_s64(__p1, __p2));
63279   return __ret;
63280 }
63281 #else
63282 __ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
63283   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
63284   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
63285   int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
63286   int32x4_t __ret;
63287   __ret = __noswap_vcombine_s32(__rev0, __noswap_vsubhn_s64(__rev1, __rev2));
63288   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
63289   return __ret;
63290 }
63291 #endif
63292 
63293 #ifdef __LITTLE_ENDIAN__
63294 __ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
63295   int8x16_t __ret;
63296   __ret = vcombine_s8(__p0, vsubhn_s16(__p1, __p2));
63297   return __ret;
63298 }
63299 #else
63300 __ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
63301   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
63302   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
63303   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
63304   int8x16_t __ret;
63305   __ret = __noswap_vcombine_s8(__rev0, __noswap_vsubhn_s16(__rev1, __rev2));
63306   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
63307   return __ret;
63308 }
63309 #endif
63310 
63311 #ifdef __LITTLE_ENDIAN__
63312 __ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
63313   uint16x8_t __ret;
63314   __ret = vmovl_high_u8(__p0) - vmovl_high_u8(__p1);
63315   return __ret;
63316 }
63317 #else
63318 __ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
63319   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
63320   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
63321   uint16x8_t __ret;
63322   __ret = __noswap_vmovl_high_u8(__rev0) - __noswap_vmovl_high_u8(__rev1);
63323   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
63324   return __ret;
63325 }
63326 #endif
63327 
63328 #ifdef __LITTLE_ENDIAN__
63329 __ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
63330   uint64x2_t __ret;
63331   __ret = vmovl_high_u32(__p0) - vmovl_high_u32(__p1);
63332   return __ret;
63333 }
63334 #else
63335 __ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
63336   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
63337   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
63338   uint64x2_t __ret;
63339   __ret = __noswap_vmovl_high_u32(__rev0) - __noswap_vmovl_high_u32(__rev1);
63340   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
63341   return __ret;
63342 }
63343 #endif
63344 
63345 #ifdef __LITTLE_ENDIAN__
63346 __ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
63347   uint32x4_t __ret;
63348   __ret = vmovl_high_u16(__p0) - vmovl_high_u16(__p1);
63349   return __ret;
63350 }
63351 #else
63352 __ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
63353   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
63354   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
63355   uint32x4_t __ret;
63356   __ret = __noswap_vmovl_high_u16(__rev0) - __noswap_vmovl_high_u16(__rev1);
63357   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
63358   return __ret;
63359 }
63360 #endif
63361 
63362 #ifdef __LITTLE_ENDIAN__
63363 __ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) {
63364   int16x8_t __ret;
63365   __ret = vmovl_high_s8(__p0) - vmovl_high_s8(__p1);
63366   return __ret;
63367 }
63368 #else
63369 __ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) {
63370   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
63371   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
63372   int16x8_t __ret;
63373   __ret = __noswap_vmovl_high_s8(__rev0) - __noswap_vmovl_high_s8(__rev1);
63374   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
63375   return __ret;
63376 }
63377 #endif
63378 
63379 #ifdef __LITTLE_ENDIAN__
63380 __ai int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) {
63381   int64x2_t __ret;
63382   __ret = vmovl_high_s32(__p0) - vmovl_high_s32(__p1);
63383   return __ret;
63384 }
63385 #else
63386 __ai int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) {
63387   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
63388   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
63389   int64x2_t __ret;
63390   __ret = __noswap_vmovl_high_s32(__rev0) - __noswap_vmovl_high_s32(__rev1);
63391   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
63392   return __ret;
63393 }
63394 #endif
63395 
63396 #ifdef __LITTLE_ENDIAN__
63397 __ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) {
63398   int32x4_t __ret;
63399   __ret = vmovl_high_s16(__p0) - vmovl_high_s16(__p1);
63400   return __ret;
63401 }
63402 #else
63403 __ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) {
63404   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
63405   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
63406   int32x4_t __ret;
63407   __ret = __noswap_vmovl_high_s16(__rev0) - __noswap_vmovl_high_s16(__rev1);
63408   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
63409   return __ret;
63410 }
63411 #endif
63412 
63413 #ifdef __LITTLE_ENDIAN__
63414 __ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
63415   uint16x8_t __ret;
63416   __ret = __p0 - vmovl_high_u8(__p1);
63417   return __ret;
63418 }
63419 #else
63420 __ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
63421   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
63422   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
63423   uint16x8_t __ret;
63424   __ret = __rev0 - __noswap_vmovl_high_u8(__rev1);
63425   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
63426   return __ret;
63427 }
63428 #endif
63429 
63430 #ifdef __LITTLE_ENDIAN__
63431 __ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
63432   uint64x2_t __ret;
63433   __ret = __p0 - vmovl_high_u32(__p1);
63434   return __ret;
63435 }
63436 #else
63437 __ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
63438   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
63439   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
63440   uint64x2_t __ret;
63441   __ret = __rev0 - __noswap_vmovl_high_u32(__rev1);
63442   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
63443   return __ret;
63444 }
63445 #endif
63446 
63447 #ifdef __LITTLE_ENDIAN__
63448 __ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
63449   uint32x4_t __ret;
63450   __ret = __p0 - vmovl_high_u16(__p1);
63451   return __ret;
63452 }
63453 #else
63454 __ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
63455   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
63456   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
63457   uint32x4_t __ret;
63458   __ret = __rev0 - __noswap_vmovl_high_u16(__rev1);
63459   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
63460   return __ret;
63461 }
63462 #endif
63463 
63464 #ifdef __LITTLE_ENDIAN__
63465 __ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) {
63466   int16x8_t __ret;
63467   __ret = __p0 - vmovl_high_s8(__p1);
63468   return __ret;
63469 }
63470 #else
63471 __ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) {
63472   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
63473   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
63474   int16x8_t __ret;
63475   __ret = __rev0 - __noswap_vmovl_high_s8(__rev1);
63476   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
63477   return __ret;
63478 }
63479 #endif
63480 
63481 #ifdef __LITTLE_ENDIAN__
63482 __ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) {
63483   int64x2_t __ret;
63484   __ret = __p0 - vmovl_high_s32(__p1);
63485   return __ret;
63486 }
63487 #else
63488 __ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) {
63489   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
63490   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
63491   int64x2_t __ret;
63492   __ret = __rev0 - __noswap_vmovl_high_s32(__rev1);
63493   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
63494   return __ret;
63495 }
63496 #endif
63497 
63498 #ifdef __LITTLE_ENDIAN__
63499 __ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) {
63500   int32x4_t __ret;
63501   __ret = __p0 - vmovl_high_s16(__p1);
63502   return __ret;
63503 }
63504 #else
63505 __ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) {
63506   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
63507   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
63508   int32x4_t __ret;
63509   __ret = __rev0 - __noswap_vmovl_high_s16(__rev1);
63510   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
63511   return __ret;
63512 }
63513 #endif
63514 
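/* vtrn1* transpose helpers: the result interleaves the even-numbered lanes of
 * the two operands (lane 0 of __p0, lane 0 of __p1, lane 2 of __p0, ...). The
 * big-endian branches reverse the operands and the result so that the shuffle
 * indices always refer to architectural lane order. */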
63515 #ifdef __LITTLE_ENDIAN__
63516 __ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) {
63517   poly8x8_t __ret;
63518   __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
63519   return __ret;
63520 }
63521 #else
63522 __ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) {
63523   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
63524   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
63525   poly8x8_t __ret;
63526   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
63527   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
63528   return __ret;
63529 }
63530 #endif
63531 
63532 #ifdef __LITTLE_ENDIAN__
63533 __ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) {
63534   poly16x4_t __ret;
63535   __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
63536   return __ret;
63537 }
63538 #else
63539 __ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) {
63540   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
63541   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
63542   poly16x4_t __ret;
63543   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
63544   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
63545   return __ret;
63546 }
63547 #endif
63548 
63549 #ifdef __LITTLE_ENDIAN__
63550 __ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
63551   poly8x16_t __ret;
63552   __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
63553   return __ret;
63554 }
63555 #else
63556 __ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
63557   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
63558   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
63559   poly8x16_t __ret;
63560   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
63561   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
63562   return __ret;
63563 }
63564 #endif
63565 
63566 #ifdef __LITTLE_ENDIAN__
63567 __ai poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
63568   poly64x2_t __ret;
63569   __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
63570   return __ret;
63571 }
63572 #else
63573 __ai poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
63574   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
63575   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
63576   poly64x2_t __ret;
63577   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
63578   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
63579   return __ret;
63580 }
63581 #endif
63582 
63583 #ifdef __LITTLE_ENDIAN__
63584 __ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
63585   poly16x8_t __ret;
63586   __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
63587   return __ret;
63588 }
63589 #else
63590 __ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
63591   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
63592   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
63593   poly16x8_t __ret;
63594   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
63595   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
63596   return __ret;
63597 }
63598 #endif
63599 
63600 #ifdef __LITTLE_ENDIAN__
63601 __ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
63602   uint8x16_t __ret;
63603   __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
63604   return __ret;
63605 }
63606 #else
63607 __ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
63608   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
63609   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
63610   uint8x16_t __ret;
63611   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
63612   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
63613   return __ret;
63614 }
63615 #endif
63616 
63617 #ifdef __LITTLE_ENDIAN__
63618 __ai uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
63619   uint32x4_t __ret;
63620   __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
63621   return __ret;
63622 }
63623 #else
63624 __ai uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
63625   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
63626   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
63627   uint32x4_t __ret;
63628   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
63629   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
63630   return __ret;
63631 }
63632 #endif
63633 
63634 #ifdef __LITTLE_ENDIAN__
63635 __ai uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
63636   uint64x2_t __ret;
63637   __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
63638   return __ret;
63639 }
63640 #else
63641 __ai uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
63642   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
63643   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
63644   uint64x2_t __ret;
63645   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
63646   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
63647   return __ret;
63648 }
63649 #endif
63650 
63651 #ifdef __LITTLE_ENDIAN__
63652 __ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
63653   uint16x8_t __ret;
63654   __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
63655   return __ret;
63656 }
63657 #else
63658 __ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
63659   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
63660   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
63661   uint16x8_t __ret;
63662   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
63663   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
63664   return __ret;
63665 }
63666 #endif
63667 
63668 #ifdef __LITTLE_ENDIAN__
63669 __ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) {
63670   int8x16_t __ret;
63671   __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
63672   return __ret;
63673 }
63674 #else
63675 __ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) {
63676   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
63677   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
63678   int8x16_t __ret;
63679   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
63680   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
63681   return __ret;
63682 }
63683 #endif
63684 
63685 #ifdef __LITTLE_ENDIAN__
63686 __ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) {
63687   float64x2_t __ret;
63688   __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
63689   return __ret;
63690 }
63691 #else
63692 __ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) {
63693   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
63694   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
63695   float64x2_t __ret;
63696   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
63697   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
63698   return __ret;
63699 }
63700 #endif
63701 
63702 #ifdef __LITTLE_ENDIAN__
63703 __ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) {
63704   float32x4_t __ret;
63705   __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
63706   return __ret;
63707 }
63708 #else
63709 __ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) {
63710   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
63711   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
63712   float32x4_t __ret;
63713   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
63714   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
63715   return __ret;
63716 }
63717 #endif
63718 
63719 #ifdef __LITTLE_ENDIAN__
63720 __ai int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) {
63721   int32x4_t __ret;
63722   __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
63723   return __ret;
63724 }
63725 #else
63726 __ai int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) {
63727   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
63728   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
63729   int32x4_t __ret;
63730   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
63731   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
63732   return __ret;
63733 }
63734 #endif
63735 
63736 #ifdef __LITTLE_ENDIAN__
63737 __ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) {
63738   int64x2_t __ret;
63739   __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
63740   return __ret;
63741 }
63742 #else
63743 __ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) {
63744   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
63745   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
63746   int64x2_t __ret;
63747   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
63748   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
63749   return __ret;
63750 }
63751 #endif
63752 
63753 #ifdef __LITTLE_ENDIAN__
63754 __ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) {
63755   int16x8_t __ret;
63756   __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
63757   return __ret;
63758 }
63759 #else
63760 __ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) {
63761   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
63762   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
63763   int16x8_t __ret;
63764   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
63765   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
63766   return __ret;
63767 }
63768 #endif
63769 
63770 #ifdef __LITTLE_ENDIAN__
63771 __ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) {
63772   uint8x8_t __ret;
63773   __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
63774   return __ret;
63775 }
63776 #else
63777 __ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) {
63778   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
63779   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
63780   uint8x8_t __ret;
63781   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
63782   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
63783   return __ret;
63784 }
63785 #endif
63786 
63787 #ifdef __LITTLE_ENDIAN__
63788 __ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) {
63789   uint32x2_t __ret;
63790   __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
63791   return __ret;
63792 }
63793 #else
63794 __ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) {
63795   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
63796   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
63797   uint32x2_t __ret;
63798   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
63799   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
63800   return __ret;
63801 }
63802 #endif
63803 
63804 #ifdef __LITTLE_ENDIAN__
63805 __ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) {
63806   uint16x4_t __ret;
63807   __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
63808   return __ret;
63809 }
63810 #else
63811 __ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) {
63812   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
63813   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
63814   uint16x4_t __ret;
63815   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
63816   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
63817   return __ret;
63818 }
63819 #endif
63820 
63821 #ifdef __LITTLE_ENDIAN__
63822 __ai int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) {
63823   int8x8_t __ret;
63824   __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
63825   return __ret;
63826 }
63827 #else
63828 __ai int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) {
63829   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
63830   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
63831   int8x8_t __ret;
63832   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
63833   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
63834   return __ret;
63835 }
63836 #endif
63837 
63838 #ifdef __LITTLE_ENDIAN__
63839 __ai float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) {
63840   float32x2_t __ret;
63841   __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
63842   return __ret;
63843 }
63844 #else
63845 __ai float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) {
63846   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
63847   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
63848   float32x2_t __ret;
63849   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
63850   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
63851   return __ret;
63852 }
63853 #endif
63854 
63855 #ifdef __LITTLE_ENDIAN__
63856 __ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) {
63857   int32x2_t __ret;
63858   __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
63859   return __ret;
63860 }
63861 #else
63862 __ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) {
63863   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
63864   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
63865   int32x2_t __ret;
63866   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
63867   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
63868   return __ret;
63869 }
63870 #endif
63871 
63872 #ifdef __LITTLE_ENDIAN__
63873 __ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) {
63874   int16x4_t __ret;
63875   __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
63876   return __ret;
63877 }
63878 #else
63879 __ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) {
63880   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
63881   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
63882   int16x4_t __ret;
63883   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
63884   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
63885   return __ret;
63886 }
63887 #endif
63888 
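/* The vtrn2 family implements TRN2 (transpose, odd lanes): result lane pairs
 * are { a1, b1, a3, b3, ... }, matching the shufflevector index lists
 * (1, n+1, 3, n+3, ...).  Illustrative (hypothetical) values:
 *   int16x4_t a = {1, 2, 3, 4}, b = {5, 6, 7, 8};
 *   vtrn2_s16(a, b) == {2, 6, 4, 8}
 */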
63889 #ifdef __LITTLE_ENDIAN__
63890 __ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) {
63891   poly8x8_t __ret;
63892   __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
63893   return __ret;
63894 }
63895 #else
63896 __ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) {
63897   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
63898   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
63899   poly8x8_t __ret;
63900   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
63901   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
63902   return __ret;
63903 }
63904 #endif
63905 
63906 #ifdef __LITTLE_ENDIAN__
63907 __ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) {
63908   poly16x4_t __ret;
63909   __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
63910   return __ret;
63911 }
63912 #else
63913 __ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) {
63914   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
63915   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
63916   poly16x4_t __ret;
63917   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
63918   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
63919   return __ret;
63920 }
63921 #endif
63922 
63923 #ifdef __LITTLE_ENDIAN__
63924 __ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
63925   poly8x16_t __ret;
63926   __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
63927   return __ret;
63928 }
63929 #else
63930 __ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
63931   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
63932   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
63933   poly8x16_t __ret;
63934   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
63935   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
63936   return __ret;
63937 }
63938 #endif
63939 
63940 #ifdef __LITTLE_ENDIAN__
63941 __ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
63942   poly64x2_t __ret;
63943   __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
63944   return __ret;
63945 }
63946 #else
63947 __ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
63948   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
63949   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
63950   poly64x2_t __ret;
63951   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
63952   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
63953   return __ret;
63954 }
63955 #endif
63956 
63957 #ifdef __LITTLE_ENDIAN__
63958 __ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
63959   poly16x8_t __ret;
63960   __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
63961   return __ret;
63962 }
63963 #else
63964 __ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
63965   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
63966   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
63967   poly16x8_t __ret;
63968   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
63969   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
63970   return __ret;
63971 }
63972 #endif
63973 
63974 #ifdef __LITTLE_ENDIAN__
63975 __ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
63976   uint8x16_t __ret;
63977   __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
63978   return __ret;
63979 }
63980 #else
63981 __ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
63982   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
63983   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
63984   uint8x16_t __ret;
63985   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
63986   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
63987   return __ret;
63988 }
63989 #endif
63990 
63991 #ifdef __LITTLE_ENDIAN__
63992 __ai uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
63993   uint32x4_t __ret;
63994   __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
63995   return __ret;
63996 }
63997 #else
63998 __ai uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
63999   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
64000   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
64001   uint32x4_t __ret;
64002   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
64003   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
64004   return __ret;
64005 }
64006 #endif
64007 
64008 #ifdef __LITTLE_ENDIAN__
64009 __ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
64010   uint64x2_t __ret;
64011   __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
64012   return __ret;
64013 }
64014 #else
64015 __ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
64016   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
64017   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
64018   uint64x2_t __ret;
64019   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
64020   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
64021   return __ret;
64022 }
64023 #endif
64024 
64025 #ifdef __LITTLE_ENDIAN__
64026 __ai uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
64027   uint16x8_t __ret;
64028   __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
64029   return __ret;
64030 }
64031 #else
64032 __ai uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
64033   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
64034   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
64035   uint16x8_t __ret;
64036   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
64037   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64038   return __ret;
64039 }
64040 #endif
64041 
64042 #ifdef __LITTLE_ENDIAN__
64043 __ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) {
64044   int8x16_t __ret;
64045   __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
64046   return __ret;
64047 }
64048 #else
64049 __ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) {
64050   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64051   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64052   int8x16_t __ret;
64053   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
64054   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64055   return __ret;
64056 }
64057 #endif
64058 
64059 #ifdef __LITTLE_ENDIAN__
64060 __ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) {
64061   float64x2_t __ret;
64062   __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
64063   return __ret;
64064 }
64065 #else
64066 __ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) {
64067   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
64068   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
64069   float64x2_t __ret;
64070   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
64071   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
64072   return __ret;
64073 }
64074 #endif
64075 
64076 #ifdef __LITTLE_ENDIAN__
64077 __ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) {
64078   float32x4_t __ret;
64079   __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
64080   return __ret;
64081 }
64082 #else
64083 __ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) {
64084   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
64085   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
64086   float32x4_t __ret;
64087   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
64088   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
64089   return __ret;
64090 }
64091 #endif
64092 
64093 #ifdef __LITTLE_ENDIAN__
64094 __ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) {
64095   int32x4_t __ret;
64096   __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
64097   return __ret;
64098 }
64099 #else
64100 __ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) {
64101   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
64102   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
64103   int32x4_t __ret;
64104   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
64105   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
64106   return __ret;
64107 }
64108 #endif
64109 
64110 #ifdef __LITTLE_ENDIAN__
64111 __ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) {
64112   int64x2_t __ret;
64113   __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
64114   return __ret;
64115 }
64116 #else
64117 __ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) {
64118   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
64119   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
64120   int64x2_t __ret;
64121   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
64122   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
64123   return __ret;
64124 }
64125 #endif
64126 
64127 #ifdef __LITTLE_ENDIAN__
64128 __ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) {
64129   int16x8_t __ret;
64130   __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
64131   return __ret;
64132 }
64133 #else
64134 __ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) {
64135   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
64136   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
64137   int16x8_t __ret;
64138   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
64139   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64140   return __ret;
64141 }
64142 #endif
64143 
64144 #ifdef __LITTLE_ENDIAN__
64145 __ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) {
64146   uint8x8_t __ret;
64147   __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
64148   return __ret;
64149 }
64150 #else
64151 __ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) {
64152   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
64153   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
64154   uint8x8_t __ret;
64155   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
64156   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64157   return __ret;
64158 }
64159 #endif
64160 
64161 #ifdef __LITTLE_ENDIAN__
64162 __ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) {
64163   uint32x2_t __ret;
64164   __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
64165   return __ret;
64166 }
64167 #else
64168 __ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) {
64169   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
64170   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
64171   uint32x2_t __ret;
64172   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
64173   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
64174   return __ret;
64175 }
64176 #endif
64177 
64178 #ifdef __LITTLE_ENDIAN__
64179 __ai uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) {
64180   uint16x4_t __ret;
64181   __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
64182   return __ret;
64183 }
64184 #else
64185 __ai uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) {
64186   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
64187   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
64188   uint16x4_t __ret;
64189   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
64190   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
64191   return __ret;
64192 }
64193 #endif
64194 
64195 #ifdef __LITTLE_ENDIAN__
64196 __ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) {
64197   int8x8_t __ret;
64198   __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
64199   return __ret;
64200 }
64201 #else
64202 __ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) {
64203   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
64204   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
64205   int8x8_t __ret;
64206   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
64207   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64208   return __ret;
64209 }
64210 #endif
64211 
64212 #ifdef __LITTLE_ENDIAN__
64213 __ai float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) {
64214   float32x2_t __ret;
64215   __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
64216   return __ret;
64217 }
64218 #else
64219 __ai float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) {
64220   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
64221   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
64222   float32x2_t __ret;
64223   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
64224   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
64225   return __ret;
64226 }
64227 #endif
64228 
64229 #ifdef __LITTLE_ENDIAN__
64230 __ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) {
64231   int32x2_t __ret;
64232   __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
64233   return __ret;
64234 }
64235 #else
64236 __ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) {
64237   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
64238   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
64239   int32x2_t __ret;
64240   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
64241   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
64242   return __ret;
64243 }
64244 #endif
64245 
64246 #ifdef __LITTLE_ENDIAN__
64247 __ai int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) {
64248   int16x4_t __ret;
64249   __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
64250   return __ret;
64251 }
64252 #else
64253 __ai int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) {
64254   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
64255   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
64256   int16x4_t __ret;
64257   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
64258   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
64259   return __ret;
64260 }
64261 #endif
64262 
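/* The vtst/vtstq intrinsics below are bitwise test operations (CMTST): each
 * result lane is all ones when (a[i] & b[i]) != 0 and zero otherwise, which
 * is why the result is an unsigned mask type even for signed/poly inputs. */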
64263 #ifdef __LITTLE_ENDIAN__
64264 __ai uint64x1_t vtst_p64(poly64x1_t __p0, poly64x1_t __p1) {
64265   uint64x1_t __ret;
64266   __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
64267   return __ret;
64268 }
64269 #else
64270 __ai uint64x1_t vtst_p64(poly64x1_t __p0, poly64x1_t __p1) {
64271   uint64x1_t __ret;
64272   __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
64273   return __ret;
64274 }
64275 #endif
64276 
64277 #ifdef __LITTLE_ENDIAN__
64278 __ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) {
64279   uint64x2_t __ret;
64280   __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
64281   return __ret;
64282 }
64283 #else
64284 __ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) {
64285   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
64286   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
64287   uint64x2_t __ret;
64288   __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
64289   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
64290   return __ret;
64291 }
64292 #endif
64293 
64294 #ifdef __LITTLE_ENDIAN__
64295 __ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) {
64296   uint64x2_t __ret;
64297   __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
64298   return __ret;
64299 }
64300 #else
64301 __ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) {
64302   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
64303   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
64304   uint64x2_t __ret;
64305   __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
64306   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
64307   return __ret;
64308 }
64309 #endif
64310 
64311 #ifdef __LITTLE_ENDIAN__
64312 __ai uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) {
64313   uint64x2_t __ret;
64314   __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
64315   return __ret;
64316 }
64317 #else
64318 __ai uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) {
64319   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
64320   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
64321   uint64x2_t __ret;
64322   __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
64323   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
64324   return __ret;
64325 }
64326 #endif
64327 
64328 #ifdef __LITTLE_ENDIAN__
64329 __ai uint64x1_t vtst_u64(uint64x1_t __p0, uint64x1_t __p1) {
64330   uint64x1_t __ret;
64331   __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
64332   return __ret;
64333 }
64334 #else
64335 __ai uint64x1_t vtst_u64(uint64x1_t __p0, uint64x1_t __p1) {
64336   uint64x1_t __ret;
64337   __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
64338   return __ret;
64339 }
64340 #endif
64341 
64342 #ifdef __LITTLE_ENDIAN__
64343 __ai uint64x1_t vtst_s64(int64x1_t __p0, int64x1_t __p1) {
64344   uint64x1_t __ret;
64345   __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
64346   return __ret;
64347 }
64348 #else
64349 __ai uint64x1_t vtst_s64(int64x1_t __p0, int64x1_t __p1) {
64350   uint64x1_t __ret;
64351   __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
64352   return __ret;
64353 }
64354 #endif
64355 
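/* Scalar forms: vtstd_u64/vtstd_s64 apply the same bit-test to a single
 * 64-bit value, returning all ones or zero; single-lane operations need no
 * lane reversal, hence the identical little- and big-endian bodies. */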
64356 #ifdef __LITTLE_ENDIAN__
64357 __ai uint64_t vtstd_u64(uint64_t __p0, uint64_t __p1) {
64358   uint64_t __ret;
64359   __ret = (uint64_t) __builtin_neon_vtstd_u64(__p0, __p1);
64360   return __ret;
64361 }
64362 #else
64363 __ai uint64_t vtstd_u64(uint64_t __p0, uint64_t __p1) {
64364   uint64_t __ret;
64365   __ret = (uint64_t) __builtin_neon_vtstd_u64(__p0, __p1);
64366   return __ret;
64367 }
64368 #endif
64369 
64370 #ifdef __LITTLE_ENDIAN__
64371 __ai int64_t vtstd_s64(int64_t __p0, int64_t __p1) {
64372   int64_t __ret;
64373   __ret = (int64_t) __builtin_neon_vtstd_s64(__p0, __p1);
64374   return __ret;
64375 }
64376 #else
64377 __ai int64_t vtstd_s64(int64_t __p0, int64_t __p1) {
64378   int64_t __ret;
64379   __ret = (int64_t) __builtin_neon_vtstd_s64(__p0, __p1);
64380   return __ret;
64381 }
64382 #endif
64383 
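/* The vuqadd* intrinsics map to SUQADD: signed saturating accumulate of an
 * unsigned value, i.e. roughly __p0 + __p1 with __p1 read as unsigned and the
 * signed result saturated.  Note that this header version still declares the
 * second operand with a signed type, whereas current ACLE documents it as
 * unsigned. */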
64384 #ifdef __LITTLE_ENDIAN__
64385 __ai int8_t vuqaddb_s8(int8_t __p0, int8_t __p1) {
64386   int8_t __ret;
64387   __ret = (int8_t) __builtin_neon_vuqaddb_s8(__p0, __p1);
64388   return __ret;
64389 }
64390 #else
64391 __ai int8_t vuqaddb_s8(int8_t __p0, int8_t __p1) {
64392   int8_t __ret;
64393   __ret = (int8_t) __builtin_neon_vuqaddb_s8(__p0, __p1);
64394   return __ret;
64395 }
64396 #endif
64397 
64398 #ifdef __LITTLE_ENDIAN__
64399 __ai int32_t vuqadds_s32(int32_t __p0, int32_t __p1) {
64400   int32_t __ret;
64401   __ret = (int32_t) __builtin_neon_vuqadds_s32(__p0, __p1);
64402   return __ret;
64403 }
64404 #else
64405 __ai int32_t vuqadds_s32(int32_t __p0, int32_t __p1) {
64406   int32_t __ret;
64407   __ret = (int32_t) __builtin_neon_vuqadds_s32(__p0, __p1);
64408   return __ret;
64409 }
64410 #endif
64411 
64412 #ifdef __LITTLE_ENDIAN__
64413 __ai int64_t vuqaddd_s64(int64_t __p0, int64_t __p1) {
64414   int64_t __ret;
64415   __ret = (int64_t) __builtin_neon_vuqaddd_s64(__p0, __p1);
64416   return __ret;
64417 }
64418 #else
64419 __ai int64_t vuqaddd_s64(int64_t __p0, int64_t __p1) {
64420   int64_t __ret;
64421   __ret = (int64_t) __builtin_neon_vuqaddd_s64(__p0, __p1);
64422   return __ret;
64423 }
64424 #endif
64425 
64426 #ifdef __LITTLE_ENDIAN__
64427 __ai int16_t vuqaddh_s16(int16_t __p0, int16_t __p1) {
64428   int16_t __ret;
64429   __ret = (int16_t) __builtin_neon_vuqaddh_s16(__p0, __p1);
64430   return __ret;
64431 }
64432 #else
64433 __ai int16_t vuqaddh_s16(int16_t __p0, int16_t __p1) {
64434   int16_t __ret;
64435   __ret = (int16_t) __builtin_neon_vuqaddh_s16(__p0, __p1);
64436   return __ret;
64437 }
64438 #endif
64439 
64440 #ifdef __LITTLE_ENDIAN__
64441 __ai int8x16_t vuqaddq_s8(int8x16_t __p0, int8x16_t __p1) {
64442   int8x16_t __ret;
64443   __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
64444   return __ret;
64445 }
64446 #else
64447 __ai int8x16_t vuqaddq_s8(int8x16_t __p0, int8x16_t __p1) {
64448   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64449   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64450   int8x16_t __ret;
64451   __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
64452   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64453   return __ret;
64454 }
64455 #endif
64456 
64457 #ifdef __LITTLE_ENDIAN__
64458 __ai int32x4_t vuqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
64459   int32x4_t __ret;
64460   __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
64461   return __ret;
64462 }
64463 #else
64464 __ai int32x4_t vuqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
64465   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
64466   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
64467   int32x4_t __ret;
64468   __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
64469   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
64470   return __ret;
64471 }
64472 #endif
64473 
64474 #ifdef __LITTLE_ENDIAN__
64475 __ai int64x2_t vuqaddq_s64(int64x2_t __p0, int64x2_t __p1) {
64476   int64x2_t __ret;
64477   __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
64478   return __ret;
64479 }
64480 #else
64481 __ai int64x2_t vuqaddq_s64(int64x2_t __p0, int64x2_t __p1) {
64482   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
64483   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
64484   int64x2_t __ret;
64485   __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
64486   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
64487   return __ret;
64488 }
64489 #endif
64490 
64491 #ifdef __LITTLE_ENDIAN__
64492 __ai int16x8_t vuqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
64493   int16x8_t __ret;
64494   __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
64495   return __ret;
64496 }
64497 #else
64498 __ai int16x8_t vuqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
64499   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
64500   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
64501   int16x8_t __ret;
64502   __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
64503   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64504   return __ret;
64505 }
64506 #endif
64507 
64508 #ifdef __LITTLE_ENDIAN__
64509 __ai int8x8_t vuqadd_s8(int8x8_t __p0, int8x8_t __p1) {
64510   int8x8_t __ret;
64511   __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
64512   return __ret;
64513 }
64514 #else
64515 __ai int8x8_t vuqadd_s8(int8x8_t __p0, int8x8_t __p1) {
64516   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
64517   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
64518   int8x8_t __ret;
64519   __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
64520   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64521   return __ret;
64522 }
64523 #endif
64524 
64525 #ifdef __LITTLE_ENDIAN__
64526 __ai int32x2_t vuqadd_s32(int32x2_t __p0, int32x2_t __p1) {
64527   int32x2_t __ret;
64528   __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
64529   return __ret;
64530 }
64531 #else
64532 __ai int32x2_t vuqadd_s32(int32x2_t __p0, int32x2_t __p1) {
64533   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
64534   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
64535   int32x2_t __ret;
64536   __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
64537   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
64538   return __ret;
64539 }
64540 #endif
64541 
64542 #ifdef __LITTLE_ENDIAN__
64543 __ai int64x1_t vuqadd_s64(int64x1_t __p0, int64x1_t __p1) {
64544   int64x1_t __ret;
64545   __ret = (int64x1_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
64546   return __ret;
64547 }
64548 #else
64549 __ai int64x1_t vuqadd_s64(int64x1_t __p0, int64x1_t __p1) {
64550   int64x1_t __ret;
64551   __ret = (int64x1_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
64552   return __ret;
64553 }
64554 #endif
64555 
64556 #ifdef __LITTLE_ENDIAN__
64557 __ai int16x4_t vuqadd_s16(int16x4_t __p0, int16x4_t __p1) {
64558   int16x4_t __ret;
64559   __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
64560   return __ret;
64561 }
64562 #else
64563 __ai int16x4_t vuqadd_s16(int16x4_t __p0, int16x4_t __p1) {
64564   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
64565   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
64566   int16x4_t __ret;
64567   __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
64568   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
64569   return __ret;
64570 }
64571 #endif
64572 
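/* The vuzp1 family implements UZP1 (unzip, even lanes): the result is the
 * even-indexed elements of the concatenation a:b, i.e. { a0, a2, ..., b0,
 * b2, ... }, matching the shufflevector index lists (0, 2, 4, ...).
 * Illustrative (hypothetical) values:
 *   int16x4_t a = {1, 2, 3, 4}, b = {5, 6, 7, 8};
 *   vuzp1_s16(a, b) == {1, 3, 5, 7}
 */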
64573 #ifdef __LITTLE_ENDIAN__
64574 __ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) {
64575   poly8x8_t __ret;
64576   __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
64577   return __ret;
64578 }
64579 #else
64580 __ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) {
64581   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
64582   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
64583   poly8x8_t __ret;
64584   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
64585   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64586   return __ret;
64587 }
64588 #endif
64589 
64590 #ifdef __LITTLE_ENDIAN__
64591 __ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) {
64592   poly16x4_t __ret;
64593   __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
64594   return __ret;
64595 }
64596 #else
64597 __ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) {
64598   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
64599   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
64600   poly16x4_t __ret;
64601   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
64602   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
64603   return __ret;
64604 }
64605 #endif
64606 
64607 #ifdef __LITTLE_ENDIAN__
64608 __ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
64609   poly8x16_t __ret;
64610   __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
64611   return __ret;
64612 }
64613 #else
64614 __ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
64615   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64616   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64617   poly8x16_t __ret;
64618   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
64619   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64620   return __ret;
64621 }
64622 #endif
64623 
64624 #ifdef __LITTLE_ENDIAN__
64625 __ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
64626   poly64x2_t __ret;
64627   __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
64628   return __ret;
64629 }
64630 #else
64631 __ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
64632   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
64633   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
64634   poly64x2_t __ret;
64635   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
64636   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
64637   return __ret;
64638 }
64639 #endif
64640 
64641 #ifdef __LITTLE_ENDIAN__
64642 __ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
64643   poly16x8_t __ret;
64644   __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
64645   return __ret;
64646 }
64647 #else
64648 __ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
64649   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
64650   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
64651   poly16x8_t __ret;
64652   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
64653   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64654   return __ret;
64655 }
64656 #endif
64657 
64658 #ifdef __LITTLE_ENDIAN__
64659 __ai uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
64660   uint8x16_t __ret;
64661   __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
64662   return __ret;
64663 }
64664 #else
64665 __ai uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
64666   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64667   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64668   uint8x16_t __ret;
64669   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
64670   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64671   return __ret;
64672 }
64673 #endif
64674 
64675 #ifdef __LITTLE_ENDIAN__
64676 __ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
64677   uint32x4_t __ret;
64678   __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
64679   return __ret;
64680 }
64681 #else
64682 __ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
64683   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
64684   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
64685   uint32x4_t __ret;
64686   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
64687   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
64688   return __ret;
64689 }
64690 #endif
64691 
64692 #ifdef __LITTLE_ENDIAN__
64693 __ai uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
64694   uint64x2_t __ret;
64695   __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
64696   return __ret;
64697 }
64698 #else
64699 __ai uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
64700   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
64701   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
64702   uint64x2_t __ret;
64703   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
64704   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
64705   return __ret;
64706 }
64707 #endif
64708 
64709 #ifdef __LITTLE_ENDIAN__
64710 __ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
64711   uint16x8_t __ret;
64712   __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
64713   return __ret;
64714 }
64715 #else
64716 __ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
64717   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
64718   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
64719   uint16x8_t __ret;
64720   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
64721   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64722   return __ret;
64723 }
64724 #endif
64725 
64726 #ifdef __LITTLE_ENDIAN__
64727 __ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) {
64728   int8x16_t __ret;
64729   __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
64730   return __ret;
64731 }
64732 #else
64733 __ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) {
64734   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64735   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64736   int8x16_t __ret;
64737   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
64738   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64739   return __ret;
64740 }
64741 #endif
64742 
64743 #ifdef __LITTLE_ENDIAN__
64744 __ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) {
64745   float64x2_t __ret;
64746   __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
64747   return __ret;
64748 }
64749 #else
64750 __ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) {
64751   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
64752   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
64753   float64x2_t __ret;
64754   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
64755   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
64756   return __ret;
64757 }
64758 #endif
64759 
64760 #ifdef __LITTLE_ENDIAN__
64761 __ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) {
64762   float32x4_t __ret;
64763   __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
64764   return __ret;
64765 }
64766 #else
64767 __ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) {
64768   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
64769   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
64770   float32x4_t __ret;
64771   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
64772   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
64773   return __ret;
64774 }
64775 #endif
64776 
64777 #ifdef __LITTLE_ENDIAN__
64778 __ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) {
64779   int32x4_t __ret;
64780   __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
64781   return __ret;
64782 }
64783 #else
64784 __ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) {
64785   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
64786   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
64787   int32x4_t __ret;
64788   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
64789   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
64790   return __ret;
64791 }
64792 #endif
64793 
64794 #ifdef __LITTLE_ENDIAN__
64795 __ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) {
64796   int64x2_t __ret;
64797   __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
64798   return __ret;
64799 }
64800 #else
64801 __ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) {
64802   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
64803   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
64804   int64x2_t __ret;
64805   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
64806   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
64807   return __ret;
64808 }
64809 #endif
64810 
64811 #ifdef __LITTLE_ENDIAN__
64812 __ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) {
64813   int16x8_t __ret;
64814   __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
64815   return __ret;
64816 }
64817 #else
64818 __ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) {
64819   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
64820   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
64821   int16x8_t __ret;
64822   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
64823   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64824   return __ret;
64825 }
64826 #endif
64827 
64828 #ifdef __LITTLE_ENDIAN__
64829 __ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) {
64830   uint8x8_t __ret;
64831   __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
64832   return __ret;
64833 }
64834 #else
64835 __ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) {
64836   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
64837   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
64838   uint8x8_t __ret;
64839   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
64840   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64841   return __ret;
64842 }
64843 #endif
64844 
64845 #ifdef __LITTLE_ENDIAN__
64846 __ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) {
64847   uint32x2_t __ret;
64848   __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
64849   return __ret;
64850 }
64851 #else
64852 __ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) {
64853   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
64854   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
64855   uint32x2_t __ret;
64856   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
64857   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
64858   return __ret;
64859 }
64860 #endif
64861 
64862 #ifdef __LITTLE_ENDIAN__
64863 __ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) {
64864   uint16x4_t __ret;
64865   __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
64866   return __ret;
64867 }
64868 #else
64869 __ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) {
64870   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
64871   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
64872   uint16x4_t __ret;
64873   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
64874   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
64875   return __ret;
64876 }
64877 #endif
64878 
64879 #ifdef __LITTLE_ENDIAN__
64880 __ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) {
64881   int8x8_t __ret;
64882   __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
64883   return __ret;
64884 }
64885 #else
64886 __ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) {
64887   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
64888   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
64889   int8x8_t __ret;
64890   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
64891   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64892   return __ret;
64893 }
64894 #endif
64895 
64896 #ifdef __LITTLE_ENDIAN__
64897 __ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) {
64898   float32x2_t __ret;
64899   __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
64900   return __ret;
64901 }
64902 #else
64903 __ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) {
64904   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
64905   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
64906   float32x2_t __ret;
64907   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
64908   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
64909   return __ret;
64910 }
64911 #endif
64912 
64913 #ifdef __LITTLE_ENDIAN__
64914 __ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) {
64915   int32x2_t __ret;
64916   __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
64917   return __ret;
64918 }
64919 #else
64920 __ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) {
64921   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
64922   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
64923   int32x2_t __ret;
64924   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
64925   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
64926   return __ret;
64927 }
64928 #endif
64929 
64930 #ifdef __LITTLE_ENDIAN__
64931 __ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) {
64932   int16x4_t __ret;
64933   __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
64934   return __ret;
64935 }
64936 #else
64937 __ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) {
64938   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
64939   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
64940   int16x4_t __ret;
64941   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
64942   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
64943   return __ret;
64944 }
64945 #endif
64946 
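/* vuzp2 family: de-interleave, keeping the odd-indexed elements of the concatenated
 * sources (the counterpart of vuzp1, which keeps the even-indexed elements). */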
64947 #ifdef __LITTLE_ENDIAN__
64948 __ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) {
64949   poly8x8_t __ret;
64950   __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
64951   return __ret;
64952 }
64953 #else
64954 __ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) {
64955   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
64956   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
64957   poly8x8_t __ret;
64958   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
64959   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64960   return __ret;
64961 }
64962 #endif
64963 
64964 #ifdef __LITTLE_ENDIAN__
64965 __ai poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) {
64966   poly16x4_t __ret;
64967   __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
64968   return __ret;
64969 }
64970 #else
64971 __ai poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) {
64972   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
64973   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
64974   poly16x4_t __ret;
64975   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
64976   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
64977   return __ret;
64978 }
64979 #endif
64980 
64981 #ifdef __LITTLE_ENDIAN__
64982 __ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
64983   poly8x16_t __ret;
64984   __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
64985   return __ret;
64986 }
64987 #else
64988 __ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
64989   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64990   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64991   poly8x16_t __ret;
64992   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
64993   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64994   return __ret;
64995 }
64996 #endif
64997 
64998 #ifdef __LITTLE_ENDIAN__
64999 __ai poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
65000   poly64x2_t __ret;
65001   __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
65002   return __ret;
65003 }
65004 #else
65005 __ai poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
65006   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
65007   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
65008   poly64x2_t __ret;
65009   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
65010   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
65011   return __ret;
65012 }
65013 #endif
65014 
65015 #ifdef __LITTLE_ENDIAN__
65016 __ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
65017   poly16x8_t __ret;
65018   __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
65019   return __ret;
65020 }
65021 #else
65022 __ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
65023   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
65024   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
65025   poly16x8_t __ret;
65026   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
65027   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
65028   return __ret;
65029 }
65030 #endif
65031 
65032 #ifdef __LITTLE_ENDIAN__
65033 __ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
65034   uint8x16_t __ret;
65035   __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
65036   return __ret;
65037 }
65038 #else
65039 __ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
65040   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
65041   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
65042   uint8x16_t __ret;
65043   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
65044   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
65045   return __ret;
65046 }
65047 #endif
65048 
65049 #ifdef __LITTLE_ENDIAN__
65050 __ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
65051   uint32x4_t __ret;
65052   __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
65053   return __ret;
65054 }
65055 #else
65056 __ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
65057   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
65058   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
65059   uint32x4_t __ret;
65060   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
65061   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
65062   return __ret;
65063 }
65064 #endif
65065 
65066 #ifdef __LITTLE_ENDIAN__
65067 __ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
65068   uint64x2_t __ret;
65069   __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
65070   return __ret;
65071 }
65072 #else
65073 __ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
65074   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
65075   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
65076   uint64x2_t __ret;
65077   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
65078   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
65079   return __ret;
65080 }
65081 #endif
65082 
65083 #ifdef __LITTLE_ENDIAN__
65084 __ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
65085   uint16x8_t __ret;
65086   __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
65087   return __ret;
65088 }
65089 #else
65090 __ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
65091   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
65092   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
65093   uint16x8_t __ret;
65094   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
65095   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
65096   return __ret;
65097 }
65098 #endif
65099 
65100 #ifdef __LITTLE_ENDIAN__
65101 __ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) {
65102   int8x16_t __ret;
65103   __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
65104   return __ret;
65105 }
65106 #else
65107 __ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) {
65108   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
65109   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
65110   int8x16_t __ret;
65111   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
65112   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
65113   return __ret;
65114 }
65115 #endif
65116 
65117 #ifdef __LITTLE_ENDIAN__
65118 __ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) {
65119   float64x2_t __ret;
65120   __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
65121   return __ret;
65122 }
65123 #else
65124 __ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) {
65125   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
65126   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
65127   float64x2_t __ret;
65128   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
65129   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
65130   return __ret;
65131 }
65132 #endif
65133 
65134 #ifdef __LITTLE_ENDIAN__
65135 __ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) {
65136   float32x4_t __ret;
65137   __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
65138   return __ret;
65139 }
65140 #else
65141 __ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) {
65142   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
65143   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
65144   float32x4_t __ret;
65145   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
65146   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
65147   return __ret;
65148 }
65149 #endif
65150 
65151 #ifdef __LITTLE_ENDIAN__
65152 __ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) {
65153   int32x4_t __ret;
65154   __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
65155   return __ret;
65156 }
65157 #else
65158 __ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) {
65159   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
65160   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
65161   int32x4_t __ret;
65162   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
65163   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
65164   return __ret;
65165 }
65166 #endif
65167 
65168 #ifdef __LITTLE_ENDIAN__
65169 __ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) {
65170   int64x2_t __ret;
65171   __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
65172   return __ret;
65173 }
65174 #else
65175 __ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) {
65176   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
65177   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
65178   int64x2_t __ret;
65179   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
65180   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
65181   return __ret;
65182 }
65183 #endif
65184 
65185 #ifdef __LITTLE_ENDIAN__
65186 __ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) {
65187   int16x8_t __ret;
65188   __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
65189   return __ret;
65190 }
65191 #else
65192 __ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) {
65193   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
65194   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
65195   int16x8_t __ret;
65196   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
65197   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
65198   return __ret;
65199 }
65200 #endif
65201 
65202 #ifdef __LITTLE_ENDIAN__
65203 __ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) {
65204   uint8x8_t __ret;
65205   __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
65206   return __ret;
65207 }
65208 #else
65209 __ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) {
65210   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
65211   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
65212   uint8x8_t __ret;
65213   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
65214   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
65215   return __ret;
65216 }
65217 #endif
65218 
65219 #ifdef __LITTLE_ENDIAN__
65220 __ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) {
65221   uint32x2_t __ret;
65222   __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
65223   return __ret;
65224 }
65225 #else
65226 __ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) {
65227   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
65228   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
65229   uint32x2_t __ret;
65230   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
65231   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
65232   return __ret;
65233 }
65234 #endif
65235 
65236 #ifdef __LITTLE_ENDIAN__
65237 __ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) {
65238   uint16x4_t __ret;
65239   __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
65240   return __ret;
65241 }
65242 #else
65243 __ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) {
65244   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
65245   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
65246   uint16x4_t __ret;
65247   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
65248   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
65249   return __ret;
65250 }
65251 #endif
65252 
65253 #ifdef __LITTLE_ENDIAN__
65254 __ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) {
65255   int8x8_t __ret;
65256   __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
65257   return __ret;
65258 }
65259 #else
65260 __ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) {
65261   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
65262   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
65263   int8x8_t __ret;
65264   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
65265   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
65266   return __ret;
65267 }
65268 #endif
65269 
65270 #ifdef __LITTLE_ENDIAN__
65271 __ai float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) {
65272   float32x2_t __ret;
65273   __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
65274   return __ret;
65275 }
65276 #else
65277 __ai float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) {
65278   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
65279   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
65280   float32x2_t __ret;
65281   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
65282   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
65283   return __ret;
65284 }
65285 #endif
65286 
65287 #ifdef __LITTLE_ENDIAN__
65288 __ai int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) {
65289   int32x2_t __ret;
65290   __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
65291   return __ret;
65292 }
65293 #else
65294 __ai int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) {
65295   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
65296   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
65297   int32x2_t __ret;
65298   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
65299   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
65300   return __ret;
65301 }
65302 #endif
65303 
65304 #ifdef __LITTLE_ENDIAN__
65305 __ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) {
65306   int16x4_t __ret;
65307   __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
65308   return __ret;
65309 }
65310 #else
65311 __ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) {
65312   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
65313   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
65314   int16x4_t __ret;
65315   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
65316   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
65317   return __ret;
65318 }
65319 #endif
65320 
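/* vzip1 family: interleave the low halves of the two sources, producing
 * __p0[0], __p1[0], __p0[1], __p1[1], ... */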
65321 #ifdef __LITTLE_ENDIAN__
65322 __ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) {
65323   poly8x8_t __ret;
65324   __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
65325   return __ret;
65326 }
65327 #else
65328 __ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) {
65329   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
65330   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
65331   poly8x8_t __ret;
65332   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
65333   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
65334   return __ret;
65335 }
65336 #endif
65337 
65338 #ifdef __LITTLE_ENDIAN__
65339 __ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) {
65340   poly16x4_t __ret;
65341   __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
65342   return __ret;
65343 }
65344 #else
65345 __ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) {
65346   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
65347   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
65348   poly16x4_t __ret;
65349   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
65350   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
65351   return __ret;
65352 }
65353 #endif
65354 
65355 #ifdef __LITTLE_ENDIAN__
65356 __ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
65357   poly8x16_t __ret;
65358   __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
65359   return __ret;
65360 }
65361 #else
65362 __ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
65363   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
65364   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
65365   poly8x16_t __ret;
65366   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
65367   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
65368   return __ret;
65369 }
65370 #endif
65371 
65372 #ifdef __LITTLE_ENDIAN__
65373 __ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
65374   poly64x2_t __ret;
65375   __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
65376   return __ret;
65377 }
65378 #else
65379 __ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
65380   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
65381   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
65382   poly64x2_t __ret;
65383   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
65384   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
65385   return __ret;
65386 }
65387 #endif
65388 
65389 #ifdef __LITTLE_ENDIAN__
65390 __ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
65391   poly16x8_t __ret;
65392   __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
65393   return __ret;
65394 }
65395 #else
65396 __ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
65397   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
65398   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
65399   poly16x8_t __ret;
65400   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
65401   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
65402   return __ret;
65403 }
65404 #endif
65405 
65406 #ifdef __LITTLE_ENDIAN__
65407 __ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
65408   uint8x16_t __ret;
65409   __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
65410   return __ret;
65411 }
65412 #else
65413 __ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
65414   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
65415   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
65416   uint8x16_t __ret;
65417   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
65418   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
65419   return __ret;
65420 }
65421 #endif
65422 
65423 #ifdef __LITTLE_ENDIAN__
65424 __ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
65425   uint32x4_t __ret;
65426   __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
65427   return __ret;
65428 }
65429 #else
65430 __ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
65431   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
65432   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
65433   uint32x4_t __ret;
65434   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
65435   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
65436   return __ret;
65437 }
65438 #endif
65439 
65440 #ifdef __LITTLE_ENDIAN__
65441 __ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
65442   uint64x2_t __ret;
65443   __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
65444   return __ret;
65445 }
65446 #else
65447 __ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
65448   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
65449   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
65450   uint64x2_t __ret;
65451   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
65452   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
65453   return __ret;
65454 }
65455 #endif
65456 
65457 #ifdef __LITTLE_ENDIAN__
65458 __ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
65459   uint16x8_t __ret;
65460   __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
65461   return __ret;
65462 }
65463 #else
65464 __ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
65465   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
65466   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
65467   uint16x8_t __ret;
65468   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
65469   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
65470   return __ret;
65471 }
65472 #endif
65473 
65474 #ifdef __LITTLE_ENDIAN__
65475 __ai int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) {
65476   int8x16_t __ret;
65477   __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
65478   return __ret;
65479 }
65480 #else
65481 __ai int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) {
65482   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
65483   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
65484   int8x16_t __ret;
65485   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
65486   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
65487   return __ret;
65488 }
65489 #endif
65490 
65491 #ifdef __LITTLE_ENDIAN__
65492 __ai float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) {
65493   float64x2_t __ret;
65494   __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
65495   return __ret;
65496 }
65497 #else
65498 __ai float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) {
65499   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
65500   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
65501   float64x2_t __ret;
65502   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
65503   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
65504   return __ret;
65505 }
65506 #endif
65507 
65508 #ifdef __LITTLE_ENDIAN__
65509 __ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) {
65510   float32x4_t __ret;
65511   __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
65512   return __ret;
65513 }
65514 #else
65515 __ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) {
65516   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
65517   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
65518   float32x4_t __ret;
65519   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
65520   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
65521   return __ret;
65522 }
65523 #endif
65524 
65525 #ifdef __LITTLE_ENDIAN__
65526 __ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) {
65527   int32x4_t __ret;
65528   __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
65529   return __ret;
65530 }
65531 #else
65532 __ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) {
65533   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
65534   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
65535   int32x4_t __ret;
65536   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
65537   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
65538   return __ret;
65539 }
65540 #endif
65541 
65542 #ifdef __LITTLE_ENDIAN__
65543 __ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) {
65544   int64x2_t __ret;
65545   __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
65546   return __ret;
65547 }
65548 #else
65549 __ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) {
65550   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
65551   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
65552   int64x2_t __ret;
65553   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
65554   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
65555   return __ret;
65556 }
65557 #endif
65558 
65559 #ifdef __LITTLE_ENDIAN__
65560 __ai int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) {
65561   int16x8_t __ret;
65562   __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
65563   return __ret;
65564 }
65565 #else
65566 __ai int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) {
65567   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
65568   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
65569   int16x8_t __ret;
65570   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
65571   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
65572   return __ret;
65573 }
65574 #endif
65575 
65576 #ifdef __LITTLE_ENDIAN__
65577 __ai uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) {
65578   uint8x8_t __ret;
65579   __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
65580   return __ret;
65581 }
65582 #else
65583 __ai uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) {
65584   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
65585   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
65586   uint8x8_t __ret;
65587   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
65588   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
65589   return __ret;
65590 }
65591 #endif
65592 
65593 #ifdef __LITTLE_ENDIAN__
65594 __ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) {
65595   uint32x2_t __ret;
65596   __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
65597   return __ret;
65598 }
65599 #else
65600 __ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) {
65601   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
65602   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
65603   uint32x2_t __ret;
65604   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
65605   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
65606   return __ret;
65607 }
65608 #endif
65609 
65610 #ifdef __LITTLE_ENDIAN__
65611 __ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) {
65612   uint16x4_t __ret;
65613   __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
65614   return __ret;
65615 }
65616 #else
65617 __ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) {
65618   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
65619   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
65620   uint16x4_t __ret;
65621   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
65622   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
65623   return __ret;
65624 }
65625 #endif
65626 
65627 #ifdef __LITTLE_ENDIAN__
65628 __ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) {
65629   int8x8_t __ret;
65630   __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
65631   return __ret;
65632 }
65633 #else
65634 __ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) {
65635   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
65636   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
65637   int8x8_t __ret;
65638   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
65639   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
65640   return __ret;
65641 }
65642 #endif
65643 
65644 #ifdef __LITTLE_ENDIAN__
65645 __ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) {
65646   float32x2_t __ret;
65647   __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
65648   return __ret;
65649 }
65650 #else
65651 __ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) {
65652   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
65653   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
65654   float32x2_t __ret;
65655   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
65656   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
65657   return __ret;
65658 }
65659 #endif
65660 
65661 #ifdef __LITTLE_ENDIAN__
65662 __ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) {
65663   int32x2_t __ret;
65664   __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
65665   return __ret;
65666 }
65667 #else
65668 __ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) {
65669   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
65670   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
65671   int32x2_t __ret;
65672   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
65673   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
65674   return __ret;
65675 }
65676 #endif
65677 
65678 #ifdef __LITTLE_ENDIAN__
65679 __ai int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) {
65680   int16x4_t __ret;
65681   __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
65682   return __ret;
65683 }
65684 #else
65685 __ai int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) {
65686   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
65687   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
65688   int16x4_t __ret;
65689   __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
65690   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
65691   return __ret;
65692 }
65693 #endif
65694 
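/* vzip2 family: interleave the high halves of the two sources. */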
65695 #ifdef __LITTLE_ENDIAN__
65696 __ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) {
65697   poly8x8_t __ret;
65698   __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
65699   return __ret;
65700 }
65701 #else
65702 __ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) {
65703   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
65704   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
65705   poly8x8_t __ret;
65706   __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
65707   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
65708   return __ret;
65709 }
65710 #endif
65711 
65712 #ifdef __LITTLE_ENDIAN__
65713 __ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) {
65714   poly16x4_t __ret;
65715   __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
65716   return __ret;
65717 }
65718 #else
65719 __ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) {
65720   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
65721   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
65722   poly16x4_t __ret;
65723   __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
65724   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
65725   return __ret;
65726 }
65727 #endif
65728 
65729 #ifdef __LITTLE_ENDIAN__
65730 __ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
65731   poly8x16_t __ret;
65732   __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
65733   return __ret;
65734 }
65735 #else
65736 __ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
65737   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
65738   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
65739   poly8x16_t __ret;
65740   __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
65741   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
65742   return __ret;
65743 }
65744 #endif
65745 
65746 #ifdef __LITTLE_ENDIAN__
65747 __ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
65748   poly64x2_t __ret;
65749   __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
65750   return __ret;
65751 }
65752 #else
65753 __ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
65754   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
65755   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
65756   poly64x2_t __ret;
65757   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
65758   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
65759   return __ret;
65760 }
65761 #endif
65762 
65763 #ifdef __LITTLE_ENDIAN__
65764 __ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
65765   poly16x8_t __ret;
65766   __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
65767   return __ret;
65768 }
65769 #else
65770 __ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
65771   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
65772   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
65773   poly16x8_t __ret;
65774   __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
65775   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
65776   return __ret;
65777 }
65778 #endif
65779 
65780 #ifdef __LITTLE_ENDIAN__
65781 __ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
65782   uint8x16_t __ret;
65783   __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
65784   return __ret;
65785 }
65786 #else
65787 __ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
65788   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
65789   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
65790   uint8x16_t __ret;
65791   __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
65792   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
65793   return __ret;
65794 }
65795 #endif
65796 
65797 #ifdef __LITTLE_ENDIAN__
65798 __ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
65799   uint32x4_t __ret;
65800   __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
65801   return __ret;
65802 }
65803 #else
65804 __ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
65805   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
65806   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
65807   uint32x4_t __ret;
65808   __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
65809   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
65810   return __ret;
65811 }
65812 #endif
65813 
65814 #ifdef __LITTLE_ENDIAN__
65815 __ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
65816   uint64x2_t __ret;
65817   __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
65818   return __ret;
65819 }
65820 #else
65821 __ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
65822   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
65823   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
65824   uint64x2_t __ret;
65825   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
65826   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
65827   return __ret;
65828 }
65829 #endif
65830 
65831 #ifdef __LITTLE_ENDIAN__
65832 __ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
65833   uint16x8_t __ret;
65834   __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
65835   return __ret;
65836 }
65837 #else
65838 __ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
65839   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
65840   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
65841   uint16x8_t __ret;
65842   __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
65843   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
65844   return __ret;
65845 }
65846 #endif
65847 
65848 #ifdef __LITTLE_ENDIAN__
65849 __ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) {
65850   int8x16_t __ret;
65851   __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
65852   return __ret;
65853 }
65854 #else
65855 __ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) {
65856   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
65857   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
65858   int8x16_t __ret;
65859   __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
65860   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
65861   return __ret;
65862 }
65863 #endif
65864 
65865 #ifdef __LITTLE_ENDIAN__
65866 __ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) {
65867   float64x2_t __ret;
65868   __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
65869   return __ret;
65870 }
65871 #else
65872 __ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) {
65873   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
65874   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
65875   float64x2_t __ret;
65876   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
65877   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
65878   return __ret;
65879 }
65880 #endif
65881 
65882 #ifdef __LITTLE_ENDIAN__
65883 __ai float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) {
65884   float32x4_t __ret;
65885   __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
65886   return __ret;
65887 }
65888 #else
65889 __ai float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) {
65890   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
65891   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
65892   float32x4_t __ret;
65893   __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
65894   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
65895   return __ret;
65896 }
65897 #endif
65898 
65899 #ifdef __LITTLE_ENDIAN__
65900 __ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) {
65901   int32x4_t __ret;
65902   __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
65903   return __ret;
65904 }
65905 #else
65906 __ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) {
65907   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
65908   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
65909   int32x4_t __ret;
65910   __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
65911   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
65912   return __ret;
65913 }
65914 #endif
65915 
65916 #ifdef __LITTLE_ENDIAN__
65917 __ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) {
65918   int64x2_t __ret;
65919   __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
65920   return __ret;
65921 }
65922 #else
65923 __ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) {
65924   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
65925   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
65926   int64x2_t __ret;
65927   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
65928   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
65929   return __ret;
65930 }
65931 #endif
65932 
65933 #ifdef __LITTLE_ENDIAN__
65934 __ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) {
65935   int16x8_t __ret;
65936   __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
65937   return __ret;
65938 }
65939 #else
65940 __ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) {
65941   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
65942   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
65943   int16x8_t __ret;
65944   __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
65945   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
65946   return __ret;
65947 }
65948 #endif
65949 
65950 #ifdef __LITTLE_ENDIAN__
65951 __ai uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) {
65952   uint8x8_t __ret;
65953   __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
65954   return __ret;
65955 }
65956 #else
65957 __ai uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) {
65958   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
65959   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
65960   uint8x8_t __ret;
65961   __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
65962   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
65963   return __ret;
65964 }
65965 #endif
65966 
65967 #ifdef __LITTLE_ENDIAN__
65968 __ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) {
65969   uint32x2_t __ret;
65970   __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
65971   return __ret;
65972 }
65973 #else
65974 __ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) {
65975   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
65976   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
65977   uint32x2_t __ret;
65978   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
65979   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
65980   return __ret;
65981 }
65982 #endif
65983 
65984 #ifdef __LITTLE_ENDIAN__
65985 __ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) {
65986   uint16x4_t __ret;
65987   __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
65988   return __ret;
65989 }
65990 #else
65991 __ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) {
65992   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
65993   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
65994   uint16x4_t __ret;
65995   __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
65996   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
65997   return __ret;
65998 }
65999 #endif
66000 
66001 #ifdef __LITTLE_ENDIAN__
66002 __ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) {
66003   int8x8_t __ret;
66004   __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
66005   return __ret;
66006 }
66007 #else
66008 __ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) {
66009   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
66010   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
66011   int8x8_t __ret;
66012   __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
66013   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
66014   return __ret;
66015 }
66016 #endif
66017 
66018 #ifdef __LITTLE_ENDIAN__
66019 __ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) {
66020   float32x2_t __ret;
66021   __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
66022   return __ret;
66023 }
66024 #else
66025 __ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) {
66026   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
66027   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
66028   float32x2_t __ret;
66029   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
66030   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
66031   return __ret;
66032 }
66033 #endif
66034 
66035 #ifdef __LITTLE_ENDIAN__
66036 __ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) {
66037   int32x2_t __ret;
66038   __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
66039   return __ret;
66040 }
66041 #else
66042 __ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) {
66043   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
66044   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
66045   int32x2_t __ret;
66046   __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
66047   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
66048   return __ret;
66049 }
66050 #endif
66051 
66052 #ifdef __LITTLE_ENDIAN__
66053 __ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) {
66054   int16x4_t __ret;
66055   __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
66056   return __ret;
66057 }
66058 #else
66059 __ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) {
66060   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
66061   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
66062   int16x4_t __ret;
66063   __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
66064   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
66065   return __ret;
66066 }
66067 #endif
66068 
66069 #endif
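/* vaba/vabaq (absolute difference and accumulate): each variant below returns
 * __p0 + vabd(__p1, __p2), i.e. acc[i] + |a[i] - b[i]| per lane. The #else
 * (big-endian) paths reverse the lanes, call the __noswap_ helpers, and reverse
 * the result back. Illustrative use: uint8x8_t r = vaba_u8(acc, a, b); */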
66070 #ifdef __LITTLE_ENDIAN__
66071 __ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
66072   uint8x16_t __ret;
66073   __ret = __p0 + vabdq_u8(__p1, __p2);
66074   return __ret;
66075 }
66076 #else
66077 __ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
66078   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
66079   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
66080   uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
66081   uint8x16_t __ret;
66082   __ret = __rev0 + __noswap_vabdq_u8(__rev1, __rev2);
66083   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
66084   return __ret;
66085 }
66086 #endif
66087 
66088 #ifdef __LITTLE_ENDIAN__
66089 __ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
66090   uint32x4_t __ret;
66091   __ret = __p0 + vabdq_u32(__p1, __p2);
66092   return __ret;
66093 }
66094 #else
66095 __ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
66096   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
66097   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
66098   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
66099   uint32x4_t __ret;
66100   __ret = __rev0 + __noswap_vabdq_u32(__rev1, __rev2);
66101   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
66102   return __ret;
66103 }
66104 #endif
66105 
66106 #ifdef __LITTLE_ENDIAN__
66107 __ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
66108   uint16x8_t __ret;
66109   __ret = __p0 + vabdq_u16(__p1, __p2);
66110   return __ret;
66111 }
66112 #else
66113 __ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
66114   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
66115   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
66116   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
66117   uint16x8_t __ret;
66118   __ret = __rev0 + __noswap_vabdq_u16(__rev1, __rev2);
66119   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
66120   return __ret;
66121 }
66122 #endif
66123 
66124 #ifdef __LITTLE_ENDIAN__
66125 __ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
66126   int8x16_t __ret;
66127   __ret = __p0 + vabdq_s8(__p1, __p2);
66128   return __ret;
66129 }
66130 #else
66131 __ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
66132   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
66133   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
66134   int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
66135   int8x16_t __ret;
66136   __ret = __rev0 + __noswap_vabdq_s8(__rev1, __rev2);
66137   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
66138   return __ret;
66139 }
66140 #endif
66141 
66142 #ifdef __LITTLE_ENDIAN__
66143 __ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
66144   int32x4_t __ret;
66145   __ret = __p0 + vabdq_s32(__p1, __p2);
66146   return __ret;
66147 }
66148 #else
66149 __ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
66150   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
66151   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
66152   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
66153   int32x4_t __ret;
66154   __ret = __rev0 + __noswap_vabdq_s32(__rev1, __rev2);
66155   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
66156   return __ret;
66157 }
66158 #endif
66159 
66160 #ifdef __LITTLE_ENDIAN__
66161 __ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
66162   int16x8_t __ret;
66163   __ret = __p0 + vabdq_s16(__p1, __p2);
66164   return __ret;
66165 }
66166 #else
66167 __ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
66168   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
66169   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
66170   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
66171   int16x8_t __ret;
66172   __ret = __rev0 + __noswap_vabdq_s16(__rev1, __rev2);
66173   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
66174   return __ret;
66175 }
66176 #endif
66177 
66178 #ifdef __LITTLE_ENDIAN__
66179 __ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
66180   uint8x8_t __ret;
66181   __ret = __p0 + vabd_u8(__p1, __p2);
66182   return __ret;
66183 }
66184 #else
66185 __ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
66186   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
66187   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
66188   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
66189   uint8x8_t __ret;
66190   __ret = __rev0 + __noswap_vabd_u8(__rev1, __rev2);
66191   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
66192   return __ret;
66193 }
66194 #endif
66195 
66196 #ifdef __LITTLE_ENDIAN__
66197 __ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
66198   uint32x2_t __ret;
66199   __ret = __p0 + vabd_u32(__p1, __p2);
66200   return __ret;
66201 }
66202 #else
66203 __ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
66204   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
66205   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
66206   uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
66207   uint32x2_t __ret;
66208   __ret = __rev0 + __noswap_vabd_u32(__rev1, __rev2);
66209   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
66210   return __ret;
66211 }
66212 #endif
66213 
66214 #ifdef __LITTLE_ENDIAN__
66215 __ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
66216   uint16x4_t __ret;
66217   __ret = __p0 + vabd_u16(__p1, __p2);
66218   return __ret;
66219 }
66220 #else
66221 __ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
66222   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
66223   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
66224   uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
66225   uint16x4_t __ret;
66226   __ret = __rev0 + __noswap_vabd_u16(__rev1, __rev2);
66227   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
66228   return __ret;
66229 }
66230 #endif
66231 
66232 #ifdef __LITTLE_ENDIAN__
66233 __ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
66234   int8x8_t __ret;
66235   __ret = __p0 + vabd_s8(__p1, __p2);
66236   return __ret;
66237 }
66238 #else
66239 __ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
66240   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
66241   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
66242   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
66243   int8x8_t __ret;
66244   __ret = __rev0 + __noswap_vabd_s8(__rev1, __rev2);
66245   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
66246   return __ret;
66247 }
66248 #endif
66249 
66250 #ifdef __LITTLE_ENDIAN__
66251 __ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
66252   int32x2_t __ret;
66253   __ret = __p0 + vabd_s32(__p1, __p2);
66254   return __ret;
66255 }
66256 #else
66257 __ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
66258   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
66259   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
66260   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
66261   int32x2_t __ret;
66262   __ret = __rev0 + __noswap_vabd_s32(__rev1, __rev2);
66263   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
66264   return __ret;
66265 }
66266 #endif
66267 
66268 #ifdef __LITTLE_ENDIAN__
66269 __ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
66270   int16x4_t __ret;
66271   __ret = __p0 + vabd_s16(__p1, __p2);
66272   return __ret;
66273 }
66274 #else
66275 __ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
66276   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
66277   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
66278   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
66279   int16x4_t __ret;
66280   __ret = __rev0 + __noswap_vabd_s16(__rev1, __rev2);
66281   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
66282   return __ret;
66283 }
66284 #endif
66285 
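/* vabdl (widening absolute difference): vmovl(vabd(__p0, __p1)), returning the
 * per-lane |a[i] - b[i]| in elements twice as wide as the inputs. Note the signed
 * variants also widen through vmovl_u8/u16/u32, since the difference is non-negative. */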
66286 #ifdef __LITTLE_ENDIAN__
66287 __ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) {
66288   uint16x8_t __ret;
66289   __ret = (uint16x8_t)(vmovl_u8((uint8x8_t)(vabd_u8(__p0, __p1))));
66290   return __ret;
66291 }
66292 #else
66293 __ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) {
66294   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
66295   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
66296   uint16x8_t __ret;
66297   __ret = (uint16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_u8(__rev0, __rev1))));
66298   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
66299   return __ret;
66300 }
66301 __ai uint16x8_t __noswap_vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) {
66302   uint16x8_t __ret;
66303   __ret = (uint16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_u8(__p0, __p1))));
66304   return __ret;
66305 }
66306 #endif
66307 
66308 #ifdef __LITTLE_ENDIAN__
66309 __ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) {
66310   uint64x2_t __ret;
66311   __ret = (uint64x2_t)(vmovl_u32((uint32x2_t)(vabd_u32(__p0, __p1))));
66312   return __ret;
66313 }
66314 #else
66315 __ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) {
66316   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
66317   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
66318   uint64x2_t __ret;
66319   __ret = (uint64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_u32(__rev0, __rev1))));
66320   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
66321   return __ret;
66322 }
66323 __ai uint64x2_t __noswap_vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) {
66324   uint64x2_t __ret;
66325   __ret = (uint64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_u32(__p0, __p1))));
66326   return __ret;
66327 }
66328 #endif
66329 
66330 #ifdef __LITTLE_ENDIAN__
66331 __ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) {
66332   uint32x4_t __ret;
66333   __ret = (uint32x4_t)(vmovl_u16((uint16x4_t)(vabd_u16(__p0, __p1))));
66334   return __ret;
66335 }
66336 #else
66337 __ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) {
66338   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
66339   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
66340   uint32x4_t __ret;
66341   __ret = (uint32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_u16(__rev0, __rev1))));
66342   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
66343   return __ret;
66344 }
66345 __ai uint32x4_t __noswap_vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) {
66346   uint32x4_t __ret;
66347   __ret = (uint32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_u16(__p0, __p1))));
66348   return __ret;
66349 }
66350 #endif
66351 
66352 #ifdef __LITTLE_ENDIAN__
66353 __ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) {
66354   int16x8_t __ret;
66355   __ret = (int16x8_t)(vmovl_u8((uint8x8_t)(vabd_s8(__p0, __p1))));
66356   return __ret;
66357 }
66358 #else
66359 __ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) {
66360   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
66361   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
66362   int16x8_t __ret;
66363   __ret = (int16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_s8(__rev0, __rev1))));
66364   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
66365   return __ret;
66366 }
66367 __ai int16x8_t __noswap_vabdl_s8(int8x8_t __p0, int8x8_t __p1) {
66368   int16x8_t __ret;
66369   __ret = (int16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_s8(__p0, __p1))));
66370   return __ret;
66371 }
66372 #endif
66373 
66374 #ifdef __LITTLE_ENDIAN__
66375 __ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) {
66376   int64x2_t __ret;
66377   __ret = (int64x2_t)(vmovl_u32((uint32x2_t)(vabd_s32(__p0, __p1))));
66378   return __ret;
66379 }
66380 #else
66381 __ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) {
66382   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
66383   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
66384   int64x2_t __ret;
66385   __ret = (int64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_s32(__rev0, __rev1))));
66386   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
66387   return __ret;
66388 }
66389 __ai int64x2_t __noswap_vabdl_s32(int32x2_t __p0, int32x2_t __p1) {
66390   int64x2_t __ret;
66391   __ret = (int64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_s32(__p0, __p1))));
66392   return __ret;
66393 }
66394 #endif
66395 
66396 #ifdef __LITTLE_ENDIAN__
66397 __ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) {
66398   int32x4_t __ret;
66399   __ret = (int32x4_t)(vmovl_u16((uint16x4_t)(vabd_s16(__p0, __p1))));
66400   return __ret;
66401 }
66402 #else
66403 __ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) {
66404   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
66405   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
66406   int32x4_t __ret;
66407   __ret = (int32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_s16(__rev0, __rev1))));
66408   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
66409   return __ret;
66410 }
66411 __ai int32x4_t __noswap_vabdl_s16(int16x4_t __p0, int16x4_t __p1) {
66412   int32x4_t __ret;
66413   __ret = (int32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_s16(__p0, __p1))));
66414   return __ret;
66415 }
66416 #endif
66417 
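/* vaddl (widening add): both narrow operands are widened with vmovl and then
 * added, so the sum is returned at double width without wrap-around. */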
66418 #ifdef __LITTLE_ENDIAN__
66419 __ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) {
66420   uint16x8_t __ret;
66421   __ret = vmovl_u8(__p0) + vmovl_u8(__p1);
66422   return __ret;
66423 }
66424 #else
66425 __ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) {
66426   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
66427   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
66428   uint16x8_t __ret;
66429   __ret = __noswap_vmovl_u8(__rev0) + __noswap_vmovl_u8(__rev1);
66430   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
66431   return __ret;
66432 }
66433 #endif
66434 
66435 #ifdef __LITTLE_ENDIAN__
66436 __ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) {
66437   uint64x2_t __ret;
66438   __ret = vmovl_u32(__p0) + vmovl_u32(__p1);
66439   return __ret;
66440 }
66441 #else
66442 __ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) {
66443   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
66444   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
66445   uint64x2_t __ret;
66446   __ret = __noswap_vmovl_u32(__rev0) + __noswap_vmovl_u32(__rev1);
66447   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
66448   return __ret;
66449 }
66450 #endif
66451 
66452 #ifdef __LITTLE_ENDIAN__
66453 __ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) {
66454   uint32x4_t __ret;
66455   __ret = vmovl_u16(__p0) + vmovl_u16(__p1);
66456   return __ret;
66457 }
66458 #else
66459 __ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) {
66460   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
66461   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
66462   uint32x4_t __ret;
66463   __ret = __noswap_vmovl_u16(__rev0) + __noswap_vmovl_u16(__rev1);
66464   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
66465   return __ret;
66466 }
66467 #endif
66468 
66469 #ifdef __LITTLE_ENDIAN__
66470 __ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) {
66471   int16x8_t __ret;
66472   __ret = vmovl_s8(__p0) + vmovl_s8(__p1);
66473   return __ret;
66474 }
66475 #else
66476 __ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) {
66477   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
66478   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
66479   int16x8_t __ret;
66480   __ret = __noswap_vmovl_s8(__rev0) + __noswap_vmovl_s8(__rev1);
66481   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
66482   return __ret;
66483 }
66484 #endif
66485 
66486 #ifdef __LITTLE_ENDIAN__
66487 __ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) {
66488   int64x2_t __ret;
66489   __ret = vmovl_s32(__p0) + vmovl_s32(__p1);
66490   return __ret;
66491 }
66492 #else
66493 __ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) {
66494   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
66495   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
66496   int64x2_t __ret;
66497   __ret = __noswap_vmovl_s32(__rev0) + __noswap_vmovl_s32(__rev1);
66498   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
66499   return __ret;
66500 }
66501 #endif
66502 
66503 #ifdef __LITTLE_ENDIAN__
66504 __ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) {
66505   int32x4_t __ret;
66506   __ret = vmovl_s16(__p0) + vmovl_s16(__p1);
66507   return __ret;
66508 }
66509 #else
66510 __ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) {
66511   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
66512   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
66513   int32x4_t __ret;
66514   __ret = __noswap_vmovl_s16(__rev0) + __noswap_vmovl_s16(__rev1);
66515   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
66516   return __ret;
66517 }
66518 #endif
66519 
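/* vaddw (widening add, one wide operand): only the second, narrow operand is
 * widened with vmovl before being added to the already-wide __p0. */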
66520 #ifdef __LITTLE_ENDIAN__
66521 __ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) {
66522   uint16x8_t __ret;
66523   __ret = __p0 + vmovl_u8(__p1);
66524   return __ret;
66525 }
66526 #else
66527 __ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) {
66528   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
66529   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
66530   uint16x8_t __ret;
66531   __ret = __rev0 + __noswap_vmovl_u8(__rev1);
66532   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
66533   return __ret;
66534 }
66535 #endif
66536 
66537 #ifdef __LITTLE_ENDIAN__
66538 __ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) {
66539   uint64x2_t __ret;
66540   __ret = __p0 + vmovl_u32(__p1);
66541   return __ret;
66542 }
66543 #else
66544 __ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) {
66545   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
66546   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
66547   uint64x2_t __ret;
66548   __ret = __rev0 + __noswap_vmovl_u32(__rev1);
66549   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
66550   return __ret;
66551 }
66552 #endif
66553 
66554 #ifdef __LITTLE_ENDIAN__
66555 __ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) {
66556   uint32x4_t __ret;
66557   __ret = __p0 + vmovl_u16(__p1);
66558   return __ret;
66559 }
66560 #else
66561 __ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) {
66562   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
66563   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
66564   uint32x4_t __ret;
66565   __ret = __rev0 + __noswap_vmovl_u16(__rev1);
66566   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
66567   return __ret;
66568 }
66569 #endif
66570 
66571 #ifdef __LITTLE_ENDIAN__
66572 __ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) {
66573   int16x8_t __ret;
66574   __ret = __p0 + vmovl_s8(__p1);
66575   return __ret;
66576 }
66577 #else
66578 __ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) {
66579   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
66580   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
66581   int16x8_t __ret;
66582   __ret = __rev0 + __noswap_vmovl_s8(__rev1);
66583   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
66584   return __ret;
66585 }
66586 #endif
66587 
66588 #ifdef __LITTLE_ENDIAN__
66589 __ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) {
66590   int64x2_t __ret;
66591   __ret = __p0 + vmovl_s32(__p1);
66592   return __ret;
66593 }
66594 #else
66595 __ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) {
66596   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
66597   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
66598   int64x2_t __ret;
66599   __ret = __rev0 + __noswap_vmovl_s32(__rev1);
66600   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
66601   return __ret;
66602 }
66603 #endif
66604 
66605 #ifdef __LITTLE_ENDIAN__
66606 __ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) {
66607   int32x4_t __ret;
66608   __ret = __p0 + vmovl_s16(__p1);
66609   return __ret;
66610 }
66611 #else
66612 __ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) {
66613   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
66614   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
66615   int32x4_t __ret;
66616   __ret = __rev0 + __noswap_vmovl_s16(__rev1);
66617   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
66618   return __ret;
66619 }
66620 #endif
66621 
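/* vmlal (widening multiply-accumulate): __p0 + vmull(__p1, __p2), i.e.
 * acc[i] + a[i] * b[i] with the product formed at double width.
 * Illustrative use: int32x4_t acc2 = vmlal_s16(acc, a, b); */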
66622 #ifdef __LITTLE_ENDIAN__
66623 __ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
66624   uint16x8_t __ret;
66625   __ret = __p0 + vmull_u8(__p1, __p2);
66626   return __ret;
66627 }
66628 #else
66629 __ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
66630   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
66631   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
66632   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
66633   uint16x8_t __ret;
66634   __ret = __rev0 + __noswap_vmull_u8(__rev1, __rev2);
66635   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
66636   return __ret;
66637 }
66638 __ai uint16x8_t __noswap_vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
66639   uint16x8_t __ret;
66640   __ret = __p0 + __noswap_vmull_u8(__p1, __p2);
66641   return __ret;
66642 }
66643 #endif
66644 
66645 #ifdef __LITTLE_ENDIAN__
66646 __ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
66647   uint64x2_t __ret;
66648   __ret = __p0 + vmull_u32(__p1, __p2);
66649   return __ret;
66650 }
66651 #else
66652 __ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
66653   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
66654   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
66655   uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
66656   uint64x2_t __ret;
66657   __ret = __rev0 + __noswap_vmull_u32(__rev1, __rev2);
66658   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
66659   return __ret;
66660 }
66661 __ai uint64x2_t __noswap_vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
66662   uint64x2_t __ret;
66663   __ret = __p0 + __noswap_vmull_u32(__p1, __p2);
66664   return __ret;
66665 }
66666 #endif
66667 
66668 #ifdef __LITTLE_ENDIAN__
66669 __ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
66670   uint32x4_t __ret;
66671   __ret = __p0 + vmull_u16(__p1, __p2);
66672   return __ret;
66673 }
66674 #else
66675 __ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
66676   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
66677   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
66678   uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
66679   uint32x4_t __ret;
66680   __ret = __rev0 + __noswap_vmull_u16(__rev1, __rev2);
66681   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
66682   return __ret;
66683 }
66684 __ai uint32x4_t __noswap_vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
66685   uint32x4_t __ret;
66686   __ret = __p0 + __noswap_vmull_u16(__p1, __p2);
66687   return __ret;
66688 }
66689 #endif
66690 
66691 #ifdef __LITTLE_ENDIAN__
66692 __ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
66693   int16x8_t __ret;
66694   __ret = __p0 + vmull_s8(__p1, __p2);
66695   return __ret;
66696 }
66697 #else
66698 __ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
66699   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
66700   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
66701   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
66702   int16x8_t __ret;
66703   __ret = __rev0 + __noswap_vmull_s8(__rev1, __rev2);
66704   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
66705   return __ret;
66706 }
66707 __ai int16x8_t __noswap_vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
66708   int16x8_t __ret;
66709   __ret = __p0 + __noswap_vmull_s8(__p1, __p2);
66710   return __ret;
66711 }
66712 #endif
66713 
66714 #ifdef __LITTLE_ENDIAN__
66715 __ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
66716   int64x2_t __ret;
66717   __ret = __p0 + vmull_s32(__p1, __p2);
66718   return __ret;
66719 }
66720 #else
66721 __ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
66722   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
66723   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
66724   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
66725   int64x2_t __ret;
66726   __ret = __rev0 + __noswap_vmull_s32(__rev1, __rev2);
66727   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
66728   return __ret;
66729 }
66730 __ai int64x2_t __noswap_vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
66731   int64x2_t __ret;
66732   __ret = __p0 + __noswap_vmull_s32(__p1, __p2);
66733   return __ret;
66734 }
66735 #endif
66736 
66737 #ifdef __LITTLE_ENDIAN__
66738 __ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
66739   int32x4_t __ret;
66740   __ret = __p0 + vmull_s16(__p1, __p2);
66741   return __ret;
66742 }
66743 #else
66744 __ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
66745   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
66746   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
66747   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
66748   int32x4_t __ret;
66749   __ret = __rev0 + __noswap_vmull_s16(__rev1, __rev2);
66750   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
66751   return __ret;
66752 }
66753 __ai int32x4_t __noswap_vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
66754   int32x4_t __ret;
66755   __ret = __p0 + __noswap_vmull_s16(__p1, __p2);
66756   return __ret;
66757 }
66758 #endif
66759 
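/* vmlal_lane: widening multiply-accumulate against one lane of the third
 * argument. These are macros because the lane index __p3 feeds
 * __builtin_shufflevector and must therefore be an integer constant. */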
66760 #ifdef __LITTLE_ENDIAN__
66761 #define vmlal_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
66762   uint64x2_t __s0 = __p0; \
66763   uint32x2_t __s1 = __p1; \
66764   uint32x2_t __s2 = __p2; \
66765   uint64x2_t __ret; \
66766   __ret = __s0 + vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
66767   __ret; \
66768 })
66769 #else
66770 #define vmlal_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
66771   uint64x2_t __s0 = __p0; \
66772   uint32x2_t __s1 = __p1; \
66773   uint32x2_t __s2 = __p2; \
66774   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
66775   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
66776   uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
66777   uint64x2_t __ret; \
66778   __ret = __rev0 + __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
66779   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
66780   __ret; \
66781 })
66782 #endif
66783 
66784 #ifdef __LITTLE_ENDIAN__
66785 #define vmlal_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
66786   uint32x4_t __s0 = __p0; \
66787   uint16x4_t __s1 = __p1; \
66788   uint16x4_t __s2 = __p2; \
66789   uint32x4_t __ret; \
66790   __ret = __s0 + vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
66791   __ret; \
66792 })
66793 #else
66794 #define vmlal_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
66795   uint32x4_t __s0 = __p0; \
66796   uint16x4_t __s1 = __p1; \
66797   uint16x4_t __s2 = __p2; \
66798   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
66799   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
66800   uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
66801   uint32x4_t __ret; \
66802   __ret = __rev0 + __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
66803   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
66804   __ret; \
66805 })
66806 #endif
66807 
66808 #ifdef __LITTLE_ENDIAN__
66809 #define vmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
66810   int64x2_t __s0 = __p0; \
66811   int32x2_t __s1 = __p1; \
66812   int32x2_t __s2 = __p2; \
66813   int64x2_t __ret; \
66814   __ret = __s0 + vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
66815   __ret; \
66816 })
66817 #else
66818 #define vmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
66819   int64x2_t __s0 = __p0; \
66820   int32x2_t __s1 = __p1; \
66821   int32x2_t __s2 = __p2; \
66822   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
66823   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
66824   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
66825   int64x2_t __ret; \
66826   __ret = __rev0 + __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
66827   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
66828   __ret; \
66829 })
66830 #endif
66831 
66832 #ifdef __LITTLE_ENDIAN__
66833 #define vmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
66834   int32x4_t __s0 = __p0; \
66835   int16x4_t __s1 = __p1; \
66836   int16x4_t __s2 = __p2; \
66837   int32x4_t __ret; \
66838   __ret = __s0 + vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
66839   __ret; \
66840 })
66841 #else
66842 #define vmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
66843   int32x4_t __s0 = __p0; \
66844   int16x4_t __s1 = __p1; \
66845   int16x4_t __s2 = __p2; \
66846   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
66847   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
66848   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
66849   int32x4_t __ret; \
66850   __ret = __rev0 + __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
66851   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
66852   __ret; \
66853 })
66854 #endif
66855 
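/* vmlal_n: widening multiply-accumulate by a scalar; the scalar is broadcast
 * into a vector literal {__p2, __p2, ...} before the vmull. */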
66856 #ifdef __LITTLE_ENDIAN__
66857 __ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
66858   uint64x2_t __ret;
66859   __ret = __p0 + vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
66860   return __ret;
66861 }
66862 #else
66863 __ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
66864   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
66865   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
66866   uint64x2_t __ret;
66867   __ret = __rev0 + __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2});
66868   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
66869   return __ret;
66870 }
66871 __ai uint64x2_t __noswap_vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
66872   uint64x2_t __ret;
66873   __ret = __p0 + __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
66874   return __ret;
66875 }
66876 #endif
66877 
66878 #ifdef __LITTLE_ENDIAN__
66879 __ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
66880   uint32x4_t __ret;
66881   __ret = __p0 + vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
66882   return __ret;
66883 }
66884 #else
66885 __ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
66886   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
66887   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
66888   uint32x4_t __ret;
66889   __ret = __rev0 + __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2});
66890   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
66891   return __ret;
66892 }
66893 __ai uint32x4_t __noswap_vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
66894   uint32x4_t __ret;
66895   __ret = __p0 + __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
66896   return __ret;
66897 }
66898 #endif
66899 
66900 #ifdef __LITTLE_ENDIAN__
66901 __ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
66902   int64x2_t __ret;
66903   __ret = __p0 + vmull_s32(__p1, (int32x2_t) {__p2, __p2});
66904   return __ret;
66905 }
66906 #else
66907 __ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
66908   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
66909   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
66910   int64x2_t __ret;
66911   __ret = __rev0 + __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2});
66912   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
66913   return __ret;
66914 }
66915 __ai int64x2_t __noswap_vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
66916   int64x2_t __ret;
66917   __ret = __p0 + __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2});
66918   return __ret;
66919 }
66920 #endif
66921 
66922 #ifdef __LITTLE_ENDIAN__
66923 __ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
66924   int32x4_t __ret;
66925   __ret = __p0 + vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
66926   return __ret;
66927 }
66928 #else
66929 __ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
66930   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
66931   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
66932   int32x4_t __ret;
66933   __ret = __rev0 + __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
66934   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
66935   return __ret;
66936 }
66937 __ai int32x4_t __noswap_vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
66938   int32x4_t __ret;
66939   __ret = __p0 + __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
66940   return __ret;
66941 }
66942 #endif
66943 
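/* vmlsl (widening multiply-subtract): __p0 - vmull(__p1, __p2), i.e.
 * acc[i] - a[i] * b[i] with the product formed at double width. */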
66944 #ifdef __LITTLE_ENDIAN__
66945 __ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
66946   uint16x8_t __ret;
66947   __ret = __p0 - vmull_u8(__p1, __p2);
66948   return __ret;
66949 }
66950 #else
66951 __ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
66952   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
66953   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
66954   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
66955   uint16x8_t __ret;
66956   __ret = __rev0 - __noswap_vmull_u8(__rev1, __rev2);
66957   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
66958   return __ret;
66959 }
66960 __ai uint16x8_t __noswap_vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
66961   uint16x8_t __ret;
66962   __ret = __p0 - __noswap_vmull_u8(__p1, __p2);
66963   return __ret;
66964 }
66965 #endif
66966 
66967 #ifdef __LITTLE_ENDIAN__
66968 __ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
66969   uint64x2_t __ret;
66970   __ret = __p0 - vmull_u32(__p1, __p2);
66971   return __ret;
66972 }
66973 #else
66974 __ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
66975   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
66976   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
66977   uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
66978   uint64x2_t __ret;
66979   __ret = __rev0 - __noswap_vmull_u32(__rev1, __rev2);
66980   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
66981   return __ret;
66982 }
66983 __ai uint64x2_t __noswap_vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
66984   uint64x2_t __ret;
66985   __ret = __p0 - __noswap_vmull_u32(__p1, __p2);
66986   return __ret;
66987 }
66988 #endif
66989 
66990 #ifdef __LITTLE_ENDIAN__
66991 __ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
66992   uint32x4_t __ret;
66993   __ret = __p0 - vmull_u16(__p1, __p2);
66994   return __ret;
66995 }
66996 #else
66997 __ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
66998   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
66999   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
67000   uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
67001   uint32x4_t __ret;
67002   __ret = __rev0 - __noswap_vmull_u16(__rev1, __rev2);
67003   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
67004   return __ret;
67005 }
67006 __ai uint32x4_t __noswap_vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
67007   uint32x4_t __ret;
67008   __ret = __p0 - __noswap_vmull_u16(__p1, __p2);
67009   return __ret;
67010 }
67011 #endif
67012 
67013 #ifdef __LITTLE_ENDIAN__
67014 __ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
67015   int16x8_t __ret;
67016   __ret = __p0 - vmull_s8(__p1, __p2);
67017   return __ret;
67018 }
67019 #else
67020 __ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
67021   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
67022   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
67023   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
67024   int16x8_t __ret;
67025   __ret = __rev0 - __noswap_vmull_s8(__rev1, __rev2);
67026   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
67027   return __ret;
67028 }
67029 __ai int16x8_t __noswap_vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
67030   int16x8_t __ret;
67031   __ret = __p0 - __noswap_vmull_s8(__p1, __p2);
67032   return __ret;
67033 }
67034 #endif
67035 
67036 #ifdef __LITTLE_ENDIAN__
67037 __ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
67038   int64x2_t __ret;
67039   __ret = __p0 - vmull_s32(__p1, __p2);
67040   return __ret;
67041 }
67042 #else
67043 __ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
67044   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
67045   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
67046   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
67047   int64x2_t __ret;
67048   __ret = __rev0 - __noswap_vmull_s32(__rev1, __rev2);
67049   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
67050   return __ret;
67051 }
67052 __ai int64x2_t __noswap_vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
67053   int64x2_t __ret;
67054   __ret = __p0 - __noswap_vmull_s32(__p1, __p2);
67055   return __ret;
67056 }
67057 #endif
67058 
67059 #ifdef __LITTLE_ENDIAN__
67060 __ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
67061   int32x4_t __ret;
67062   __ret = __p0 - vmull_s16(__p1, __p2);
67063   return __ret;
67064 }
67065 #else
67066 __ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
67067   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
67068   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
67069   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
67070   int32x4_t __ret;
67071   __ret = __rev0 - __noswap_vmull_s16(__rev1, __rev2);
67072   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
67073   return __ret;
67074 }
67075 __ai int32x4_t __noswap_vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
67076   int32x4_t __ret;
67077   __ret = __p0 - __noswap_vmull_s16(__p1, __p2);
67078   return __ret;
67079 }
67080 #endif
67081 
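/* vmlsl_lane: widening multiply-subtract against one constant lane of the
 * third argument, mirroring the vmlal_lane macros above. */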
67082 #ifdef __LITTLE_ENDIAN__
67083 #define vmlsl_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
67084   uint64x2_t __s0 = __p0; \
67085   uint32x2_t __s1 = __p1; \
67086   uint32x2_t __s2 = __p2; \
67087   uint64x2_t __ret; \
67088   __ret = __s0 - vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
67089   __ret; \
67090 })
67091 #else
67092 #define vmlsl_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
67093   uint64x2_t __s0 = __p0; \
67094   uint32x2_t __s1 = __p1; \
67095   uint32x2_t __s2 = __p2; \
67096   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
67097   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
67098   uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
67099   uint64x2_t __ret; \
67100   __ret = __rev0 - __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
67101   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
67102   __ret; \
67103 })
67104 #endif
67105 
67106 #ifdef __LITTLE_ENDIAN__
67107 #define vmlsl_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
67108   uint32x4_t __s0 = __p0; \
67109   uint16x4_t __s1 = __p1; \
67110   uint16x4_t __s2 = __p2; \
67111   uint32x4_t __ret; \
67112   __ret = __s0 - vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
67113   __ret; \
67114 })
67115 #else
67116 #define vmlsl_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
67117   uint32x4_t __s0 = __p0; \
67118   uint16x4_t __s1 = __p1; \
67119   uint16x4_t __s2 = __p2; \
67120   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
67121   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
67122   uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
67123   uint32x4_t __ret; \
67124   __ret = __rev0 - __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
67125   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
67126   __ret; \
67127 })
67128 #endif
67129 
67130 #ifdef __LITTLE_ENDIAN__
67131 #define vmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
67132   int64x2_t __s0 = __p0; \
67133   int32x2_t __s1 = __p1; \
67134   int32x2_t __s2 = __p2; \
67135   int64x2_t __ret; \
67136   __ret = __s0 - vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
67137   __ret; \
67138 })
67139 #else
67140 #define vmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
67141   int64x2_t __s0 = __p0; \
67142   int32x2_t __s1 = __p1; \
67143   int32x2_t __s2 = __p2; \
67144   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
67145   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
67146   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
67147   int64x2_t __ret; \
67148   __ret = __rev0 - __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
67149   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
67150   __ret; \
67151 })
67152 #endif
67153 
67154 #ifdef __LITTLE_ENDIAN__
67155 #define vmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
67156   int32x4_t __s0 = __p0; \
67157   int16x4_t __s1 = __p1; \
67158   int16x4_t __s2 = __p2; \
67159   int32x4_t __ret; \
67160   __ret = __s0 - vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
67161   __ret; \
67162 })
67163 #else
67164 #define vmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
67165   int32x4_t __s0 = __p0; \
67166   int16x4_t __s1 = __p1; \
67167   int16x4_t __s2 = __p2; \
67168   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
67169   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
67170   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
67171   int32x4_t __ret; \
67172   __ret = __rev0 - __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
67173   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
67174   __ret; \
67175 })
67176 #endif
67177 
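/* vmlsl_n: widening multiply-subtract by a broadcast scalar, mirroring vmlal_n. */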
67178 #ifdef __LITTLE_ENDIAN__
67179 __ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
67180   uint64x2_t __ret;
67181   __ret = __p0 - vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
67182   return __ret;
67183 }
67184 #else
67185 __ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
67186   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
67187   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
67188   uint64x2_t __ret;
67189   __ret = __rev0 - __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2});
67190   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
67191   return __ret;
67192 }
67193 __ai uint64x2_t __noswap_vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
67194   uint64x2_t __ret;
67195   __ret = __p0 - __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
67196   return __ret;
67197 }
67198 #endif
67199 
67200 #ifdef __LITTLE_ENDIAN__
67201 __ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
67202   uint32x4_t __ret;
67203   __ret = __p0 - vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
67204   return __ret;
67205 }
67206 #else
67207 __ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
67208   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
67209   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
67210   uint32x4_t __ret;
67211   __ret = __rev0 - __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2});
67212   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
67213   return __ret;
67214 }
67215 __ai uint32x4_t __noswap_vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
67216   uint32x4_t __ret;
67217   __ret = __p0 - __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
67218   return __ret;
67219 }
67220 #endif
67221 
67222 #ifdef __LITTLE_ENDIAN__
67223 __ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
67224   int64x2_t __ret;
67225   __ret = __p0 - vmull_s32(__p1, (int32x2_t) {__p2, __p2});
67226   return __ret;
67227 }
67228 #else
67229 __ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
67230   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
67231   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
67232   int64x2_t __ret;
67233   __ret = __rev0 - __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2});
67234   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
67235   return __ret;
67236 }
67237 __ai int64x2_t __noswap_vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
67238   int64x2_t __ret;
67239   __ret = __p0 - __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2});
67240   return __ret;
67241 }
67242 #endif
67243 
67244 #ifdef __LITTLE_ENDIAN__
67245 __ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
67246   int32x4_t __ret;
67247   __ret = __p0 - vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
67248   return __ret;
67249 }
67250 #else
67251 __ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
67252   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
67253   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
67254   int32x4_t __ret;
67255   __ret = __rev0 - __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
67256   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
67257   return __ret;
67258 }
67259 __ai int32x4_t __noswap_vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
67260   int32x4_t __ret;
67261   __ret = __p0 - __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
67262   return __ret;
67263 }
67264 #endif
67265 
67266 #if defined(__aarch64__)
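/* AArch64-only section.  vabdl_high_*: widening absolute difference of the high
 * halves of two 128-bit vectors, vabdl(vget_high(__p0), vget_high(__p1)). */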
67267 #ifdef __LITTLE_ENDIAN__
67268 __ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
67269   uint16x8_t __ret;
67270   __ret = vabdl_u8(vget_high_u8(__p0), vget_high_u8(__p1));
67271   return __ret;
67272 }
67273 #else
67274 __ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
67275   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
67276   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
67277   uint16x8_t __ret;
67278   __ret = __noswap_vabdl_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1));
67279   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
67280   return __ret;
67281 }
67282 #endif
67283 
67284 #ifdef __LITTLE_ENDIAN__
67285 __ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
67286   uint64x2_t __ret;
67287   __ret = vabdl_u32(vget_high_u32(__p0), vget_high_u32(__p1));
67288   return __ret;
67289 }
67290 #else
67291 __ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
67292   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
67293   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
67294   uint64x2_t __ret;
67295   __ret = __noswap_vabdl_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1));
67296   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
67297   return __ret;
67298 }
67299 #endif
67300 
67301 #ifdef __LITTLE_ENDIAN__
67302 __ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
67303   uint32x4_t __ret;
67304   __ret = vabdl_u16(vget_high_u16(__p0), vget_high_u16(__p1));
67305   return __ret;
67306 }
67307 #else
67308 __ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
67309   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
67310   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
67311   uint32x4_t __ret;
67312   __ret = __noswap_vabdl_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1));
67313   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
67314   return __ret;
67315 }
67316 #endif
67317 
67318 #ifdef __LITTLE_ENDIAN__
67319 __ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) {
67320   int16x8_t __ret;
67321   __ret = vabdl_s8(vget_high_s8(__p0), vget_high_s8(__p1));
67322   return __ret;
67323 }
67324 #else
67325 __ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) {
67326   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
67327   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
67328   int16x8_t __ret;
67329   __ret = __noswap_vabdl_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1));
67330   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
67331   return __ret;
67332 }
67333 #endif
67334 
67335 #ifdef __LITTLE_ENDIAN__
67336 __ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) {
67337   int64x2_t __ret;
67338   __ret = vabdl_s32(vget_high_s32(__p0), vget_high_s32(__p1));
67339   return __ret;
67340 }
67341 #else
67342 __ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) {
67343   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
67344   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
67345   int64x2_t __ret;
67346   __ret = __noswap_vabdl_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
67347   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
67348   return __ret;
67349 }
67350 #endif
67351 
67352 #ifdef __LITTLE_ENDIAN__
67353 __ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) {
67354   int32x4_t __ret;
67355   __ret = vabdl_s16(vget_high_s16(__p0), vget_high_s16(__p1));
67356   return __ret;
67357 }
67358 #else
67359 __ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) {
67360   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
67361   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
67362   int32x4_t __ret;
67363   __ret = __noswap_vabdl_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
67364   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
67365   return __ret;
67366 }
67367 #endif
67368 
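/* vaddl_high_*: widening add of the high halves,
 * vmovl_high(__p0) + vmovl_high(__p1). */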
67369 #ifdef __LITTLE_ENDIAN__
67370 __ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
67371   uint16x8_t __ret;
67372   __ret = vmovl_high_u8(__p0) + vmovl_high_u8(__p1);
67373   return __ret;
67374 }
67375 #else
67376 __ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
67377   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
67378   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
67379   uint16x8_t __ret;
67380   __ret = __noswap_vmovl_high_u8(__rev0) + __noswap_vmovl_high_u8(__rev1);
67381   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
67382   return __ret;
67383 }
67384 #endif
67385 
67386 #ifdef __LITTLE_ENDIAN__
67387 __ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
67388   uint64x2_t __ret;
67389   __ret = vmovl_high_u32(__p0) + vmovl_high_u32(__p1);
67390   return __ret;
67391 }
67392 #else
67393 __ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
67394   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
67395   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
67396   uint64x2_t __ret;
67397   __ret = __noswap_vmovl_high_u32(__rev0) + __noswap_vmovl_high_u32(__rev1);
67398   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
67399   return __ret;
67400 }
67401 #endif
67402 
67403 #ifdef __LITTLE_ENDIAN__
67404 __ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
67405   uint32x4_t __ret;
67406   __ret = vmovl_high_u16(__p0) + vmovl_high_u16(__p1);
67407   return __ret;
67408 }
67409 #else
67410 __ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
67411   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
67412   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
67413   uint32x4_t __ret;
67414   __ret = __noswap_vmovl_high_u16(__rev0) + __noswap_vmovl_high_u16(__rev1);
67415   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
67416   return __ret;
67417 }
67418 #endif
67419 
67420 #ifdef __LITTLE_ENDIAN__
67421 __ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) {
67422   int16x8_t __ret;
67423   __ret = vmovl_high_s8(__p0) + vmovl_high_s8(__p1);
67424   return __ret;
67425 }
67426 #else
67427 __ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) {
67428   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
67429   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
67430   int16x8_t __ret;
67431   __ret = __noswap_vmovl_high_s8(__rev0) + __noswap_vmovl_high_s8(__rev1);
67432   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
67433   return __ret;
67434 }
67435 #endif
67436 
67437 #ifdef __LITTLE_ENDIAN__
67438 __ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) {
67439   int64x2_t __ret;
67440   __ret = vmovl_high_s32(__p0) + vmovl_high_s32(__p1);
67441   return __ret;
67442 }
67443 #else
67444 __ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) {
67445   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
67446   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
67447   int64x2_t __ret;
67448   __ret = __noswap_vmovl_high_s32(__rev0) + __noswap_vmovl_high_s32(__rev1);
67449   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
67450   return __ret;
67451 }
67452 #endif
67453 
67454 #ifdef __LITTLE_ENDIAN__
67455 __ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) {
67456   int32x4_t __ret;
67457   __ret = vmovl_high_s16(__p0) + vmovl_high_s16(__p1);
67458   return __ret;
67459 }
67460 #else
67461 __ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) {
67462   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
67463   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
67464   int32x4_t __ret;
67465   __ret = __noswap_vmovl_high_s16(__rev0) + __noswap_vmovl_high_s16(__rev1);
67466   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
67467   return __ret;
67468 }
67469 #endif
67470 
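/* vaddw_high_*: wide add; the high half of __p1 is widened and added to the
 * already-wide accumulator __p0. */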
67471 #ifdef __LITTLE_ENDIAN__
67472 __ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
67473   uint16x8_t __ret;
67474   __ret = __p0 + vmovl_high_u8(__p1);
67475   return __ret;
67476 }
67477 #else
67478 __ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
67479   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
67480   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
67481   uint16x8_t __ret;
67482   __ret = __rev0 + __noswap_vmovl_high_u8(__rev1);
67483   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
67484   return __ret;
67485 }
67486 #endif
67487 
67488 #ifdef __LITTLE_ENDIAN__
67489 __ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
67490   uint64x2_t __ret;
67491   __ret = __p0 + vmovl_high_u32(__p1);
67492   return __ret;
67493 }
67494 #else
67495 __ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
67496   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
67497   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
67498   uint64x2_t __ret;
67499   __ret = __rev0 + __noswap_vmovl_high_u32(__rev1);
67500   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
67501   return __ret;
67502 }
67503 #endif
67504 
67505 #ifdef __LITTLE_ENDIAN__
67506 __ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
67507   uint32x4_t __ret;
67508   __ret = __p0 + vmovl_high_u16(__p1);
67509   return __ret;
67510 }
67511 #else
67512 __ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
67513   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
67514   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
67515   uint32x4_t __ret;
67516   __ret = __rev0 + __noswap_vmovl_high_u16(__rev1);
67517   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
67518   return __ret;
67519 }
67520 #endif
67521 
67522 #ifdef __LITTLE_ENDIAN__
67523 __ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) {
67524   int16x8_t __ret;
67525   __ret = __p0 + vmovl_high_s8(__p1);
67526   return __ret;
67527 }
67528 #else
67529 __ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) {
67530   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
67531   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
67532   int16x8_t __ret;
67533   __ret = __rev0 + __noswap_vmovl_high_s8(__rev1);
67534   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
67535   return __ret;
67536 }
67537 #endif
67538 
67539 #ifdef __LITTLE_ENDIAN__
67540 __ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) {
67541   int64x2_t __ret;
67542   __ret = __p0 + vmovl_high_s32(__p1);
67543   return __ret;
67544 }
67545 #else
67546 __ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) {
67547   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
67548   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
67549   int64x2_t __ret;
67550   __ret = __rev0 + __noswap_vmovl_high_s32(__rev1);
67551   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
67552   return __ret;
67553 }
67554 #endif
67555 
67556 #ifdef __LITTLE_ENDIAN__
67557 __ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) {
67558   int32x4_t __ret;
67559   __ret = __p0 + vmovl_high_s16(__p1);
67560   return __ret;
67561 }
67562 #else
67563 __ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) {
67564   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
67565   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
67566   int32x4_t __ret;
67567   __ret = __rev0 + __noswap_vmovl_high_s16(__rev1);
67568   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
67569   return __ret;
67570 }
67571 #endif
67572 
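/* vcopy(q)_lane(q)_p64/_f64: copy lane __p3 of __p2 into lane __p1 of __p0,
 * implemented as a vget_lane/vset_lane pair. */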
67573 #ifdef __LITTLE_ENDIAN__
67574 #define vcopyq_lane_p64(__p0_250, __p1_250, __p2_250, __p3_250) __extension__ ({ \
67575   poly64x2_t __s0_250 = __p0_250; \
67576   poly64x1_t __s2_250 = __p2_250; \
67577   poly64x2_t __ret_250; \
67578   __ret_250 = vsetq_lane_p64(vget_lane_p64(__s2_250, __p3_250), __s0_250, __p1_250); \
67579   __ret_250; \
67580 })
67581 #else
67582 #define vcopyq_lane_p64(__p0_251, __p1_251, __p2_251, __p3_251) __extension__ ({ \
67583   poly64x2_t __s0_251 = __p0_251; \
67584   poly64x1_t __s2_251 = __p2_251; \
67585   poly64x2_t __rev0_251;  __rev0_251 = __builtin_shufflevector(__s0_251, __s0_251, 1, 0); \
67586   poly64x2_t __ret_251; \
67587   __ret_251 = __noswap_vsetq_lane_p64(__noswap_vget_lane_p64(__s2_251, __p3_251), __rev0_251, __p1_251); \
67588   __ret_251 = __builtin_shufflevector(__ret_251, __ret_251, 1, 0); \
67589   __ret_251; \
67590 })
67591 #endif
67592 
67593 #ifdef __LITTLE_ENDIAN__
67594 #define vcopyq_lane_f64(__p0_252, __p1_252, __p2_252, __p3_252) __extension__ ({ \
67595   float64x2_t __s0_252 = __p0_252; \
67596   float64x1_t __s2_252 = __p2_252; \
67597   float64x2_t __ret_252; \
67598   __ret_252 = vsetq_lane_f64(vget_lane_f64(__s2_252, __p3_252), __s0_252, __p1_252); \
67599   __ret_252; \
67600 })
67601 #else
67602 #define vcopyq_lane_f64(__p0_253, __p1_253, __p2_253, __p3_253) __extension__ ({ \
67603   float64x2_t __s0_253 = __p0_253; \
67604   float64x1_t __s2_253 = __p2_253; \
67605   float64x2_t __rev0_253;  __rev0_253 = __builtin_shufflevector(__s0_253, __s0_253, 1, 0); \
67606   float64x2_t __ret_253; \
67607   __ret_253 = __noswap_vsetq_lane_f64(__noswap_vget_lane_f64(__s2_253, __p3_253), __rev0_253, __p1_253); \
67608   __ret_253 = __builtin_shufflevector(__ret_253, __ret_253, 1, 0); \
67609   __ret_253; \
67610 })
67611 #endif
67612 
67613 #ifdef __LITTLE_ENDIAN__
67614 #define vcopy_lane_p64(__p0_254, __p1_254, __p2_254, __p3_254) __extension__ ({ \
67615   poly64x1_t __s0_254 = __p0_254; \
67616   poly64x1_t __s2_254 = __p2_254; \
67617   poly64x1_t __ret_254; \
67618   __ret_254 = vset_lane_p64(vget_lane_p64(__s2_254, __p3_254), __s0_254, __p1_254); \
67619   __ret_254; \
67620 })
67621 #else
67622 #define vcopy_lane_p64(__p0_255, __p1_255, __p2_255, __p3_255) __extension__ ({ \
67623   poly64x1_t __s0_255 = __p0_255; \
67624   poly64x1_t __s2_255 = __p2_255; \
67625   poly64x1_t __ret_255; \
67626   __ret_255 = __noswap_vset_lane_p64(__noswap_vget_lane_p64(__s2_255, __p3_255), __s0_255, __p1_255); \
67627   __ret_255; \
67628 })
67629 #endif
67630 
67631 #ifdef __LITTLE_ENDIAN__
67632 #define vcopy_lane_f64(__p0_256, __p1_256, __p2_256, __p3_256) __extension__ ({ \
67633   float64x1_t __s0_256 = __p0_256; \
67634   float64x1_t __s2_256 = __p2_256; \
67635   float64x1_t __ret_256; \
67636   __ret_256 = vset_lane_f64(vget_lane_f64(__s2_256, __p3_256), __s0_256, __p1_256); \
67637   __ret_256; \
67638 })
67639 #else
67640 #define vcopy_lane_f64(__p0_257, __p1_257, __p2_257, __p3_257) __extension__ ({ \
67641   float64x1_t __s0_257 = __p0_257; \
67642   float64x1_t __s2_257 = __p2_257; \
67643   float64x1_t __ret_257; \
67644   __ret_257 = __noswap_vset_lane_f64(__noswap_vget_lane_f64(__s2_257, __p3_257), __s0_257, __p1_257); \
67645   __ret_257; \
67646 })
67647 #endif
67648 
67649 #ifdef __LITTLE_ENDIAN__
67650 #define vcopyq_laneq_p64(__p0_258, __p1_258, __p2_258, __p3_258) __extension__ ({ \
67651   poly64x2_t __s0_258 = __p0_258; \
67652   poly64x2_t __s2_258 = __p2_258; \
67653   poly64x2_t __ret_258; \
67654   __ret_258 = vsetq_lane_p64(vgetq_lane_p64(__s2_258, __p3_258), __s0_258, __p1_258); \
67655   __ret_258; \
67656 })
67657 #else
67658 #define vcopyq_laneq_p64(__p0_259, __p1_259, __p2_259, __p3_259) __extension__ ({ \
67659   poly64x2_t __s0_259 = __p0_259; \
67660   poly64x2_t __s2_259 = __p2_259; \
67661   poly64x2_t __rev0_259;  __rev0_259 = __builtin_shufflevector(__s0_259, __s0_259, 1, 0); \
67662   poly64x2_t __rev2_259;  __rev2_259 = __builtin_shufflevector(__s2_259, __s2_259, 1, 0); \
67663   poly64x2_t __ret_259; \
67664   __ret_259 = __noswap_vsetq_lane_p64(__noswap_vgetq_lane_p64(__rev2_259, __p3_259), __rev0_259, __p1_259); \
67665   __ret_259 = __builtin_shufflevector(__ret_259, __ret_259, 1, 0); \
67666   __ret_259; \
67667 })
67668 #endif
67669 
67670 #ifdef __LITTLE_ENDIAN__
67671 #define vcopyq_laneq_f64(__p0_260, __p1_260, __p2_260, __p3_260) __extension__ ({ \
67672   float64x2_t __s0_260 = __p0_260; \
67673   float64x2_t __s2_260 = __p2_260; \
67674   float64x2_t __ret_260; \
67675   __ret_260 = vsetq_lane_f64(vgetq_lane_f64(__s2_260, __p3_260), __s0_260, __p1_260); \
67676   __ret_260; \
67677 })
67678 #else
67679 #define vcopyq_laneq_f64(__p0_261, __p1_261, __p2_261, __p3_261) __extension__ ({ \
67680   float64x2_t __s0_261 = __p0_261; \
67681   float64x2_t __s2_261 = __p2_261; \
67682   float64x2_t __rev0_261;  __rev0_261 = __builtin_shufflevector(__s0_261, __s0_261, 1, 0); \
67683   float64x2_t __rev2_261;  __rev2_261 = __builtin_shufflevector(__s2_261, __s2_261, 1, 0); \
67684   float64x2_t __ret_261; \
67685   __ret_261 = __noswap_vsetq_lane_f64(__noswap_vgetq_lane_f64(__rev2_261, __p3_261), __rev0_261, __p1_261); \
67686   __ret_261 = __builtin_shufflevector(__ret_261, __ret_261, 1, 0); \
67687   __ret_261; \
67688 })
67689 #endif
67690 
67691 #ifdef __LITTLE_ENDIAN__
67692 #define vcopy_laneq_p64(__p0_262, __p1_262, __p2_262, __p3_262) __extension__ ({ \
67693   poly64x1_t __s0_262 = __p0_262; \
67694   poly64x2_t __s2_262 = __p2_262; \
67695   poly64x1_t __ret_262; \
67696   __ret_262 = vset_lane_p64(vgetq_lane_p64(__s2_262, __p3_262), __s0_262, __p1_262); \
67697   __ret_262; \
67698 })
67699 #else
67700 #define vcopy_laneq_p64(__p0_263, __p1_263, __p2_263, __p3_263) __extension__ ({ \
67701   poly64x1_t __s0_263 = __p0_263; \
67702   poly64x2_t __s2_263 = __p2_263; \
67703   poly64x2_t __rev2_263;  __rev2_263 = __builtin_shufflevector(__s2_263, __s2_263, 1, 0); \
67704   poly64x1_t __ret_263; \
67705   __ret_263 = __noswap_vset_lane_p64(__noswap_vgetq_lane_p64(__rev2_263, __p3_263), __s0_263, __p1_263); \
67706   __ret_263; \
67707 })
67708 #endif
67709 
67710 #ifdef __LITTLE_ENDIAN__
67711 #define vcopy_laneq_f64(__p0_264, __p1_264, __p2_264, __p3_264) __extension__ ({ \
67712   float64x1_t __s0_264 = __p0_264; \
67713   float64x2_t __s2_264 = __p2_264; \
67714   float64x1_t __ret_264; \
67715   __ret_264 = vset_lane_f64(vgetq_lane_f64(__s2_264, __p3_264), __s0_264, __p1_264); \
67716   __ret_264; \
67717 })
67718 #else
67719 #define vcopy_laneq_f64(__p0_265, __p1_265, __p2_265, __p3_265) __extension__ ({ \
67720   float64x1_t __s0_265 = __p0_265; \
67721   float64x2_t __s2_265 = __p2_265; \
67722   float64x2_t __rev2_265;  __rev2_265 = __builtin_shufflevector(__s2_265, __s2_265, 1, 0); \
67723   float64x1_t __ret_265; \
67724   __ret_265 = __noswap_vset_lane_f64(__noswap_vgetq_lane_f64(__rev2_265, __p3_265), __s0_265, __p1_265); \
67725   __ret_265; \
67726 })
67727 #endif
67728 
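/* vmlal_high_*: widening multiply-accumulate on the high halves,
 * vmlal(__p0, vget_high(__p1), vget_high(__p2)). */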
67729 #ifdef __LITTLE_ENDIAN__
67730 __ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
67731   uint16x8_t __ret;
67732   __ret = vmlal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2));
67733   return __ret;
67734 }
67735 #else
67736 __ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
67737   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
67738   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
67739   uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
67740   uint16x8_t __ret;
67741   __ret = __noswap_vmlal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
67742   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
67743   return __ret;
67744 }
67745 #endif
67746 
67747 #ifdef __LITTLE_ENDIAN__
67748 __ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
67749   uint64x2_t __ret;
67750   __ret = vmlal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2));
67751   return __ret;
67752 }
67753 #else
67754 __ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
67755   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
67756   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
67757   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
67758   uint64x2_t __ret;
67759   __ret = __noswap_vmlal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
67760   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
67761   return __ret;
67762 }
67763 #endif
67764 
67765 #ifdef __LITTLE_ENDIAN__
67766 __ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
67767   uint32x4_t __ret;
67768   __ret = vmlal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2));
67769   return __ret;
67770 }
67771 #else
67772 __ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
67773   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
67774   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
67775   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
67776   uint32x4_t __ret;
67777   __ret = __noswap_vmlal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
67778   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
67779   return __ret;
67780 }
67781 #endif
67782 
67783 #ifdef __LITTLE_ENDIAN__
67784 __ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
67785   int16x8_t __ret;
67786   __ret = vmlal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2));
67787   return __ret;
67788 }
67789 #else
67790 __ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
67791   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
67792   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
67793   int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
67794   int16x8_t __ret;
67795   __ret = __noswap_vmlal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
67796   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
67797   return __ret;
67798 }
67799 #endif
67800 
67801 #ifdef __LITTLE_ENDIAN__
67802 __ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
67803   int64x2_t __ret;
67804   __ret = vmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
67805   return __ret;
67806 }
67807 #else
67808 __ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
67809   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
67810   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
67811   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
67812   int64x2_t __ret;
67813   __ret = __noswap_vmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
67814   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
67815   return __ret;
67816 }
67817 #endif
67818 
67819 #ifdef __LITTLE_ENDIAN__
67820 __ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
67821   int32x4_t __ret;
67822   __ret = vmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
67823   return __ret;
67824 }
67825 #else
67826 __ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
67827   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
67828   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
67829   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
67830   int32x4_t __ret;
67831   __ret = __noswap_vmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
67832   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
67833   return __ret;
67834 }
67835 #endif
67836 
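/* vmlal_high_n_*: widening multiply-accumulate of the high half of __p1 by the
 * scalar __p2. */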
67837 #ifdef __LITTLE_ENDIAN__
67838 __ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
67839   uint64x2_t __ret;
67840   __ret = vmlal_n_u32(__p0, vget_high_u32(__p1), __p2);
67841   return __ret;
67842 }
67843 #else
67844 __ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
67845   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
67846   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
67847   uint64x2_t __ret;
67848   __ret = __noswap_vmlal_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2);
67849   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
67850   return __ret;
67851 }
67852 #endif
67853 
67854 #ifdef __LITTLE_ENDIAN__
67855 __ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
67856   uint32x4_t __ret;
67857   __ret = vmlal_n_u16(__p0, vget_high_u16(__p1), __p2);
67858   return __ret;
67859 }
67860 #else
67861 __ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
67862   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
67863   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
67864   uint32x4_t __ret;
67865   __ret = __noswap_vmlal_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2);
67866   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
67867   return __ret;
67868 }
67869 #endif
67870 
67871 #ifdef __LITTLE_ENDIAN__
67872 __ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
67873   int64x2_t __ret;
67874   __ret = vmlal_n_s32(__p0, vget_high_s32(__p1), __p2);
67875   return __ret;
67876 }
67877 #else
67878 __ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
67879   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
67880   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
67881   int64x2_t __ret;
67882   __ret = __noswap_vmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
67883   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
67884   return __ret;
67885 }
67886 #endif
67887 
67888 #ifdef __LITTLE_ENDIAN__
67889 __ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
67890   int32x4_t __ret;
67891   __ret = vmlal_n_s16(__p0, vget_high_s16(__p1), __p2);
67892   return __ret;
67893 }
67894 #else
67895 __ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
67896   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
67897   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
67898   int32x4_t __ret;
67899   __ret = __noswap_vmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
67900   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
67901   return __ret;
67902 }
67903 #endif
67904 
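/* vmlsl_high_*: widening multiply-subtract on the high halves,
 * vmlsl(__p0, vget_high(__p1), vget_high(__p2)). */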
67905 #ifdef __LITTLE_ENDIAN__
67906 __ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
67907   uint16x8_t __ret;
67908   __ret = vmlsl_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2));
67909   return __ret;
67910 }
67911 #else
67912 __ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
67913   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
67914   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
67915   uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
67916   uint16x8_t __ret;
67917   __ret = __noswap_vmlsl_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
67918   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
67919   return __ret;
67920 }
67921 #endif
67922 
67923 #ifdef __LITTLE_ENDIAN__
67924 __ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
67925   uint64x2_t __ret;
67926   __ret = vmlsl_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2));
67927   return __ret;
67928 }
67929 #else
67930 __ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
67931   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
67932   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
67933   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
67934   uint64x2_t __ret;
67935   __ret = __noswap_vmlsl_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
67936   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
67937   return __ret;
67938 }
67939 #endif
67940 
67941 #ifdef __LITTLE_ENDIAN__
67942 __ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
67943   uint32x4_t __ret;
67944   __ret = vmlsl_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2));
67945   return __ret;
67946 }
67947 #else
67948 __ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
67949   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
67950   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
67951   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
67952   uint32x4_t __ret;
67953   __ret = __noswap_vmlsl_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
67954   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
67955   return __ret;
67956 }
67957 #endif
67958 
67959 #ifdef __LITTLE_ENDIAN__
67960 __ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
67961   int16x8_t __ret;
67962   __ret = vmlsl_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2));
67963   return __ret;
67964 }
67965 #else
67966 __ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
67967   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
67968   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
67969   int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
67970   int16x8_t __ret;
67971   __ret = __noswap_vmlsl_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
67972   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
67973   return __ret;
67974 }
67975 #endif
67976 
67977 #ifdef __LITTLE_ENDIAN__
67978 __ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
67979   int64x2_t __ret;
67980   __ret = vmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
67981   return __ret;
67982 }
67983 #else
67984 __ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
67985   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
67986   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
67987   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
67988   int64x2_t __ret;
67989   __ret = __noswap_vmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
67990   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
67991   return __ret;
67992 }
67993 #endif
67994 
67995 #ifdef __LITTLE_ENDIAN__
67996 __ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
67997   int32x4_t __ret;
67998   __ret = vmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
67999   return __ret;
68000 }
68001 #else
68002 __ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
68003   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
68004   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
68005   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
68006   int32x4_t __ret;
68007   __ret = __noswap_vmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
68008   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
68009   return __ret;
68010 }
68011 #endif
68012 
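/* vmlsl_high_n_*: widening multiply-subtract of the high half of __p1 by the
 * scalar __p2. */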
68013 #ifdef __LITTLE_ENDIAN__
68014 __ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
68015   uint64x2_t __ret;
68016   __ret = vmlsl_n_u32(__p0, vget_high_u32(__p1), __p2);
68017   return __ret;
68018 }
68019 #else
68020 __ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
68021   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
68022   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
68023   uint64x2_t __ret;
68024   __ret = __noswap_vmlsl_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2);
68025   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
68026   return __ret;
68027 }
68028 #endif
68029 
68030 #ifdef __LITTLE_ENDIAN__
68031 __ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
68032   uint32x4_t __ret;
68033   __ret = vmlsl_n_u16(__p0, vget_high_u16(__p1), __p2);
68034   return __ret;
68035 }
68036 #else
68037 __ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
68038   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
68039   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
68040   uint32x4_t __ret;
68041   __ret = __noswap_vmlsl_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2);
68042   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
68043   return __ret;
68044 }
68045 #endif
68046 
68047 #ifdef __LITTLE_ENDIAN__
68048 __ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
68049   int64x2_t __ret;
68050   __ret = vmlsl_n_s32(__p0, vget_high_s32(__p1), __p2);
68051   return __ret;
68052 }
68053 #else
68054 __ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
68055   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
68056   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
68057   int64x2_t __ret;
68058   __ret = __noswap_vmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
68059   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
68060   return __ret;
68061 }
68062 #endif
68063 
68064 #ifdef __LITTLE_ENDIAN__
68065 __ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
68066   int32x4_t __ret;
68067   __ret = vmlsl_n_s16(__p0, vget_high_s16(__p1), __p2);
68068   return __ret;
68069 }
68070 #else
68071 __ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
68072   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
68073   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
68074   int32x4_t __ret;
68075   __ret = __noswap_vmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
68076   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
68077   return __ret;
68078 }
68079 #endif
68080 
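/* vmulx_lane(q)_f64: scalar floating-point multiply-extended (FMULX) of __p0
 * with the selected lane of __p1, built from vget_lane/vmulxd_f64/vset_lane. */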
68081 #ifdef __LITTLE_ENDIAN__
68082 #define vmulx_lane_f64(__p0_266, __p1_266, __p2_266) __extension__ ({ \
68083   float64x1_t __s0_266 = __p0_266; \
68084   float64x1_t __s1_266 = __p1_266; \
68085   float64x1_t __ret_266; \
68086   float64_t __x_266 = vget_lane_f64(__s0_266, 0); \
68087   float64_t __y_266 = vget_lane_f64(__s1_266, __p2_266); \
68088   float64_t __z_266 = vmulxd_f64(__x_266, __y_266); \
68089   __ret_266 = vset_lane_f64(__z_266, __s0_266, __p2_266); \
68090   __ret_266; \
68091 })
68092 #else
68093 #define vmulx_lane_f64(__p0_267, __p1_267, __p2_267) __extension__ ({ \
68094   float64x1_t __s0_267 = __p0_267; \
68095   float64x1_t __s1_267 = __p1_267; \
68096   float64x1_t __ret_267; \
68097   float64_t __x_267 = __noswap_vget_lane_f64(__s0_267, 0); \
68098   float64_t __y_267 = __noswap_vget_lane_f64(__s1_267, __p2_267); \
68099   float64_t __z_267 = __noswap_vmulxd_f64(__x_267, __y_267); \
68100   __ret_267 = __noswap_vset_lane_f64(__z_267, __s0_267, __p2_267); \
68101   __ret_267; \
68102 })
68103 #endif
68104 
68105 #ifdef __LITTLE_ENDIAN__
68106 #define vmulx_laneq_f64(__p0_268, __p1_268, __p2_268) __extension__ ({ \
68107   float64x1_t __s0_268 = __p0_268; \
68108   float64x2_t __s1_268 = __p1_268; \
68109   float64x1_t __ret_268; \
68110   float64_t __x_268 = vget_lane_f64(__s0_268, 0); \
68111   float64_t __y_268 = vgetq_lane_f64(__s1_268, __p2_268); \
68112   float64_t __z_268 = vmulxd_f64(__x_268, __y_268); \
68113   __ret_268 = vset_lane_f64(__z_268, __s0_268, 0); \
68114   __ret_268; \
68115 })
68116 #else
68117 #define vmulx_laneq_f64(__p0_269, __p1_269, __p2_269) __extension__ ({ \
68118   float64x1_t __s0_269 = __p0_269; \
68119   float64x2_t __s1_269 = __p1_269; \
68120   float64x2_t __rev1_269;  __rev1_269 = __builtin_shufflevector(__s1_269, __s1_269, 1, 0); \
68121   float64x1_t __ret_269; \
68122   float64_t __x_269 = __noswap_vget_lane_f64(__s0_269, 0); \
68123   float64_t __y_269 = __noswap_vgetq_lane_f64(__rev1_269, __p2_269); \
68124   float64_t __z_269 = __noswap_vmulxd_f64(__x_269, __y_269); \
68125   __ret_269 = __noswap_vset_lane_f64(__z_269, __s0_269, 0); \
68126   __ret_269; \
68127 })
68128 #endif
68129 
68130 #endif
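/* vabal_*: widening absolute difference and accumulate,
 * __p0 + vabdl(__p1, __p2). */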
68131 #ifdef __LITTLE_ENDIAN__
68132 __ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
68133   uint16x8_t __ret;
68134   __ret = __p0 + vabdl_u8(__p1, __p2);
68135   return __ret;
68136 }
68137 #else
68138 __ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
68139   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
68140   uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
68141   uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
68142   uint16x8_t __ret;
68143   __ret = __rev0 + __noswap_vabdl_u8(__rev1, __rev2);
68144   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
68145   return __ret;
68146 }
68147 __ai uint16x8_t __noswap_vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
68148   uint16x8_t __ret;
68149   __ret = __p0 + __noswap_vabdl_u8(__p1, __p2);
68150   return __ret;
68151 }
68152 #endif
68153 
68154 #ifdef __LITTLE_ENDIAN__
68155 __ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
68156   uint64x2_t __ret;
68157   __ret = __p0 + vabdl_u32(__p1, __p2);
68158   return __ret;
68159 }
68160 #else
68161 __ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
68162   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
68163   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
68164   uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
68165   uint64x2_t __ret;
68166   __ret = __rev0 + __noswap_vabdl_u32(__rev1, __rev2);
68167   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
68168   return __ret;
68169 }
68170 __ai uint64x2_t __noswap_vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
68171   uint64x2_t __ret;
68172   __ret = __p0 + __noswap_vabdl_u32(__p1, __p2);
68173   return __ret;
68174 }
68175 #endif
68176 
68177 #ifdef __LITTLE_ENDIAN__
68178 __ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
68179   uint32x4_t __ret;
68180   __ret = __p0 + vabdl_u16(__p1, __p2);
68181   return __ret;
68182 }
68183 #else
68184 __ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
68185   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
68186   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
68187   uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
68188   uint32x4_t __ret;
68189   __ret = __rev0 + __noswap_vabdl_u16(__rev1, __rev2);
68190   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
68191   return __ret;
68192 }
68193 __ai uint32x4_t __noswap_vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
68194   uint32x4_t __ret;
68195   __ret = __p0 + __noswap_vabdl_u16(__p1, __p2);
68196   return __ret;
68197 }
68198 #endif
68199 
68200 #ifdef __LITTLE_ENDIAN__
68201 __ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
68202   int16x8_t __ret;
68203   __ret = __p0 + vabdl_s8(__p1, __p2);
68204   return __ret;
68205 }
68206 #else
68207 __ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
68208   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
68209   int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
68210   int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
68211   int16x8_t __ret;
68212   __ret = __rev0 + __noswap_vabdl_s8(__rev1, __rev2);
68213   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
68214   return __ret;
68215 }
68216 __ai int16x8_t __noswap_vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
68217   int16x8_t __ret;
68218   __ret = __p0 + __noswap_vabdl_s8(__p1, __p2);
68219   return __ret;
68220 }
68221 #endif
68222 
68223 #ifdef __LITTLE_ENDIAN__
68224 __ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
68225   int64x2_t __ret;
68226   __ret = __p0 + vabdl_s32(__p1, __p2);
68227   return __ret;
68228 }
68229 #else
68230 __ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
68231   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
68232   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
68233   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
68234   int64x2_t __ret;
68235   __ret = __rev0 + __noswap_vabdl_s32(__rev1, __rev2);
68236   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
68237   return __ret;
68238 }
68239 __ai int64x2_t __noswap_vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
68240   int64x2_t __ret;
68241   __ret = __p0 + __noswap_vabdl_s32(__p1, __p2);
68242   return __ret;
68243 }
68244 #endif
68245 
68246 #ifdef __LITTLE_ENDIAN__
68247 __ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
68248   int32x4_t __ret;
68249   __ret = __p0 + vabdl_s16(__p1, __p2);
68250   return __ret;
68251 }
68252 #else
68253 __ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
68254   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
68255   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
68256   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
68257   int32x4_t __ret;
68258   __ret = __rev0 + __noswap_vabdl_s16(__rev1, __rev2);
68259   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
68260   return __ret;
68261 }
68262 __ai int32x4_t __noswap_vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
68263   int32x4_t __ret;
68264   __ret = __p0 + __noswap_vabdl_s16(__p1, __p2);
68265   return __ret;
68266 }
68267 #endif
68268 
68269 #if defined(__aarch64__)
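/* AArch64 only.  vabal_high_*: absolute difference and accumulate on the high
 * halves, vabal(__p0, vget_high(__p1), vget_high(__p2)). */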
68270 #ifdef __LITTLE_ENDIAN__
68271 __ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
68272   uint16x8_t __ret;
68273   __ret = vabal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2));
68274   return __ret;
68275 }
68276 #else
68277 __ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
68278   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
68279   uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
68280   uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
68281   uint16x8_t __ret;
68282   __ret = __noswap_vabal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
68283   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
68284   return __ret;
68285 }
68286 #endif
68287 
68288 #ifdef __LITTLE_ENDIAN__
68289 __ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
68290   uint64x2_t __ret;
68291   __ret = vabal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2));
68292   return __ret;
68293 }
68294 #else
68295 __ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
68296   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
68297   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
68298   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
68299   uint64x2_t __ret;
68300   __ret = __noswap_vabal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
68301   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
68302   return __ret;
68303 }
68304 #endif
68305 
68306 #ifdef __LITTLE_ENDIAN__
68307 __ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
68308   uint32x4_t __ret;
68309   __ret = vabal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2));
68310   return __ret;
68311 }
68312 #else
68313 __ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
68314   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
68315   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
68316   uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
68317   uint32x4_t __ret;
68318   __ret = __noswap_vabal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
68319   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
68320   return __ret;
68321 }
68322 #endif
68323 
68324 #ifdef __LITTLE_ENDIAN__
68325 __ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
68326   int16x8_t __ret;
68327   __ret = vabal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2));
68328   return __ret;
68329 }
68330 #else
68331 __ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
68332   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
68333   int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
68334   int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
68335   int16x8_t __ret;
68336   __ret = __noswap_vabal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
68337   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
68338   return __ret;
68339 }
68340 #endif
68341 
68342 #ifdef __LITTLE_ENDIAN__
68343 __ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
68344   int64x2_t __ret;
68345   __ret = vabal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
68346   return __ret;
68347 }
68348 #else
68349 __ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
68350   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
68351   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
68352   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
68353   int64x2_t __ret;
68354   __ret = __noswap_vabal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
68355   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
68356   return __ret;
68357 }
68358 #endif
68359 
68360 #ifdef __LITTLE_ENDIAN__
68361 __ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
68362   int32x4_t __ret;
68363   __ret = vabal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
68364   return __ret;
68365 }
68366 #else
68367 __ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
68368   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
68369   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
68370   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
68371   int32x4_t __ret;
68372   __ret = __noswap_vabal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
68373   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
68374   return __ret;
68375 }
68376 #endif
68377 
68378 #endif
68379 
68380 #undef __ai
68381 
68382 #endif /* __ARM_NEON_H */
68383